Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'v6.5-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
"API:
- Add linear akcipher/sig API
- Add tfm cloning (hmac, cmac)
- Add statesize to crypto_ahash

Algorithms:
- Allow only odd e and restrict value in FIPS mode for RSA
- Replace LFSR with SHA3-256 in jitter
- Add interface for gathering of raw entropy in jitter

Drivers:
- Fix race on data_avail and actual data in hwrng/virtio
- Add hash and HMAC support in starfive
- Add RSA algo support in starfive
- Add support for PCI device 0x156E in ccp"

* tag 'v6.5-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (85 commits)
crypto: akcipher - Do not copy dst if it is NULL
crypto: sig - Fix verify call
crypto: akcipher - Set request tfm on sync path
crypto: sm2 - Provide sm2_compute_z_digest when sm2 is disabled
hwrng: imx-rngc - switch to DEFINE_SIMPLE_DEV_PM_OPS
hwrng: st - keep clock enabled while hwrng is registered
hwrng: st - support compile-testing
hwrng: imx-rngc - fix the timeout for init and self check
KEYS: asymmetric: Use new crypto interface without scatterlists
KEYS: asymmetric: Move sm2 code into x509_public_key
KEYS: Add forward declaration in asymmetric-parser.h
crypto: sig - Add interface for sign/verify
crypto: akcipher - Add sync interface without SG lists
crypto: cipher - On clone do crypto_mod_get()
crypto: api - Add __crypto_alloc_tfmgfp
crypto: api - Remove crypto_init_ops()
crypto: rsa - allow only odd e and restrict value in FIPS mode
crypto: geniv - Split geniv out of AEAD Kconfig option
crypto: algboss - Add missing dependency on RNG2
crypto: starfive - Add RSA algo support
...

+4948 -977
+46
Documentation/ABI/testing/sysfs-driver-qat
··· 27 27 28 28 * sym;asym: the device is configured for running crypto 29 29 services 30 + * asym;sym: identical to sym;asym 30 31 * dc: the device is configured for running compression services 32 + * sym: the device is configured for running symmetric crypto 33 + services 34 + * asym: the device is configured for running asymmetric crypto 35 + services 36 + * asym;dc: the device is configured for running asymmetric 37 + crypto services and compression services 38 + * dc;asym: identical to asym;dc 39 + * sym;dc: the device is configured for running symmetric crypto 40 + services and compression services 41 + * dc;sym: identical to sym;dc 31 42 32 43 It is possible to set the configuration only if the device 33 44 is in the `down` state (see /sys/bus/pci/devices/<BDF>/qat/state) ··· 56 45 # echo up > /sys/bus/pci/devices/<BDF>/qat/state 57 46 # cat /sys/bus/pci/devices/<BDF>/qat/cfg_services 58 47 dc 48 + 49 + This attribute is only available for qat_4xxx devices. 50 + 51 + What: /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled 52 + Date: June 2023 53 + KernelVersion: 6.5 54 + Contact: qat-linux@intel.com 55 + Description: (RW) This configuration option provides a way to force the device into remaining in 56 + the MAX power state. 57 + If idle support is enabled the device will transition to the `MIN` power state when 58 + idle, otherwise will stay in the MAX power state. 59 + Write to the file to enable or disable idle support. 60 + 61 + The values are: 62 + 63 + * 0: idle support is disabled 64 + * 1: idle support is enabled 65 + 66 + Default value is 1. 
67 + 68 + It is possible to set the pm_idle_enabled value only if the device 69 + is in the `down` state (see /sys/bus/pci/devices/<BDF>/qat/state) 70 + 71 + The following example shows how to change the pm_idle_enabled of 72 + a device:: 73 + 74 + # cat /sys/bus/pci/devices/<BDF>/qat/state 75 + up 76 + # cat /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled 77 + 1 78 + # echo down > /sys/bus/pci/devices/<BDF>/qat/state 79 + # echo 0 > /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled 80 + # echo up > /sys/bus/pci/devices/<BDF>/qat/state 81 + # cat /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled 82 + 0 59 83 60 84 This attribute is only available for qat_4xxx devices.
+42 -10
Documentation/devicetree/bindings/crypto/qcom-qce.yaml
··· 26 26 27 27 - items: 28 28 - enum: 29 + - qcom,ipq4019-qce 30 + - qcom,sm8150-qce 31 + - const: qcom,qce 32 + 33 + - items: 34 + - enum: 29 35 - qcom,ipq6018-qce 30 36 - qcom,ipq8074-qce 31 37 - qcom,msm8996-qce 38 + - qcom,qcm2290-qce 32 39 - qcom,sdm845-qce 40 + - qcom,sm6115-qce 33 41 - const: qcom,ipq4019-qce 34 42 - const: qcom,qce 35 43 ··· 54 46 maxItems: 1 55 47 56 48 clocks: 57 - items: 58 - - description: iface clocks register interface. 59 - - description: bus clocks data transfer interface. 60 - - description: core clocks rest of the crypto block. 49 + minItems: 1 50 + maxItems: 3 61 51 62 52 clock-names: 63 - items: 64 - - const: iface 65 - - const: bus 66 - - const: core 53 + minItems: 1 54 + maxItems: 3 67 55 68 56 iommus: 69 57 minItems: 1 ··· 93 89 enum: 94 90 - qcom,crypto-v5.1 95 91 - qcom,crypto-v5.4 96 - - qcom,ipq4019-qce 97 - 92 + - qcom,ipq6018-qce 93 + - qcom,ipq8074-qce 94 + - qcom,msm8996-qce 95 + - qcom,sdm845-qce 98 96 then: 97 + properties: 98 + clocks: 99 + maxItems: 3 100 + clock-names: 101 + items: 102 + - const: iface 103 + - const: bus 104 + - const: core 105 + required: 106 + - clocks 107 + - clock-names 108 + 109 + - if: 110 + properties: 111 + compatible: 112 + contains: 113 + enum: 114 + - qcom,qcm2290-qce 115 + - qcom,sm6115-qce 116 + then: 117 + properties: 118 + clocks: 119 + maxItems: 1 120 + clock-names: 121 + items: 122 + - const: core 99 123 required: 100 124 - clocks 101 125 - clock-names
+70
Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/crypto/starfive,jh7110-crypto.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: StarFive Cryptographic Module 8 + 9 + maintainers: 10 + - Jia Jie Ho <jiajie.ho@starfivetech.com> 11 + - William Qiu <william.qiu@starfivetech.com> 12 + 13 + properties: 14 + compatible: 15 + const: starfive,jh7110-crypto 16 + 17 + reg: 18 + maxItems: 1 19 + 20 + clocks: 21 + items: 22 + - description: Hardware reference clock 23 + - description: AHB reference clock 24 + 25 + clock-names: 26 + items: 27 + - const: hclk 28 + - const: ahb 29 + 30 + interrupts: 31 + maxItems: 1 32 + 33 + resets: 34 + maxItems: 1 35 + 36 + dmas: 37 + items: 38 + - description: TX DMA channel 39 + - description: RX DMA channel 40 + 41 + dma-names: 42 + items: 43 + - const: tx 44 + - const: rx 45 + 46 + required: 47 + - compatible 48 + - reg 49 + - clocks 50 + - clock-names 51 + - resets 52 + - dmas 53 + - dma-names 54 + 55 + additionalProperties: false 56 + 57 + examples: 58 + - | 59 + crypto: crypto@16000000 { 60 + compatible = "starfive,jh7110-crypto"; 61 + reg = <0x16000000 0x4000>; 62 + clocks = <&clk 15>, <&clk 16>; 63 + clock-names = "hclk", "ahb"; 64 + interrupts = <28>; 65 + resets = <&reset 3>; 66 + dmas = <&dma 1 2>, 67 + <&dma 0 2>; 68 + dma-names = "tx", "rx"; 69 + }; 70 + ...
+7
MAINTAINERS
··· 20265 20265 F: drivers/clk/starfive/clk-starfive-jh71* 20266 20266 F: include/dt-bindings/clock/starfive?jh71*.h 20267 20267 20268 + STARFIVE CRYPTO DRIVER 20269 + M: Jia Jie Ho <jiajie.ho@starfivetech.com> 20270 + M: William Qiu <william.qiu@starfivetech.com> 20271 + S: Supported 20272 + F: Documentation/devicetree/bindings/crypto/starfive* 20273 + F: drivers/crypto/starfive/ 20274 + 20268 20275 STARFIVE JH71X0 PINCTRL DRIVERS 20269 20276 M: Emil Renner Berthing <kernel@esmil.dk> 20270 20277 M: Jianlong Huang <jianlong.huang@starfivetech.com>
+5 -7
arch/arm/crypto/sha1_neon_glue.c
··· 26 26 27 27 #include "sha1.h" 28 28 29 - asmlinkage void sha1_transform_neon(void *state_h, const char *data, 30 - unsigned int rounds); 29 + asmlinkage void sha1_transform_neon(struct sha1_state *state_h, 30 + const u8 *data, int rounds); 31 31 32 32 static int sha1_neon_update(struct shash_desc *desc, const u8 *data, 33 33 unsigned int len) ··· 39 39 return sha1_update_arm(desc, data, len); 40 40 41 41 kernel_neon_begin(); 42 - sha1_base_do_update(desc, data, len, 43 - (sha1_block_fn *)sha1_transform_neon); 42 + sha1_base_do_update(desc, data, len, sha1_transform_neon); 44 43 kernel_neon_end(); 45 44 46 45 return 0; ··· 53 54 54 55 kernel_neon_begin(); 55 56 if (len) 56 - sha1_base_do_update(desc, data, len, 57 - (sha1_block_fn *)sha1_transform_neon); 58 - sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon); 57 + sha1_base_do_update(desc, data, len, sha1_transform_neon); 58 + sha1_base_do_finalize(desc, sha1_transform_neon); 59 59 kernel_neon_end(); 60 60 61 61 return sha1_base_finish(desc, out);
+5 -7
arch/arm/crypto/sha256_neon_glue.c
··· 21 21 22 22 #include "sha256_glue.h" 23 23 24 - asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data, 25 - unsigned int num_blks); 24 + asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest, 25 + const u8 *data, int num_blks); 26 26 27 27 static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data, 28 28 unsigned int len) ··· 34 34 return crypto_sha256_arm_update(desc, data, len); 35 35 36 36 kernel_neon_begin(); 37 - sha256_base_do_update(desc, data, len, 38 - (sha256_block_fn *)sha256_block_data_order_neon); 37 + sha256_base_do_update(desc, data, len, sha256_block_data_order_neon); 39 38 kernel_neon_end(); 40 39 41 40 return 0; ··· 49 50 kernel_neon_begin(); 50 51 if (len) 51 52 sha256_base_do_update(desc, data, len, 52 - (sha256_block_fn *)sha256_block_data_order_neon); 53 - sha256_base_do_finalize(desc, 54 - (sha256_block_fn *)sha256_block_data_order_neon); 53 + sha256_block_data_order_neon); 54 + sha256_base_do_finalize(desc, sha256_block_data_order_neon); 55 55 kernel_neon_end(); 56 56 57 57 return sha256_base_finish(desc, out);
+5 -7
arch/arm/crypto/sha512-neon-glue.c
··· 20 20 MODULE_ALIAS_CRYPTO("sha384-neon"); 21 21 MODULE_ALIAS_CRYPTO("sha512-neon"); 22 22 23 - asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src, 24 - int blocks); 23 + asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, 24 + const u8 *src, int blocks); 25 25 26 26 static int sha512_neon_update(struct shash_desc *desc, const u8 *data, 27 27 unsigned int len) ··· 33 33 return sha512_arm_update(desc, data, len); 34 34 35 35 kernel_neon_begin(); 36 - sha512_base_do_update(desc, data, len, 37 - (sha512_block_fn *)sha512_block_data_order_neon); 36 + sha512_base_do_update(desc, data, len, sha512_block_data_order_neon); 38 37 kernel_neon_end(); 39 38 40 39 return 0; ··· 48 49 kernel_neon_begin(); 49 50 if (len) 50 51 sha512_base_do_update(desc, data, len, 51 - (sha512_block_fn *)sha512_block_data_order_neon); 52 - sha512_base_do_finalize(desc, 53 - (sha512_block_fn *)sha512_block_data_order_neon); 52 + sha512_block_data_order_neon); 53 + sha512_base_do_finalize(desc, sha512_block_data_order_neon); 54 54 kernel_neon_end(); 55 55 56 56 return sha512_base_finish(desc, out);
+2 -1
arch/arm64/crypto/sha256-glue.c
··· 12 12 #include <crypto/internal/simd.h> 13 13 #include <crypto/sha2.h> 14 14 #include <crypto/sha256_base.h> 15 - #include <linux/types.h> 15 + #include <linux/module.h> 16 16 #include <linux/string.h> 17 + #include <linux/types.h> 17 18 18 19 MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64"); 19 20 MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
+49 -17
crypto/Kconfig
··· 71 71 config CRYPTO_AEAD2 72 72 tristate 73 73 select CRYPTO_ALGAPI2 74 - select CRYPTO_NULL2 75 - select CRYPTO_RNG2 74 + 75 + config CRYPTO_SIG 76 + tristate 77 + select CRYPTO_SIG2 78 + select CRYPTO_ALGAPI 79 + 80 + config CRYPTO_SIG2 81 + tristate 82 + select CRYPTO_ALGAPI2 76 83 77 84 config CRYPTO_SKCIPHER 78 85 tristate ··· 89 82 config CRYPTO_SKCIPHER2 90 83 tristate 91 84 select CRYPTO_ALGAPI2 92 - select CRYPTO_RNG2 93 85 94 86 config CRYPTO_HASH 95 87 tristate ··· 149 143 150 144 config CRYPTO_MANAGER2 151 145 def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y) 152 - select CRYPTO_AEAD2 153 - select CRYPTO_HASH2 154 - select CRYPTO_SKCIPHER2 155 - select CRYPTO_AKCIPHER2 156 - select CRYPTO_KPP2 157 146 select CRYPTO_ACOMP2 147 + select CRYPTO_AEAD2 148 + select CRYPTO_AKCIPHER2 149 + select CRYPTO_SIG2 150 + select CRYPTO_HASH2 151 + select CRYPTO_KPP2 152 + select CRYPTO_RNG2 153 + select CRYPTO_SKCIPHER2 158 154 159 155 config CRYPTO_USER 160 156 tristate "Userspace cryptographic algorithm configuration" ··· 841 833 842 834 This is required for IPSec ESP (XFRM_ESP). 
843 835 836 + config CRYPTO_GENIV 837 + tristate 838 + select CRYPTO_AEAD 839 + select CRYPTO_NULL 840 + select CRYPTO_MANAGER 841 + select CRYPTO_RNG_DEFAULT 842 + 844 843 config CRYPTO_SEQIV 845 844 tristate "Sequence Number IV Generator" 846 - select CRYPTO_AEAD 847 - select CRYPTO_SKCIPHER 848 - select CRYPTO_NULL 849 - select CRYPTO_RNG_DEFAULT 850 - select CRYPTO_MANAGER 845 + select CRYPTO_GENIV 851 846 help 852 847 Sequence Number IV generator 853 848 ··· 861 850 862 851 config CRYPTO_ECHAINIV 863 852 tristate "Encrypted Chain IV Generator" 864 - select CRYPTO_AEAD 865 - select CRYPTO_NULL 866 - select CRYPTO_RNG_DEFAULT 867 - select CRYPTO_MANAGER 853 + select CRYPTO_GENIV 868 854 help 869 855 Encrypted Chain IV generator 870 856 ··· 1285 1277 config CRYPTO_JITTERENTROPY 1286 1278 tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)" 1287 1279 select CRYPTO_RNG 1280 + select CRYPTO_SHA3 1288 1281 help 1289 1282 CPU Jitter RNG (Random Number Generator) from the Jitterentropy library 1290 1283 ··· 1295 1286 This RNG does not perform any cryptographic whitening of the generated 1296 1287 1297 1288 See https://www.chronox.de/jent.html 1289 + 1290 + config CRYPTO_JITTERENTROPY_TESTINTERFACE 1291 + bool "CPU Jitter RNG Test Interface" 1292 + depends on CRYPTO_JITTERENTROPY 1293 + help 1294 + The test interface allows a privileged process to capture 1295 + the raw unconditioned high resolution time stamp noise that 1296 + is collected by the Jitter RNG for statistical analysis. As 1297 + this data is used at the same time to generate random bits, 1298 + the Jitter RNG operates in an insecure mode as long as the 1299 + recording is enabled. This interface therefore is only 1300 + intended for testing purposes and is not suitable for 1301 + production systems. 1302 + 1303 + The raw noise data can be obtained using the jent_raw_hires 1304 + debugfs file. 
Using the option 1305 + jitterentropy_testing.boot_raw_hires_test=1 the raw noise of 1306 + the first 1000 entropy events since boot can be sampled. 1307 + 1308 + If unsure, select N. 1298 1309 1299 1310 config CRYPTO_KDF800108_CTR 1300 1311 tristate ··· 1400 1371 depends on CRYPTO_USER 1401 1372 help 1402 1373 Enable the gathering of crypto stats. 1374 + 1375 + Enabling this option reduces the performance of the crypto API. It 1376 + should only be enabled when there is actually a use case for it. 1403 1377 1404 1378 This collects data sizes, numbers of requests, and numbers 1405 1379 of errors processed by:
+3 -1
crypto/Makefile
··· 14 14 obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o 15 15 16 16 obj-$(CONFIG_CRYPTO_AEAD2) += aead.o 17 - obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o 17 + obj-$(CONFIG_CRYPTO_GENIV) += geniv.o 18 18 19 19 obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o 20 20 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o ··· 25 25 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o 26 26 27 27 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o 28 + obj-$(CONFIG_CRYPTO_SIG2) += sig.o 28 29 obj-$(CONFIG_CRYPTO_KPP2) += kpp.o 29 30 30 31 dh_generic-y := dh.o ··· 172 171 KASAN_SANITIZE_jitterentropy.o = n 173 172 UBSAN_SANITIZE_jitterentropy.o = n 174 173 jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o 174 + obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o 175 175 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o 176 176 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o 177 177 obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
+17
crypto/aegis-neon.h
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #ifndef _AEGIS_NEON_H 4 + #define _AEGIS_NEON_H 5 + 6 + void crypto_aegis128_init_neon(void *state, const void *key, const void *iv); 7 + void crypto_aegis128_update_neon(void *state, const void *msg); 8 + void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, 9 + unsigned int size); 10 + void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, 11 + unsigned int size); 12 + int crypto_aegis128_final_neon(void *state, void *tag_xor, 13 + unsigned int assoclen, 14 + unsigned int cryptlen, 15 + unsigned int authsize); 16 + 17 + #endif
+1
crypto/aegis128-neon-inner.c
··· 16 16 #define AEGIS_BLOCK_SIZE 16 17 17 18 18 #include <stddef.h> 19 + #include "aegis-neon.h" 19 20 20 21 extern int aegis128_have_aes_insn; 21 22
+1 -11
crypto/aegis128-neon.c
··· 7 7 #include <asm/neon.h> 8 8 9 9 #include "aegis.h" 10 - 11 - void crypto_aegis128_init_neon(void *state, const void *key, const void *iv); 12 - void crypto_aegis128_update_neon(void *state, const void *msg); 13 - void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, 14 - unsigned int size); 15 - void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, 16 - unsigned int size); 17 - int crypto_aegis128_final_neon(void *state, void *tag_xor, 18 - unsigned int assoclen, 19 - unsigned int cryptlen, 20 - unsigned int authsize); 10 + #include "aegis-neon.h" 21 11 22 12 int aegis128_have_aes_insn __ro_after_init; 23 13
+3 -6
crypto/ahash.c
··· 31 31 void *ubuf[] CRYPTO_MINALIGN_ATTR; 32 32 }; 33 33 34 - static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) 35 - { 36 - return container_of(crypto_hash_alg_common(hash), struct ahash_alg, 37 - halg); 38 - } 39 - 40 34 static int hash_walk_next(struct crypto_hash_walk *walk) 41 35 { 42 36 unsigned int alignmask = walk->alignmask; ··· 426 432 427 433 hash->setkey = ahash_nosetkey; 428 434 435 + crypto_ahash_set_statesize(hash, alg->halg.statesize); 436 + 429 437 if (tfm->__crt_alg->cra_type != &crypto_ahash_type) 430 438 return crypto_init_shash_ops_async(tfm); 431 439 ··· 569 573 nhash->import = hash->import; 570 574 nhash->setkey = hash->setkey; 571 575 nhash->reqsize = hash->reqsize; 576 + nhash->statesize = hash->statesize; 572 577 573 578 if (tfm->__crt_alg->cra_type != &crypto_ahash_type) 574 579 return crypto_clone_shash_ops_async(nhash, hash);
+123 -1
crypto/akcipher.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/kernel.h> 12 12 #include <linux/module.h> 13 + #include <linux/scatterlist.h> 13 14 #include <linux/seq_file.h> 14 15 #include <linux/slab.h> 15 16 #include <linux/string.h> 16 17 #include <net/netlink.h> 17 18 18 19 #include "internal.h" 20 + 21 + #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e 19 22 20 23 static int __maybe_unused crypto_akcipher_report( 21 24 struct sk_buff *skb, struct crypto_alg *alg) ··· 108 105 .report_stat = crypto_akcipher_report_stat, 109 106 #endif 110 107 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 111 - .maskset = CRYPTO_ALG_TYPE_MASK, 108 + .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, 112 109 .type = CRYPTO_ALG_TYPE_AKCIPHER, 113 110 .tfmsize = offsetof(struct crypto_akcipher, base), 114 111 }; ··· 188 185 return crypto_register_instance(tmpl, akcipher_crypto_instance(inst)); 189 186 } 190 187 EXPORT_SYMBOL_GPL(akcipher_register_instance); 188 + 189 + int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data) 190 + { 191 + unsigned int reqsize = crypto_akcipher_reqsize(data->tfm); 192 + struct akcipher_request *req; 193 + struct scatterlist *sg; 194 + unsigned int mlen; 195 + unsigned int len; 196 + u8 *buf; 197 + 198 + if (data->dst) 199 + mlen = max(data->slen, data->dlen); 200 + else 201 + mlen = data->slen + data->dlen; 202 + 203 + len = sizeof(*req) + reqsize + mlen; 204 + if (len < mlen) 205 + return -EOVERFLOW; 206 + 207 + req = kzalloc(len, GFP_KERNEL); 208 + if (!req) 209 + return -ENOMEM; 210 + 211 + data->req = req; 212 + akcipher_request_set_tfm(req, data->tfm); 213 + 214 + buf = (u8 *)(req + 1) + reqsize; 215 + data->buf = buf; 216 + memcpy(buf, data->src, data->slen); 217 + 218 + sg = &data->sg; 219 + sg_init_one(sg, buf, mlen); 220 + akcipher_request_set_crypt(req, sg, data->dst ? 
sg : NULL, 221 + data->slen, data->dlen); 222 + 223 + crypto_init_wait(&data->cwait); 224 + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, 225 + crypto_req_done, &data->cwait); 226 + 227 + return 0; 228 + } 229 + EXPORT_SYMBOL_GPL(crypto_akcipher_sync_prep); 230 + 231 + int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err) 232 + { 233 + err = crypto_wait_req(err, &data->cwait); 234 + if (data->dst) 235 + memcpy(data->dst, data->buf, data->dlen); 236 + data->dlen = data->req->dst_len; 237 + kfree_sensitive(data->req); 238 + return err; 239 + } 240 + EXPORT_SYMBOL_GPL(crypto_akcipher_sync_post); 241 + 242 + int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, 243 + const void *src, unsigned int slen, 244 + void *dst, unsigned int dlen) 245 + { 246 + struct crypto_akcipher_sync_data data = { 247 + .tfm = tfm, 248 + .src = src, 249 + .dst = dst, 250 + .slen = slen, 251 + .dlen = dlen, 252 + }; 253 + 254 + return crypto_akcipher_sync_prep(&data) ?: 255 + crypto_akcipher_sync_post(&data, 256 + crypto_akcipher_encrypt(data.req)); 257 + } 258 + EXPORT_SYMBOL_GPL(crypto_akcipher_sync_encrypt); 259 + 260 + int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm, 261 + const void *src, unsigned int slen, 262 + void *dst, unsigned int dlen) 263 + { 264 + struct crypto_akcipher_sync_data data = { 265 + .tfm = tfm, 266 + .src = src, 267 + .dst = dst, 268 + .slen = slen, 269 + .dlen = dlen, 270 + }; 271 + 272 + return crypto_akcipher_sync_prep(&data) ?: 273 + crypto_akcipher_sync_post(&data, 274 + crypto_akcipher_decrypt(data.req)) ?: 275 + data.dlen; 276 + } 277 + EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt); 278 + 279 + static void crypto_exit_akcipher_ops_sig(struct crypto_tfm *tfm) 280 + { 281 + struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm); 282 + 283 + crypto_free_akcipher(*ctx); 284 + } 285 + 286 + int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm) 287 + { 288 + struct crypto_akcipher **ctx = 
crypto_tfm_ctx(tfm); 289 + struct crypto_alg *calg = tfm->__crt_alg; 290 + struct crypto_akcipher *akcipher; 291 + 292 + if (!crypto_mod_get(calg)) 293 + return -EAGAIN; 294 + 295 + akcipher = crypto_create_tfm(calg, &crypto_akcipher_type); 296 + if (IS_ERR(akcipher)) { 297 + crypto_mod_put(calg); 298 + return PTR_ERR(akcipher); 299 + } 300 + 301 + *ctx = akcipher; 302 + tfm->exit = crypto_exit_akcipher_ops_sig; 303 + 304 + return 0; 305 + } 306 + EXPORT_SYMBOL_GPL(crypto_init_akcipher_ops_sig); 191 307 192 308 MODULE_LICENSE("GPL"); 193 309 MODULE_DESCRIPTION("Generic public key cipher type");
+10 -17
crypto/api.c
··· 345 345 } 346 346 EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); 347 347 348 - static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) 349 - { 350 - const struct crypto_type *type_obj = tfm->__crt_alg->cra_type; 351 - 352 - if (type_obj) 353 - return type_obj->init(tfm, type, mask); 354 - return 0; 355 - } 356 - 357 348 static void crypto_exit_ops(struct crypto_tfm *tfm) 358 349 { 359 350 const struct crypto_type *type = tfm->__crt_alg->cra_type; ··· 386 395 } 387 396 EXPORT_SYMBOL_GPL(crypto_shoot_alg); 388 397 389 - struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, 390 - u32 mask) 398 + struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, 399 + u32 mask, gfp_t gfp) 391 400 { 392 401 struct crypto_tfm *tfm = NULL; 393 402 unsigned int tfm_size; 394 403 int err = -ENOMEM; 395 404 396 405 tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask); 397 - tfm = kzalloc(tfm_size, GFP_KERNEL); 406 + tfm = kzalloc(tfm_size, gfp); 398 407 if (tfm == NULL) 399 408 goto out_err; 400 409 401 410 tfm->__crt_alg = alg; 402 411 refcount_set(&tfm->refcnt, 1); 403 - 404 - err = crypto_init_ops(tfm, type, mask); 405 - if (err) 406 - goto out_free_tfm; 407 412 408 413 if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) 409 414 goto cra_init_failed; ··· 408 421 409 422 cra_init_failed: 410 423 crypto_exit_ops(tfm); 411 - out_free_tfm: 412 424 if (err == -EAGAIN) 413 425 crypto_shoot_alg(alg); 414 426 kfree(tfm); ··· 415 429 tfm = ERR_PTR(err); 416 430 out: 417 431 return tfm; 432 + } 433 + EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp); 434 + 435 + struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, 436 + u32 mask) 437 + { 438 + return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL); 418 439 } 419 440 EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); 420 441
+146 -177
crypto/asymmetric_keys/public_key.c
··· 8 8 */ 9 9 10 10 #define pr_fmt(fmt) "PKEY: "fmt 11 - #include <linux/module.h> 12 - #include <linux/export.h> 13 - #include <linux/kernel.h> 14 - #include <linux/slab.h> 15 - #include <linux/seq_file.h> 16 - #include <linux/scatterlist.h> 17 - #include <linux/asn1.h> 18 - #include <keys/asymmetric-subtype.h> 19 - #include <crypto/public_key.h> 20 11 #include <crypto/akcipher.h> 21 - #include <crypto/sm2.h> 22 - #include <crypto/sm3_base.h> 12 + #include <crypto/public_key.h> 13 + #include <crypto/sig.h> 14 + #include <keys/asymmetric-subtype.h> 15 + #include <linux/asn1.h> 16 + #include <linux/err.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/seq_file.h> 20 + #include <linux/slab.h> 21 + #include <linux/string.h> 23 22 24 23 MODULE_DESCRIPTION("In-software asymmetric public-key subtype"); 25 24 MODULE_AUTHOR("Red Hat, Inc."); ··· 66 67 static int 67 68 software_key_determine_akcipher(const struct public_key *pkey, 68 69 const char *encoding, const char *hash_algo, 69 - char alg_name[CRYPTO_MAX_ALG_NAME]) 70 + char alg_name[CRYPTO_MAX_ALG_NAME], bool *sig, 71 + enum kernel_pkey_operation op) 70 72 { 71 73 int n; 74 + 75 + *sig = true; 72 76 73 77 if (!encoding) 74 78 return -EINVAL; ··· 81 79 * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2]. 82 80 */ 83 81 if (strcmp(encoding, "pkcs1") == 0) { 84 - if (!hash_algo) 82 + if (!hash_algo) { 83 + *sig = false; 85 84 n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, 86 85 "pkcs1pad(%s)", 87 86 pkey->pkey_algo); 88 - else 87 + } else { 88 + *sig = op == kernel_pkey_sign || 89 + op == kernel_pkey_verify; 89 90 n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, 90 91 "pkcs1pad(%s,%s)", 91 92 pkey->pkey_algo, hash_algo); 93 + } 92 94 return n >= CRYPTO_MAX_ALG_NAME ? 
-EINVAL : 0; 93 95 } 94 96 if (strcmp(encoding, "raw") != 0) ··· 103 97 */ 104 98 if (hash_algo) 105 99 return -EINVAL; 100 + *sig = false; 106 101 } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { 107 102 if (strcmp(encoding, "x962") != 0) 108 103 return -EINVAL; ··· 161 154 struct crypto_akcipher *tfm; 162 155 struct public_key *pkey = params->key->payload.data[asym_crypto]; 163 156 char alg_name[CRYPTO_MAX_ALG_NAME]; 157 + struct crypto_sig *sig; 164 158 u8 *key, *ptr; 165 159 int ret, len; 160 + bool issig; 166 161 167 162 ret = software_key_determine_akcipher(pkey, params->encoding, 168 - params->hash_algo, alg_name); 163 + params->hash_algo, alg_name, 164 + &issig, kernel_pkey_sign); 169 165 if (ret < 0) 170 166 return ret; 171 167 172 - tfm = crypto_alloc_akcipher(alg_name, 0, 0); 173 - if (IS_ERR(tfm)) 174 - return PTR_ERR(tfm); 175 - 176 - ret = -ENOMEM; 177 168 key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, 178 169 GFP_KERNEL); 179 170 if (!key) 180 - goto error_free_tfm; 171 + return -ENOMEM; 172 + 181 173 memcpy(key, pkey->key, pkey->keylen); 182 174 ptr = key + pkey->keylen; 183 175 ptr = pkey_pack_u32(ptr, pkey->algo); 184 176 ptr = pkey_pack_u32(ptr, pkey->paramlen); 185 177 memcpy(ptr, pkey->params, pkey->paramlen); 186 178 187 - if (pkey->key_is_private) 188 - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); 189 - else 190 - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); 191 - if (ret < 0) 192 - goto error_free_key; 179 + if (issig) { 180 + sig = crypto_alloc_sig(alg_name, 0, 0); 181 + if (IS_ERR(sig)) 182 + goto error_free_key; 193 183 194 - len = crypto_akcipher_maxsize(tfm); 184 + if (pkey->key_is_private) 185 + ret = crypto_sig_set_privkey(sig, key, pkey->keylen); 186 + else 187 + ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); 188 + if (ret < 0) 189 + goto error_free_tfm; 190 + 191 + len = crypto_sig_maxsize(sig); 192 + 193 + info->supported_ops = KEYCTL_SUPPORTS_VERIFY; 194 + if 
(pkey->key_is_private) 195 + info->supported_ops |= KEYCTL_SUPPORTS_SIGN; 196 + 197 + if (strcmp(params->encoding, "pkcs1") == 0) { 198 + info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT; 199 + if (pkey->key_is_private) 200 + info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; 201 + } 202 + } else { 203 + tfm = crypto_alloc_akcipher(alg_name, 0, 0); 204 + if (IS_ERR(tfm)) 205 + goto error_free_key; 206 + 207 + if (pkey->key_is_private) 208 + ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); 209 + else 210 + ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); 211 + if (ret < 0) 212 + goto error_free_tfm; 213 + 214 + len = crypto_akcipher_maxsize(tfm); 215 + 216 + info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT; 217 + if (pkey->key_is_private) 218 + info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; 219 + } 220 + 195 221 info->key_size = len * 8; 196 222 197 223 if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { ··· 250 210 251 211 info->max_enc_size = len; 252 212 info->max_dec_size = len; 253 - info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT | 254 - KEYCTL_SUPPORTS_VERIFY); 255 - if (pkey->key_is_private) 256 - info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT | 257 - KEYCTL_SUPPORTS_SIGN); 213 + 258 214 ret = 0; 259 215 216 + error_free_tfm: 217 + if (issig) 218 + crypto_free_sig(sig); 219 + else 220 + crypto_free_akcipher(tfm); 260 221 error_free_key: 261 222 kfree(key); 262 - error_free_tfm: 263 - crypto_free_akcipher(tfm); 264 223 pr_devel("<==%s() = %d\n", __func__, ret); 265 224 return ret; 266 225 } ··· 271 232 const void *in, void *out) 272 233 { 273 234 const struct public_key *pkey = params->key->payload.data[asym_crypto]; 274 - struct akcipher_request *req; 275 - struct crypto_akcipher *tfm; 276 - struct crypto_wait cwait; 277 - struct scatterlist in_sg, out_sg; 278 235 char alg_name[CRYPTO_MAX_ALG_NAME]; 236 + struct crypto_akcipher *tfm; 237 + struct crypto_sig *sig; 279 238 char *key, *ptr; 239 + bool issig; 240 + int ksz; 280 241 int ret; 281 242 
282 243 pr_devel("==>%s()\n", __func__); 283 244 284 245 ret = software_key_determine_akcipher(pkey, params->encoding, 285 - params->hash_algo, alg_name); 246 + params->hash_algo, alg_name, 247 + &issig, params->op); 286 248 if (ret < 0) 287 249 return ret; 288 - 289 - tfm = crypto_alloc_akcipher(alg_name, 0, 0); 290 - if (IS_ERR(tfm)) 291 - return PTR_ERR(tfm); 292 - 293 - ret = -ENOMEM; 294 - req = akcipher_request_alloc(tfm, GFP_KERNEL); 295 - if (!req) 296 - goto error_free_tfm; 297 250 298 251 key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, 299 252 GFP_KERNEL); 300 253 if (!key) 301 - goto error_free_req; 254 + return -ENOMEM; 302 255 303 256 memcpy(key, pkey->key, pkey->keylen); 304 257 ptr = key + pkey->keylen; ··· 298 267 ptr = pkey_pack_u32(ptr, pkey->paramlen); 299 268 memcpy(ptr, pkey->params, pkey->paramlen); 300 269 301 - if (pkey->key_is_private) 302 - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); 303 - else 304 - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); 305 - if (ret) 306 - goto error_free_key; 270 + if (issig) { 271 + sig = crypto_alloc_sig(alg_name, 0, 0); 272 + if (IS_ERR(sig)) 273 + goto error_free_key; 307 274 308 - sg_init_one(&in_sg, in, params->in_len); 309 - sg_init_one(&out_sg, out, params->out_len); 310 - akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len, 311 - params->out_len); 312 - crypto_init_wait(&cwait); 313 - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 314 - CRYPTO_TFM_REQ_MAY_SLEEP, 315 - crypto_req_done, &cwait); 275 + if (pkey->key_is_private) 276 + ret = crypto_sig_set_privkey(sig, key, pkey->keylen); 277 + else 278 + ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); 279 + if (ret) 280 + goto error_free_tfm; 281 + 282 + ksz = crypto_sig_maxsize(sig); 283 + } else { 284 + tfm = crypto_alloc_akcipher(alg_name, 0, 0); 285 + if (IS_ERR(tfm)) 286 + goto error_free_key; 287 + 288 + if (pkey->key_is_private) 289 + ret = crypto_akcipher_set_priv_key(tfm, 
key, pkey->keylen); 290 + else 291 + ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); 292 + if (ret) 293 + goto error_free_tfm; 294 + 295 + ksz = crypto_akcipher_maxsize(tfm); 296 + } 297 + 298 + ret = -EINVAL; 316 299 317 300 /* Perform the encryption calculation. */ 318 301 switch (params->op) { 319 302 case kernel_pkey_encrypt: 320 - ret = crypto_akcipher_encrypt(req); 303 + if (issig) 304 + break; 305 + ret = crypto_akcipher_sync_encrypt(tfm, in, params->in_len, 306 + out, params->out_len); 321 307 break; 322 308 case kernel_pkey_decrypt: 323 - ret = crypto_akcipher_decrypt(req); 309 + if (issig) 310 + break; 311 + ret = crypto_akcipher_sync_decrypt(tfm, in, params->in_len, 312 + out, params->out_len); 324 313 break; 325 314 case kernel_pkey_sign: 326 - ret = crypto_akcipher_sign(req); 315 + if (!issig) 316 + break; 317 + ret = crypto_sig_sign(sig, in, params->in_len, 318 + out, params->out_len); 327 319 break; 328 320 default: 329 321 BUG(); 330 322 } 331 323 332 - ret = crypto_wait_req(ret, &cwait); 333 324 if (ret == 0) 334 - ret = req->dst_len; 325 + ret = ksz; 335 326 327 + error_free_tfm: 328 + if (issig) 329 + crypto_free_sig(sig); 330 + else 331 + crypto_free_akcipher(tfm); 336 332 error_free_key: 337 333 kfree(key); 338 - error_free_req: 339 - akcipher_request_free(req); 340 - error_free_tfm: 341 - crypto_free_akcipher(tfm); 342 334 pr_devel("<==%s() = %d\n", __func__, ret); 343 335 return ret; 344 336 } 345 - 346 - #if IS_REACHABLE(CONFIG_CRYPTO_SM2) 347 - static int cert_sig_digest_update(const struct public_key_signature *sig, 348 - struct crypto_akcipher *tfm_pkey) 349 - { 350 - struct crypto_shash *tfm; 351 - struct shash_desc *desc; 352 - size_t desc_size; 353 - unsigned char dgst[SM3_DIGEST_SIZE]; 354 - int ret; 355 - 356 - BUG_ON(!sig->data); 357 - 358 - /* SM2 signatures always use the SM3 hash algorithm */ 359 - if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0) 360 - return -EINVAL; 361 - 362 - ret = 
sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID, 363 - SM2_DEFAULT_USERID_LEN, dgst); 364 - if (ret) 365 - return ret; 366 - 367 - tfm = crypto_alloc_shash(sig->hash_algo, 0, 0); 368 - if (IS_ERR(tfm)) 369 - return PTR_ERR(tfm); 370 - 371 - desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); 372 - desc = kzalloc(desc_size, GFP_KERNEL); 373 - if (!desc) { 374 - ret = -ENOMEM; 375 - goto error_free_tfm; 376 - } 377 - 378 - desc->tfm = tfm; 379 - 380 - ret = crypto_shash_init(desc); 381 - if (ret < 0) 382 - goto error_free_desc; 383 - 384 - ret = crypto_shash_update(desc, dgst, SM3_DIGEST_SIZE); 385 - if (ret < 0) 386 - goto error_free_desc; 387 - 388 - ret = crypto_shash_finup(desc, sig->data, sig->data_size, sig->digest); 389 - 390 - error_free_desc: 391 - kfree(desc); 392 - error_free_tfm: 393 - crypto_free_shash(tfm); 394 - return ret; 395 - } 396 - #else 397 - static inline int cert_sig_digest_update( 398 - const struct public_key_signature *sig, 399 - struct crypto_akcipher *tfm_pkey) 400 - { 401 - return -ENOTSUPP; 402 - } 403 - #endif /* ! IS_REACHABLE(CONFIG_CRYPTO_SM2) */ 404 337 405 338 /* 406 339 * Verify a signature using a public key. 
··· 372 377 int public_key_verify_signature(const struct public_key *pkey, 373 378 const struct public_key_signature *sig) 374 379 { 375 - struct crypto_wait cwait; 376 - struct crypto_akcipher *tfm; 377 - struct akcipher_request *req; 378 - struct scatterlist src_sg; 379 380 char alg_name[CRYPTO_MAX_ALG_NAME]; 380 - char *buf, *ptr; 381 - size_t buf_len; 381 + struct crypto_sig *tfm; 382 + char *key, *ptr; 383 + bool issig; 382 384 int ret; 383 385 384 386 pr_devel("==>%s()\n", __func__); ··· 400 408 } 401 409 402 410 ret = software_key_determine_akcipher(pkey, sig->encoding, 403 - sig->hash_algo, alg_name); 411 + sig->hash_algo, alg_name, 412 + &issig, kernel_pkey_verify); 404 413 if (ret < 0) 405 414 return ret; 406 415 407 - tfm = crypto_alloc_akcipher(alg_name, 0, 0); 416 + tfm = crypto_alloc_sig(alg_name, 0, 0); 408 417 if (IS_ERR(tfm)) 409 418 return PTR_ERR(tfm); 410 419 411 - ret = -ENOMEM; 412 - req = akcipher_request_alloc(tfm, GFP_KERNEL); 413 - if (!req) 420 + key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, 421 + GFP_KERNEL); 422 + if (!key) 414 423 goto error_free_tfm; 415 424 416 - buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, 417 - sig->s_size + sig->digest_size); 418 - 419 - buf = kmalloc(buf_len, GFP_KERNEL); 420 - if (!buf) 421 - goto error_free_req; 422 - 423 - memcpy(buf, pkey->key, pkey->keylen); 424 - ptr = buf + pkey->keylen; 425 + memcpy(key, pkey->key, pkey->keylen); 426 + ptr = key + pkey->keylen; 425 427 ptr = pkey_pack_u32(ptr, pkey->algo); 426 428 ptr = pkey_pack_u32(ptr, pkey->paramlen); 427 429 memcpy(ptr, pkey->params, pkey->paramlen); 428 430 429 431 if (pkey->key_is_private) 430 - ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen); 432 + ret = crypto_sig_set_privkey(tfm, key, pkey->keylen); 431 433 else 432 - ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen); 434 + ret = crypto_sig_set_pubkey(tfm, key, pkey->keylen); 433 435 if (ret) 434 - goto error_free_buf; 436 + goto 
error_free_key; 435 437 436 - if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { 437 - ret = cert_sig_digest_update(sig, tfm); 438 - if (ret) 439 - goto error_free_buf; 440 - } 438 + ret = crypto_sig_verify(tfm, sig->s, sig->s_size, 439 + sig->digest, sig->digest_size); 441 440 442 - memcpy(buf, sig->s, sig->s_size); 443 - memcpy(buf + sig->s_size, sig->digest, sig->digest_size); 444 - 445 - sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size); 446 - akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size, 447 - sig->digest_size); 448 - crypto_init_wait(&cwait); 449 - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 450 - CRYPTO_TFM_REQ_MAY_SLEEP, 451 - crypto_req_done, &cwait); 452 - ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); 453 - 454 - error_free_buf: 455 - kfree(buf); 456 - error_free_req: 457 - akcipher_request_free(req); 441 + error_free_key: 442 + kfree(key); 458 443 error_free_tfm: 459 - crypto_free_akcipher(tfm); 444 + crypto_free_sig(tfm); 460 445 pr_devel("<==%s() = %d\n", __func__, ret); 461 446 if (WARN_ON_ONCE(ret > 0)) 462 447 ret = -EINVAL;
+21 -8
crypto/asymmetric_keys/x509_public_key.c
··· 6 6 */ 7 7 8 8 #define pr_fmt(fmt) "X.509: "fmt 9 + #include <crypto/hash.h> 10 + #include <crypto/sm2.h> 11 + #include <keys/asymmetric-parser.h> 12 + #include <keys/asymmetric-subtype.h> 13 + #include <keys/system_keyring.h> 9 14 #include <linux/module.h> 10 15 #include <linux/kernel.h> 11 16 #include <linux/slab.h> 12 - #include <keys/asymmetric-subtype.h> 13 - #include <keys/asymmetric-parser.h> 14 - #include <keys/system_keyring.h> 15 - #include <crypto/hash.h> 17 + #include <linux/string.h> 16 18 #include "asymmetric_keys.h" 17 19 #include "x509_parser.h" 18 20 ··· 31 29 int ret; 32 30 33 31 pr_devel("==>%s()\n", __func__); 34 - 35 - sig->data = cert->tbs; 36 - sig->data_size = cert->tbs_size; 37 32 38 33 sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL); 39 34 if (!sig->s) ··· 64 65 65 66 desc->tfm = tfm; 66 67 67 - ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest); 68 + if (strcmp(cert->pub->pkey_algo, "sm2") == 0) { 69 + ret = strcmp(sig->hash_algo, "sm3") != 0 ? -EINVAL : 70 + crypto_shash_init(desc) ?: 71 + sm2_compute_z_digest(desc, cert->pub->key, 72 + cert->pub->keylen, sig->digest) ?: 73 + crypto_shash_init(desc) ?: 74 + crypto_shash_update(desc, sig->digest, 75 + sig->digest_size) ?: 76 + crypto_shash_finup(desc, cert->tbs, cert->tbs_size, 77 + sig->digest); 78 + } else { 79 + ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, 80 + sig->digest); 81 + } 82 + 68 83 if (ret < 0) 69 84 goto error_2; 70 85
+28
crypto/cipher.c
··· 90 90 cipher_crypt_one(tfm, dst, src, false); 91 91 } 92 92 EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL); 93 + 94 + struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher) 95 + { 96 + struct crypto_tfm *tfm = crypto_cipher_tfm(cipher); 97 + struct crypto_alg *alg = tfm->__crt_alg; 98 + struct crypto_cipher *ncipher; 99 + struct crypto_tfm *ntfm; 100 + 101 + if (alg->cra_init) 102 + return ERR_PTR(-ENOSYS); 103 + 104 + if (unlikely(!crypto_mod_get(alg))) 105 + return ERR_PTR(-ESTALE); 106 + 107 + ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER, 108 + CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC); 109 + if (IS_ERR(ntfm)) { 110 + crypto_mod_put(alg); 111 + return ERR_CAST(ntfm); 112 + } 113 + 114 + ntfm->crt_flags = tfm->crt_flags; 115 + 116 + ncipher = __crypto_cipher_cast(ntfm); 117 + 118 + return ncipher; 119 + } 120 + EXPORT_SYMBOL_GPL(crypto_clone_cipher);
+26 -10
crypto/cmac.c
··· 198 198 return 0; 199 199 } 200 200 201 - static int cmac_init_tfm(struct crypto_tfm *tfm) 201 + static int cmac_init_tfm(struct crypto_shash *tfm) 202 202 { 203 + struct shash_instance *inst = shash_alg_instance(tfm); 204 + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); 205 + struct crypto_cipher_spawn *spawn; 203 206 struct crypto_cipher *cipher; 204 - struct crypto_instance *inst = (void *)tfm->__crt_alg; 205 - struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); 206 - struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); 207 207 208 + spawn = shash_instance_ctx(inst); 208 209 cipher = crypto_spawn_cipher(spawn); 209 210 if (IS_ERR(cipher)) 210 211 return PTR_ERR(cipher); ··· 213 212 ctx->child = cipher; 214 213 215 214 return 0; 216 - }; 215 + } 217 216 218 - static void cmac_exit_tfm(struct crypto_tfm *tfm) 217 + static int cmac_clone_tfm(struct crypto_shash *tfm, struct crypto_shash *otfm) 219 218 { 220 - struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); 219 + struct cmac_tfm_ctx *octx = crypto_shash_ctx(otfm); 220 + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); 221 + struct crypto_cipher *cipher; 222 + 223 + cipher = crypto_clone_cipher(octx->child); 224 + if (IS_ERR(cipher)) 225 + return PTR_ERR(cipher); 226 + 227 + ctx->child = cipher; 228 + 229 + return 0; 230 + } 231 + 232 + static void cmac_exit_tfm(struct crypto_shash *tfm) 233 + { 234 + struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm); 221 235 crypto_free_cipher(ctx->child); 222 236 } 223 237 ··· 290 274 ~(crypto_tfm_ctx_alignment() - 1)) 291 275 + alg->cra_blocksize * 2; 292 276 293 - inst->alg.base.cra_init = cmac_init_tfm; 294 - inst->alg.base.cra_exit = cmac_exit_tfm; 295 - 296 277 inst->alg.init = crypto_cmac_digest_init; 297 278 inst->alg.update = crypto_cmac_digest_update; 298 279 inst->alg.final = crypto_cmac_digest_final; 299 280 inst->alg.setkey = crypto_cmac_digest_setkey; 281 + inst->alg.init_tfm = cmac_init_tfm; 282 + inst->alg.clone_tfm = cmac_clone_tfm; 283 + 
inst->alg.exit_tfm = cmac_exit_tfm; 300 284 301 285 inst->free = shash_free_singlespawn_instance; 302 286
+1
crypto/hmac.c
··· 177 177 static void hmac_exit_tfm(struct crypto_shash *parent) 178 178 { 179 179 struct hmac_ctx *ctx = hmac_ctx(parent); 180 + 180 181 crypto_free_shash(ctx->hash); 181 182 } 182 183
+22
crypto/internal.h
··· 18 18 #include <linux/numa.h> 19 19 #include <linux/refcount.h> 20 20 #include <linux/rwsem.h> 21 + #include <linux/scatterlist.h> 21 22 #include <linux/sched.h> 22 23 #include <linux/types.h> 23 24 25 + struct akcipher_request; 26 + struct crypto_akcipher; 24 27 struct crypto_instance; 25 28 struct crypto_template; 26 29 ··· 33 30 struct completion completion; 34 31 u32 mask; 35 32 bool test_started; 33 + }; 34 + 35 + struct crypto_akcipher_sync_data { 36 + struct crypto_akcipher *tfm; 37 + const void *src; 38 + void *dst; 39 + unsigned int slen; 40 + unsigned int dlen; 41 + 42 + struct akcipher_request *req; 43 + struct crypto_wait cwait; 44 + struct scatterlist sg; 45 + u8 *buf; 36 46 }; 37 47 38 48 enum { ··· 118 102 struct crypto_alg *nalg); 119 103 void crypto_remove_final(struct list_head *list); 120 104 void crypto_shoot_alg(struct crypto_alg *alg); 105 + struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, 106 + u32 mask, gfp_t gfp); 121 107 struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, 122 108 u32 mask); 123 109 void *crypto_create_tfm_node(struct crypto_alg *alg, 124 110 const struct crypto_type *frontend, int node); 125 111 void *crypto_clone_tfm(const struct crypto_type *frontend, 126 112 struct crypto_tfm *otfm); 113 + 114 + int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data); 115 + int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err); 116 + int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm); 127 117 128 118 static inline void *crypto_create_tfm(struct crypto_alg *alg, 129 119 const struct crypto_type *frontend)
+171 -21
crypto/jitterentropy-kcapi.c
··· 2 2 * Non-physical true random number generator based on timing jitter -- 3 3 * Linux Kernel Crypto API specific code 4 4 * 5 - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 5 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023 6 6 * 7 7 * Redistribution and use in source and binary forms, with or without 8 8 * modification, are permitted provided that the following conditions ··· 37 37 * DAMAGE. 38 38 */ 39 39 40 + #include <crypto/hash.h> 41 + #include <crypto/sha3.h> 40 42 #include <linux/fips.h> 41 43 #include <linux/kernel.h> 42 44 #include <linux/module.h> ··· 47 45 #include <crypto/internal/rng.h> 48 46 49 47 #include "jitterentropy.h" 48 + 49 + #define JENT_CONDITIONING_HASH "sha3-256-generic" 50 50 51 51 /*************************************************************************** 52 52 * Helper function ··· 62 58 void jent_zfree(void *ptr) 63 59 { 64 60 kfree_sensitive(ptr); 65 - } 66 - 67 - void jent_memcpy(void *dest, const void *src, unsigned int n) 68 - { 69 - memcpy(dest, src, n); 70 61 } 71 62 72 63 /* ··· 88 89 tmp = ktime_get_ns(); 89 90 90 91 *out = tmp; 92 + jent_raw_hires_entropy_store(tmp); 93 + } 94 + 95 + int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, 96 + unsigned int addtl_len, __u64 hash_loop_cnt, 97 + unsigned int stuck) 98 + { 99 + struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; 100 + SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm); 101 + u8 intermediary[SHA3_256_DIGEST_SIZE]; 102 + __u64 j = 0; 103 + int ret; 104 + 105 + desc->tfm = hash_state_desc->tfm; 106 + 107 + if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) { 108 + pr_warn_ratelimited("Unexpected digest size\n"); 109 + return -EINVAL; 110 + } 111 + 112 + /* 113 + * This loop fills a buffer which is injected into the entropy pool. 114 + * The main reason for this loop is to execute something over which we 115 + * can perform a timing measurement. 
The injection of the resulting 116 + * data into the pool is performed to ensure the result is used and 117 + * the compiler cannot optimize the loop away in case the result is not 118 + * used at all. Yet that data is considered "additional information" 119 + * considering the terminology from SP800-90A without any entropy. 120 + * 121 + * Note, it does not matter which or how much data you inject, we are 122 + * interested in one Keccack1600 compression operation performed with 123 + * the crypto_shash_final. 124 + */ 125 + for (j = 0; j < hash_loop_cnt; j++) { 126 + ret = crypto_shash_init(desc) ?: 127 + crypto_shash_update(desc, intermediary, 128 + sizeof(intermediary)) ?: 129 + crypto_shash_finup(desc, addtl, addtl_len, intermediary); 130 + if (ret) 131 + goto err; 132 + } 133 + 134 + /* 135 + * Inject the data from the previous loop into the pool. This data is 136 + * not considered to contain any entropy, but it stirs the pool a bit. 137 + */ 138 + ret = crypto_shash_update(desc, intermediary, sizeof(intermediary)); 139 + if (ret) 140 + goto err; 141 + 142 + /* 143 + * Insert the time stamp into the hash context representing the pool. 144 + * 145 + * If the time stamp is stuck, do not finally insert the value into the 146 + * entropy pool. Although this operation should not do any harm even 147 + * when the time stamp has no entropy, SP800-90B requires that any 148 + * conditioning operation to have an identical amount of input data 149 + * according to section 3.1.5. 
150 + */ 151 + if (!stuck) { 152 + ret = crypto_shash_update(hash_state_desc, (u8 *)&time, 153 + sizeof(__u64)); 154 + } 155 + 156 + err: 157 + shash_desc_zero(desc); 158 + memzero_explicit(intermediary, sizeof(intermediary)); 159 + 160 + return ret; 161 + } 162 + 163 + int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len) 164 + { 165 + struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; 166 + u8 jent_block[SHA3_256_DIGEST_SIZE]; 167 + /* Obtain data from entropy pool and re-initialize it */ 168 + int ret = crypto_shash_final(hash_state_desc, jent_block) ?: 169 + crypto_shash_init(hash_state_desc) ?: 170 + crypto_shash_update(hash_state_desc, jent_block, 171 + sizeof(jent_block)); 172 + 173 + if (!ret && dst_len) 174 + memcpy(dst, jent_block, dst_len); 175 + 176 + memzero_explicit(jent_block, sizeof(jent_block)); 177 + return ret; 91 178 } 92 179 93 180 /*************************************************************************** ··· 183 98 struct jitterentropy { 184 99 spinlock_t jent_lock; 185 100 struct rand_data *entropy_collector; 101 + struct crypto_shash *tfm; 102 + struct shash_desc *sdesc; 186 103 }; 187 - 188 - static int jent_kcapi_init(struct crypto_tfm *tfm) 189 - { 190 - struct jitterentropy *rng = crypto_tfm_ctx(tfm); 191 - int ret = 0; 192 - 193 - rng->entropy_collector = jent_entropy_collector_alloc(1, 0); 194 - if (!rng->entropy_collector) 195 - ret = -ENOMEM; 196 - 197 - spin_lock_init(&rng->jent_lock); 198 - return ret; 199 - } 200 104 201 105 static void jent_kcapi_cleanup(struct crypto_tfm *tfm) 202 106 { 203 107 struct jitterentropy *rng = crypto_tfm_ctx(tfm); 204 108 205 109 spin_lock(&rng->jent_lock); 110 + 111 + if (rng->sdesc) { 112 + shash_desc_zero(rng->sdesc); 113 + kfree(rng->sdesc); 114 + } 115 + rng->sdesc = NULL; 116 + 117 + if (rng->tfm) 118 + crypto_free_shash(rng->tfm); 119 + rng->tfm = NULL; 120 + 206 121 if (rng->entropy_collector) 207 122 
jent_entropy_collector_free(rng->entropy_collector); 208 123 rng->entropy_collector = NULL; 209 124 spin_unlock(&rng->jent_lock); 125 + } 126 + 127 + static int jent_kcapi_init(struct crypto_tfm *tfm) 128 + { 129 + struct jitterentropy *rng = crypto_tfm_ctx(tfm); 130 + struct crypto_shash *hash; 131 + struct shash_desc *sdesc; 132 + int size, ret = 0; 133 + 134 + spin_lock_init(&rng->jent_lock); 135 + 136 + /* 137 + * Use SHA3-256 as conditioner. We allocate only the generic 138 + * implementation as we are not interested in high-performance. The 139 + * execution time of the SHA3 operation is measured and adds to the 140 + * Jitter RNG's unpredictable behavior. If we have a slower hash 141 + * implementation, the execution timing variations are larger. When 142 + * using a fast implementation, we would need to call it more often 143 + * as its variations are lower. 144 + */ 145 + hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); 146 + if (IS_ERR(hash)) { 147 + pr_err("Cannot allocate conditioning digest\n"); 148 + return PTR_ERR(hash); 149 + } 150 + rng->tfm = hash; 151 + 152 + size = sizeof(struct shash_desc) + crypto_shash_descsize(hash); 153 + sdesc = kmalloc(size, GFP_KERNEL); 154 + if (!sdesc) { 155 + ret = -ENOMEM; 156 + goto err; 157 + } 158 + 159 + sdesc->tfm = hash; 160 + crypto_shash_init(sdesc); 161 + rng->sdesc = sdesc; 162 + 163 + rng->entropy_collector = jent_entropy_collector_alloc(1, 0, sdesc); 164 + if (!rng->entropy_collector) { 165 + ret = -ENOMEM; 166 + goto err; 167 + } 168 + 169 + spin_lock_init(&rng->jent_lock); 170 + return 0; 171 + 172 + err: 173 + jent_kcapi_cleanup(tfm); 174 + return ret; 210 175 } 211 176 212 177 static int jent_kcapi_random(struct crypto_rng *tfm, ··· 315 180 .cra_module = THIS_MODULE, 316 181 .cra_init = jent_kcapi_init, 317 182 .cra_exit = jent_kcapi_cleanup, 318 - 319 183 } 320 184 }; 321 185 322 186 static int __init jent_mod_init(void) 323 187 { 188 + SHASH_DESC_ON_STACK(desc, tfm); 189 + struct 
crypto_shash *tfm; 324 190 int ret = 0; 325 191 326 - ret = jent_entropy_init(); 192 + jent_testing_init(); 193 + 194 + tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); 195 + if (IS_ERR(tfm)) { 196 + jent_testing_exit(); 197 + return PTR_ERR(tfm); 198 + } 199 + 200 + desc->tfm = tfm; 201 + crypto_shash_init(desc); 202 + ret = jent_entropy_init(desc); 203 + shash_desc_zero(desc); 204 + crypto_free_shash(tfm); 327 205 if (ret) { 328 206 /* Handle permanent health test error */ 329 207 if (fips_enabled) 330 208 panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); 331 209 210 + jent_testing_exit(); 332 211 pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); 333 212 return -EFAULT; 334 213 } ··· 351 202 352 203 static void __exit jent_mod_exit(void) 353 204 { 205 + jent_testing_exit(); 354 206 crypto_unregister_rng(&jent_alg); 355 207 } 356 208
+294
crypto/jitterentropy-testing.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ 2 + /* 3 + * Test interface for Jitter RNG. 4 + * 5 + * Copyright (C) 2023, Stephan Mueller <smueller@chronox.de> 6 + */ 7 + 8 + #include <linux/debugfs.h> 9 + #include <linux/module.h> 10 + #include <linux/uaccess.h> 11 + 12 + #include "jitterentropy.h" 13 + 14 + #define JENT_TEST_RINGBUFFER_SIZE (1<<10) 15 + #define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1) 16 + 17 + struct jent_testing { 18 + u32 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE]; 19 + u32 rb_reader; 20 + atomic_t rb_writer; 21 + atomic_t jent_testing_enabled; 22 + spinlock_t lock; 23 + wait_queue_head_t read_wait; 24 + }; 25 + 26 + static struct dentry *jent_raw_debugfs_root = NULL; 27 + 28 + /*************************** Generic Data Handling ****************************/ 29 + 30 + /* 31 + * boot variable: 32 + * 0 ==> No boot test, gathering of runtime data allowed 33 + * 1 ==> Boot test enabled and ready for collecting data, gathering runtime 34 + * data is disabled 35 + * 2 ==> Boot test completed and disabled, gathering of runtime data is 36 + * disabled 37 + */ 38 + 39 + static void jent_testing_reset(struct jent_testing *data) 40 + { 41 + unsigned long flags; 42 + 43 + spin_lock_irqsave(&data->lock, flags); 44 + data->rb_reader = 0; 45 + atomic_set(&data->rb_writer, 0); 46 + spin_unlock_irqrestore(&data->lock, flags); 47 + } 48 + 49 + static void jent_testing_data_init(struct jent_testing *data, u32 boot) 50 + { 51 + /* 52 + * The boot time testing implies we have a running test. 
If the 53 + * caller wants to clear it, he has to unset the boot_test flag 54 + * at runtime via sysfs to enable regular runtime testing 55 + */ 56 + if (boot) 57 + return; 58 + 59 + jent_testing_reset(data); 60 + atomic_set(&data->jent_testing_enabled, 1); 61 + pr_warn("Enabling data collection\n"); 62 + } 63 + 64 + static void jent_testing_fini(struct jent_testing *data, u32 boot) 65 + { 66 + /* If we have boot data, we do not reset yet to allow data to be read */ 67 + if (boot) 68 + return; 69 + 70 + atomic_set(&data->jent_testing_enabled, 0); 71 + jent_testing_reset(data); 72 + pr_warn("Disabling data collection\n"); 73 + } 74 + 75 + static bool jent_testing_store(struct jent_testing *data, u32 value, 76 + u32 *boot) 77 + { 78 + unsigned long flags; 79 + 80 + if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1)) 81 + return false; 82 + 83 + spin_lock_irqsave(&data->lock, flags); 84 + 85 + /* 86 + * Disable entropy testing for boot time testing after ring buffer 87 + * is filled. 
88 + */ 89 + if (*boot) { 90 + if (((u32)atomic_read(&data->rb_writer)) > 91 + JENT_TEST_RINGBUFFER_SIZE) { 92 + *boot = 2; 93 + pr_warn_once("One time data collection test disabled\n"); 94 + spin_unlock_irqrestore(&data->lock, flags); 95 + return false; 96 + } 97 + 98 + if (atomic_read(&data->rb_writer) == 1) 99 + pr_warn("One time data collection test enabled\n"); 100 + } 101 + 102 + data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) & 103 + JENT_TEST_RINGBUFFER_MASK] = value; 104 + atomic_inc(&data->rb_writer); 105 + 106 + spin_unlock_irqrestore(&data->lock, flags); 107 + 108 + if (wq_has_sleeper(&data->read_wait)) 109 + wake_up_interruptible(&data->read_wait); 110 + 111 + return true; 112 + } 113 + 114 + static bool jent_testing_have_data(struct jent_testing *data) 115 + { 116 + return ((((u32)atomic_read(&data->rb_writer)) & 117 + JENT_TEST_RINGBUFFER_MASK) != 118 + (data->rb_reader & JENT_TEST_RINGBUFFER_MASK)); 119 + } 120 + 121 + static int jent_testing_reader(struct jent_testing *data, u32 *boot, 122 + u8 *outbuf, u32 outbuflen) 123 + { 124 + unsigned long flags; 125 + int collected_data = 0; 126 + 127 + jent_testing_data_init(data, *boot); 128 + 129 + while (outbuflen) { 130 + u32 writer = (u32)atomic_read(&data->rb_writer); 131 + 132 + spin_lock_irqsave(&data->lock, flags); 133 + 134 + /* We have no data or reached the writer. */ 135 + if (!writer || (writer == data->rb_reader)) { 136 + 137 + spin_unlock_irqrestore(&data->lock, flags); 138 + 139 + /* 140 + * Now we gathered all boot data, enable regular data 141 + * collection. 
142 + */ 143 + if (*boot) { 144 + *boot = 0; 145 + goto out; 146 + } 147 + 148 + wait_event_interruptible(data->read_wait, 149 + jent_testing_have_data(data)); 150 + if (signal_pending(current)) { 151 + collected_data = -ERESTARTSYS; 152 + goto out; 153 + } 154 + 155 + continue; 156 + } 157 + 158 + /* We copy out word-wise */ 159 + if (outbuflen < sizeof(u32)) { 160 + spin_unlock_irqrestore(&data->lock, flags); 161 + goto out; 162 + } 163 + 164 + memcpy(outbuf, &data->jent_testing_rb[data->rb_reader], 165 + sizeof(u32)); 166 + data->rb_reader++; 167 + 168 + spin_unlock_irqrestore(&data->lock, flags); 169 + 170 + outbuf += sizeof(u32); 171 + outbuflen -= sizeof(u32); 172 + collected_data += sizeof(u32); 173 + } 174 + 175 + out: 176 + jent_testing_fini(data, *boot); 177 + return collected_data; 178 + } 179 + 180 + static int jent_testing_extract_user(struct file *file, char __user *buf, 181 + size_t nbytes, loff_t *ppos, 182 + int (*reader)(u8 *outbuf, u32 outbuflen)) 183 + { 184 + u8 *tmp, *tmp_aligned; 185 + int ret = 0, large_request = (nbytes > 256); 186 + 187 + if (!nbytes) 188 + return 0; 189 + 190 + /* 191 + * The intention of this interface is for collecting at least 192 + * 1000 samples due to the SP800-90B requirements. So, we make no 193 + * effort in avoiding allocating more memory that actually needed 194 + * by the user. Hence, we allocate sufficient memory to always hold 195 + * that amount of data. 
196 + */ 197 + tmp = kmalloc(JENT_TEST_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL); 198 + if (!tmp) 199 + return -ENOMEM; 200 + 201 + tmp_aligned = PTR_ALIGN(tmp, sizeof(u32)); 202 + 203 + while (nbytes) { 204 + int i; 205 + 206 + if (large_request && need_resched()) { 207 + if (signal_pending(current)) { 208 + if (ret == 0) 209 + ret = -ERESTARTSYS; 210 + break; 211 + } 212 + schedule(); 213 + } 214 + 215 + i = min_t(int, nbytes, JENT_TEST_RINGBUFFER_SIZE); 216 + i = reader(tmp_aligned, i); 217 + if (i <= 0) { 218 + if (i < 0) 219 + ret = i; 220 + break; 221 + } 222 + if (copy_to_user(buf, tmp_aligned, i)) { 223 + ret = -EFAULT; 224 + break; 225 + } 226 + 227 + nbytes -= i; 228 + buf += i; 229 + ret += i; 230 + } 231 + 232 + kfree_sensitive(tmp); 233 + 234 + if (ret > 0) 235 + *ppos += ret; 236 + 237 + return ret; 238 + } 239 + 240 + /************** Raw High-Resolution Timer Entropy Data Handling **************/ 241 + 242 + static u32 boot_raw_hires_test = 0; 243 + module_param(boot_raw_hires_test, uint, 0644); 244 + MODULE_PARM_DESC(boot_raw_hires_test, 245 + "Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events"); 246 + 247 + static struct jent_testing jent_raw_hires = { 248 + .rb_reader = 0, 249 + .rb_writer = ATOMIC_INIT(0), 250 + .lock = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock), 251 + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait) 252 + }; 253 + 254 + int jent_raw_hires_entropy_store(__u32 value) 255 + { 256 + return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test); 257 + } 258 + EXPORT_SYMBOL(jent_raw_hires_entropy_store); 259 + 260 + static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen) 261 + { 262 + return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test, 263 + outbuf, outbuflen); 264 + } 265 + 266 + static ssize_t jent_raw_hires_read(struct file *file, char __user *to, 267 + size_t count, loff_t *ppos) 268 + { 269 + return 
jent_testing_extract_user(file, to, count, ppos, 270 + jent_raw_hires_entropy_reader); 271 + } 272 + 273 + static const struct file_operations jent_raw_hires_fops = { 274 + .owner = THIS_MODULE, 275 + .read = jent_raw_hires_read, 276 + }; 277 + 278 + /******************************* Initialization *******************************/ 279 + 280 + void jent_testing_init(void) 281 + { 282 + jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 283 + 284 + debugfs_create_file_unsafe("jent_raw_hires", 0400, 285 + jent_raw_debugfs_root, NULL, 286 + &jent_raw_hires_fops); 287 + } 288 + EXPORT_SYMBOL(jent_testing_init); 289 + 290 + void jent_testing_exit(void) 291 + { 292 + debugfs_remove_recursive(jent_raw_debugfs_root); 293 + } 294 + EXPORT_SYMBOL(jent_testing_exit);
+50 -102
crypto/jitterentropy.c
··· 2 2 * Non-physical true random number generator based on timing jitter -- 3 3 * Jitter RNG standalone code. 4 4 * 5 - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020 5 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023 6 6 * 7 7 * Design 8 8 * ====== ··· 47 47 48 48 /* 49 49 * This Jitterentropy RNG is based on the jitterentropy library 50 - * version 2.2.0 provided at https://www.chronox.de/jent.html 50 + * version 3.4.0 provided at https://www.chronox.de/jent.html 51 51 */ 52 52 53 53 #ifdef __OPTIMIZE__ ··· 57 57 typedef unsigned long long __u64; 58 58 typedef long long __s64; 59 59 typedef unsigned int __u32; 60 + typedef unsigned char u8; 60 61 #define NULL ((void *) 0) 61 62 62 63 /* The entropy pool */ 63 64 struct rand_data { 65 + /* SHA3-256 is used as conditioner */ 66 + #define DATA_SIZE_BITS 256 64 67 /* all data values that are vital to maintain the security 65 68 * of the RNG are marked as SENSITIVE. A user must not 66 69 * access that information while the RNG executes its loops to 67 70 * calculate the next random value. */ 68 - __u64 data; /* SENSITIVE Actual random number */ 69 - __u64 old_data; /* SENSITIVE Previous random number */ 70 - __u64 prev_time; /* SENSITIVE Previous time stamp */ 71 - #define DATA_SIZE_BITS ((sizeof(__u64)) * 8) 72 - __u64 last_delta; /* SENSITIVE stuck test */ 73 - __s64 last_delta2; /* SENSITIVE stuck test */ 74 - unsigned int osr; /* Oversample rate */ 71 + void *hash_state; /* SENSITIVE hash state entropy pool */ 72 + __u64 prev_time; /* SENSITIVE Previous time stamp */ 73 + __u64 last_delta; /* SENSITIVE stuck test */ 74 + __s64 last_delta2; /* SENSITIVE stuck test */ 75 + unsigned int osr; /* Oversample rate */ 75 76 #define JENT_MEMORY_BLOCKS 64 76 77 #define JENT_MEMORY_BLOCKSIZE 32 77 78 #define JENT_MEMORY_ACCESSLOOPS 128 ··· 118 117 * zero). */ 119 118 #define JENT_ESTUCK 8 /* Too many stuck results during init. 
*/ 120 119 #define JENT_EHEALTH 9 /* Health test failed during initialization */ 121 - #define JENT_ERCT 10 /* RCT failed during initialization */ 122 120 123 121 /* 124 122 * The output n bits can receive more than n bits of min entropy, of course, ··· 302 302 * an entropy collection. 303 303 * 304 304 * Input: 305 - * @ec entropy collector struct -- may be NULL 306 305 * @bits is the number of low bits of the timer to consider 307 306 * @min is the number of bits we shift the timer value to the right at 308 307 * the end to make sure we have a guaranteed minimum value 309 308 * 310 309 * @return Newly calculated loop counter 311 310 */ 312 - static __u64 jent_loop_shuffle(struct rand_data *ec, 313 - unsigned int bits, unsigned int min) 311 + static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min) 314 312 { 315 313 __u64 time = 0; 316 314 __u64 shuffle = 0; ··· 316 318 unsigned int mask = (1<<bits) - 1; 317 319 318 320 jent_get_nstime(&time); 319 - /* 320 - * Mix the current state of the random number into the shuffle 321 - * calculation to balance that shuffle a bit more. 322 - */ 323 - if (ec) 324 - time ^= ec->data; 321 + 325 322 /* 326 323 * We fold the time value as much as possible to ensure that as many 327 324 * bits of the time stamp are included as possible. ··· 338 345 * execution time jitter 339 346 * 340 347 * This function injects the individual bits of the time value into the 341 - * entropy pool using an LFSR. 348 + * entropy pool using a hash. 342 349 * 343 - * The code is deliberately inefficient with respect to the bit shifting 344 - * and shall stay that way. This function is the root cause why the code 345 - * shall be compiled without optimization. This function not only acts as 346 - * folding operation, but this function's execution is used to measure 347 - * the CPU execution time jitter. Any change to the loop in this function 348 - * implies that careful retesting must be done. 
349 - * 350 - * @ec [in] entropy collector struct 351 - * @time [in] time stamp to be injected 352 - * @loop_cnt [in] if a value not equal to 0 is set, use the given value as 353 - * number of loops to perform the folding 354 - * @stuck [in] Is the time stamp identified as stuck? 350 + * ec [in] entropy collector 351 + * time [in] time stamp to be injected 352 + * stuck [in] Is the time stamp identified as stuck? 355 353 * 356 354 * Output: 357 - * updated ec->data 358 - * 359 - * @return Number of loops the folding operation is performed 355 + * updated hash context in the entropy collector or error code 360 356 */ 361 - static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt, 362 - int stuck) 357 + static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck) 363 358 { 364 - unsigned int i; 365 - __u64 j = 0; 366 - __u64 new = 0; 367 - #define MAX_FOLD_LOOP_BIT 4 368 - #define MIN_FOLD_LOOP_BIT 0 369 - __u64 fold_loop_cnt = 370 - jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT); 359 + #define SHA3_HASH_LOOP (1<<3) 360 + struct { 361 + int rct_count; 362 + unsigned int apt_observations; 363 + unsigned int apt_count; 364 + unsigned int apt_base; 365 + } addtl = { 366 + ec->rct_count, 367 + ec->apt_observations, 368 + ec->apt_count, 369 + ec->apt_base 370 + }; 371 371 372 - /* 373 - * testing purposes -- allow test app to set the counter, not 374 - * needed during runtime 375 - */ 376 - if (loop_cnt) 377 - fold_loop_cnt = loop_cnt; 378 - for (j = 0; j < fold_loop_cnt; j++) { 379 - new = ec->data; 380 - for (i = 1; (DATA_SIZE_BITS) >= i; i++) { 381 - __u64 tmp = time << (DATA_SIZE_BITS - i); 382 - 383 - tmp = tmp >> (DATA_SIZE_BITS - 1); 384 - 385 - /* 386 - * Fibonacci LSFR with polynomial of 387 - * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is 388 - * primitive according to 389 - * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf 390 - * (the shift values are the polynomial values minus one 391 - 
* due to counting bits from 0 to 63). As the current 392 - * position is always the LSB, the polynomial only needs 393 - * to shift data in from the left without wrap. 394 - */ 395 - tmp ^= ((new >> 63) & 1); 396 - tmp ^= ((new >> 60) & 1); 397 - tmp ^= ((new >> 55) & 1); 398 - tmp ^= ((new >> 30) & 1); 399 - tmp ^= ((new >> 27) & 1); 400 - tmp ^= ((new >> 22) & 1); 401 - new <<= 1; 402 - new ^= tmp; 403 - } 404 - } 405 - 406 - /* 407 - * If the time stamp is stuck, do not finally insert the value into 408 - * the entropy pool. Although this operation should not do any harm 409 - * even when the time stamp has no entropy, SP800-90B requires that 410 - * any conditioning operation (SP800-90B considers the LFSR to be a 411 - * conditioning operation) to have an identical amount of input 412 - * data according to section 3.1.5. 413 - */ 414 - if (!stuck) 415 - ec->data = new; 372 + return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl), 373 + SHA3_HASH_LOOP, stuck); 416 374 } 417 375 418 376 /* ··· 397 453 #define MAX_ACC_LOOP_BIT 7 398 454 #define MIN_ACC_LOOP_BIT 0 399 455 __u64 acc_loop_cnt = 400 - jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); 456 + jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); 401 457 402 458 if (NULL == ec || NULL == ec->mem) 403 459 return; ··· 465 521 stuck = jent_stuck(ec, current_delta); 466 522 467 523 /* Now call the next noise sources which also injects the data */ 468 - jent_lfsr_time(ec, current_delta, 0, stuck); 524 + if (jent_condition_data(ec, current_delta, stuck)) 525 + stuck = 1; 469 526 470 527 return stuck; 471 528 } 472 529 473 530 /* 474 531 * Generator of one 64 bit random number 475 - * Function fills rand_data->data 532 + * Function fills rand_data->hash_state 476 533 * 477 534 * @ec [in] Reference to entropy collector 478 535 */ ··· 520 575 * @return 0 when request is fulfilled or an error 521 576 * 522 577 * The following error codes can occur: 523 - * -1 entropy_collector is NULL 578 
+ * -1 entropy_collector is NULL or the generation failed 524 579 * -2 Intermittent health failure 525 580 * -3 Permanent health failure 526 581 */ ··· 550 605 * Perform startup health tests and return permanent 551 606 * error if it fails. 552 607 */ 553 - if (jent_entropy_init()) 608 + if (jent_entropy_init(ec->hash_state)) 554 609 return -3; 555 610 556 611 return -2; ··· 560 615 tocopy = (DATA_SIZE_BITS / 8); 561 616 else 562 617 tocopy = len; 563 - jent_memcpy(p, &ec->data, tocopy); 618 + if (jent_read_random_block(ec->hash_state, p, tocopy)) 619 + return -1; 564 620 565 621 len -= tocopy; 566 622 p += tocopy; ··· 575 629 ***************************************************************************/ 576 630 577 631 struct rand_data *jent_entropy_collector_alloc(unsigned int osr, 578 - unsigned int flags) 632 + unsigned int flags, 633 + void *hash_state) 579 634 { 580 635 struct rand_data *entropy_collector; 581 636 ··· 603 656 osr = 1; /* minimum sampling rate is 1 */ 604 657 entropy_collector->osr = osr; 605 658 659 + entropy_collector->hash_state = hash_state; 660 + 606 661 /* fill the data pad with non-zero values */ 607 662 jent_gen_entropy(entropy_collector); 608 663 ··· 618 669 jent_zfree(entropy_collector); 619 670 } 620 671 621 - int jent_entropy_init(void) 672 + int jent_entropy_init(void *hash_state) 622 673 { 623 674 int i; 624 675 __u64 delta_sum = 0; ··· 631 682 632 683 /* Required for RCT */ 633 684 ec.osr = 1; 685 + ec.hash_state = hash_state; 634 686 635 687 /* We could perform statistical tests here, but the problem is 636 688 * that we only have a few loop counts to do testing. 
These ··· 669 719 /* Invoke core entropy collection logic */ 670 720 jent_get_nstime(&time); 671 721 ec.prev_time = time; 672 - jent_lfsr_time(&ec, time, 0, 0); 722 + jent_condition_data(&ec, time, 0); 673 723 jent_get_nstime(&time2); 674 724 675 725 /* test whether timer works */ ··· 712 762 if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) { 713 763 jent_apt_reset(&ec, 714 764 delta & JENT_APT_WORD_MASK); 715 - if (jent_health_failure(&ec)) 716 - return JENT_EHEALTH; 717 765 } 718 766 } 719 767 720 - /* Validate RCT */ 721 - if (jent_rct_failure(&ec)) 722 - return JENT_ERCT; 768 + /* Validate health test result */ 769 + if (jent_health_failure(&ec)) 770 + return JENT_EHEALTH; 723 771 724 772 /* test whether we have an increasing timer */ 725 773 if (!(time2 > time))
+17 -3
crypto/jitterentropy.h
··· 2 2 3 3 extern void *jent_zalloc(unsigned int len); 4 4 extern void jent_zfree(void *ptr); 5 - extern void jent_memcpy(void *dest, const void *src, unsigned int n); 6 5 extern void jent_get_nstime(__u64 *out); 6 + extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, 7 + unsigned int addtl_len, __u64 hash_loop_cnt, 8 + unsigned int stuck); 9 + int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len); 7 10 8 11 struct rand_data; 9 - extern int jent_entropy_init(void); 12 + extern int jent_entropy_init(void *hash_state); 10 13 extern int jent_read_entropy(struct rand_data *ec, unsigned char *data, 11 14 unsigned int len); 12 15 13 16 extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr, 14 - unsigned int flags); 17 + unsigned int flags, 18 + void *hash_state); 15 19 extern void jent_entropy_collector_free(struct rand_data *entropy_collector); 20 + 21 + #ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE 22 + int jent_raw_hires_entropy_store(__u32 value); 23 + void jent_testing_init(void); 24 + void jent_testing_exit(void); 25 + #else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */ 26 + static inline int jent_raw_hires_entropy_store(__u32 value) { return 0; } 27 + static inline void jent_testing_init(void) { } 28 + static inline void jent_testing_exit(void) { } 29 + #endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
+36
crypto/rsa.c
··· 205 205 return -EINVAL; 206 206 } 207 207 208 + static int rsa_check_exponent_fips(MPI e) 209 + { 210 + MPI e_max = NULL; 211 + 212 + /* check if odd */ 213 + if (!mpi_test_bit(e, 0)) { 214 + return -EINVAL; 215 + } 216 + 217 + /* check if 2^16 < e < 2^256. */ 218 + if (mpi_cmp_ui(e, 65536) <= 0) { 219 + return -EINVAL; 220 + } 221 + 222 + e_max = mpi_alloc(0); 223 + mpi_set_bit(e_max, 256); 224 + 225 + if (mpi_cmp(e, e_max) >= 0) { 226 + mpi_free(e_max); 227 + return -EINVAL; 228 + } 229 + 230 + mpi_free(e_max); 231 + return 0; 232 + } 233 + 208 234 static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, 209 235 unsigned int keylen) 210 236 { ··· 254 228 goto err; 255 229 256 230 if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { 231 + rsa_free_mpi_key(mpi_key); 232 + return -EINVAL; 233 + } 234 + 235 + if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) { 257 236 rsa_free_mpi_key(mpi_key); 258 237 return -EINVAL; 259 238 } ··· 317 286 goto err; 318 287 319 288 if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { 289 + rsa_free_mpi_key(mpi_key); 290 + return -EINVAL; 291 + } 292 + 293 + if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) { 320 294 rsa_free_mpi_key(mpi_key); 321 295 return -EINVAL; 322 296 }
+7 -5
crypto/shash.c
··· 597 597 return hash; 598 598 } 599 599 600 - if (!alg->clone_tfm) 600 + if (!alg->clone_tfm && (alg->init_tfm || alg->base.cra_init)) 601 601 return ERR_PTR(-ENOSYS); 602 602 603 603 nhash = crypto_clone_tfm(&crypto_shash_type, tfm); ··· 606 606 607 607 nhash->descsize = hash->descsize; 608 608 609 - err = alg->clone_tfm(nhash, hash); 610 - if (err) { 611 - crypto_free_shash(nhash); 612 - return ERR_PTR(err); 609 + if (alg->clone_tfm) { 610 + err = alg->clone_tfm(nhash, hash); 611 + if (err) { 612 + crypto_free_shash(nhash); 613 + return ERR_PTR(err); 614 + } 613 615 } 614 616 615 617 return nhash;
+157
crypto/sig.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Public Key Signature Algorithm 4 + * 5 + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> 6 + */ 7 + 8 + #include <crypto/akcipher.h> 9 + #include <crypto/internal/sig.h> 10 + #include <linux/cryptouser.h> 11 + #include <linux/kernel.h> 12 + #include <linux/module.h> 13 + #include <linux/scatterlist.h> 14 + #include <linux/seq_file.h> 15 + #include <linux/string.h> 16 + #include <net/netlink.h> 17 + 18 + #include "internal.h" 19 + 20 + #define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e 21 + 22 + static const struct crypto_type crypto_sig_type; 23 + 24 + static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm) 25 + { 26 + return container_of(tfm, struct crypto_sig, base); 27 + } 28 + 29 + static int crypto_sig_init_tfm(struct crypto_tfm *tfm) 30 + { 31 + if (tfm->__crt_alg->cra_type != &crypto_sig_type) 32 + return crypto_init_akcipher_ops_sig(tfm); 33 + 34 + return 0; 35 + } 36 + 37 + static void __maybe_unused crypto_sig_show(struct seq_file *m, 38 + struct crypto_alg *alg) 39 + { 40 + seq_puts(m, "type : sig\n"); 41 + } 42 + 43 + static int __maybe_unused crypto_sig_report(struct sk_buff *skb, 44 + struct crypto_alg *alg) 45 + { 46 + struct crypto_report_akcipher rsig = {}; 47 + 48 + strscpy(rsig.type, "sig", sizeof(rsig.type)); 49 + 50 + return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig); 51 + } 52 + 53 + static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb, 54 + struct crypto_alg *alg) 55 + { 56 + struct crypto_stat_akcipher rsig = {}; 57 + 58 + strscpy(rsig.type, "sig", sizeof(rsig.type)); 59 + 60 + return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig); 61 + } 62 + 63 + static const struct crypto_type crypto_sig_type = { 64 + .extsize = crypto_alg_extsize, 65 + .init_tfm = crypto_sig_init_tfm, 66 + #ifdef CONFIG_PROC_FS 67 + .show = crypto_sig_show, 68 + #endif 69 + #if IS_ENABLED(CONFIG_CRYPTO_USER) 70 + .report = 
crypto_sig_report, 71 + #endif 72 + #ifdef CONFIG_CRYPTO_STATS 73 + .report_stat = crypto_sig_report_stat, 74 + #endif 75 + .maskclear = ~CRYPTO_ALG_TYPE_MASK, 76 + .maskset = CRYPTO_ALG_TYPE_SIG_MASK, 77 + .type = CRYPTO_ALG_TYPE_SIG, 78 + .tfmsize = offsetof(struct crypto_sig, base), 79 + }; 80 + 81 + struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask) 82 + { 83 + return crypto_alloc_tfm(alg_name, &crypto_sig_type, type, mask); 84 + } 85 + EXPORT_SYMBOL_GPL(crypto_alloc_sig); 86 + 87 + int crypto_sig_maxsize(struct crypto_sig *tfm) 88 + { 89 + struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); 90 + 91 + return crypto_akcipher_maxsize(*ctx); 92 + } 93 + EXPORT_SYMBOL_GPL(crypto_sig_maxsize); 94 + 95 + int crypto_sig_sign(struct crypto_sig *tfm, 96 + const void *src, unsigned int slen, 97 + void *dst, unsigned int dlen) 98 + { 99 + struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); 100 + struct crypto_akcipher_sync_data data = { 101 + .tfm = *ctx, 102 + .src = src, 103 + .dst = dst, 104 + .slen = slen, 105 + .dlen = dlen, 106 + }; 107 + 108 + return crypto_akcipher_sync_prep(&data) ?: 109 + crypto_akcipher_sync_post(&data, 110 + crypto_akcipher_sign(data.req)); 111 + } 112 + EXPORT_SYMBOL_GPL(crypto_sig_sign); 113 + 114 + int crypto_sig_verify(struct crypto_sig *tfm, 115 + const void *src, unsigned int slen, 116 + const void *digest, unsigned int dlen) 117 + { 118 + struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); 119 + struct crypto_akcipher_sync_data data = { 120 + .tfm = *ctx, 121 + .src = src, 122 + .slen = slen, 123 + .dlen = dlen, 124 + }; 125 + int err; 126 + 127 + err = crypto_akcipher_sync_prep(&data); 128 + if (err) 129 + return err; 130 + 131 + memcpy(data.buf + slen, digest, dlen); 132 + 133 + return crypto_akcipher_sync_post(&data, 134 + crypto_akcipher_verify(data.req)); 135 + } 136 + EXPORT_SYMBOL_GPL(crypto_sig_verify); 137 + 138 + int crypto_sig_set_pubkey(struct crypto_sig *tfm, 139 + const void *key, unsigned int 
keylen) 140 + { 141 + struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); 142 + 143 + return crypto_akcipher_set_pub_key(*ctx, key, keylen); 144 + } 145 + EXPORT_SYMBOL_GPL(crypto_sig_set_pubkey); 146 + 147 + int crypto_sig_set_privkey(struct crypto_sig *tfm, 148 + const void *key, unsigned int keylen) 149 + { 150 + struct crypto_akcipher **ctx = crypto_sig_ctx(tfm); 151 + 152 + return crypto_akcipher_set_priv_key(*ctx, key, keylen); 153 + } 154 + EXPORT_SYMBOL_GPL(crypto_sig_set_privkey); 155 + 156 + MODULE_LICENSE("GPL"); 157 + MODULE_DESCRIPTION("Public Key Signature Algorithms");
+69 -35
crypto/sm2.c
··· 13 13 #include <crypto/internal/akcipher.h> 14 14 #include <crypto/akcipher.h> 15 15 #include <crypto/hash.h> 16 - #include <crypto/sm3.h> 17 16 #include <crypto/rng.h> 18 17 #include <crypto/sm2.h> 19 18 #include "sm2signature.asn1.h" 19 + 20 + /* The default user id as specified in GM/T 0009-2012 */ 21 + #define SM2_DEFAULT_USERID "1234567812345678" 22 + #define SM2_DEFAULT_USERID_LEN 16 20 23 21 24 #define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8) 22 25 ··· 62 59 .g_y = "0xbc3736a2f4f6779c59bdcee36b692153d0a9877cc62a474002df32e52139f0a0", 63 60 .h = 1 64 61 }; 62 + 63 + static int __sm2_set_pub_key(struct mpi_ec_ctx *ec, 64 + const void *key, unsigned int keylen); 65 65 66 66 static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec) 67 67 { ··· 219 213 return 0; 220 214 } 221 215 222 - static int sm2_z_digest_update(struct sm3_state *sctx, 223 - MPI m, unsigned int pbytes) 216 + static int sm2_z_digest_update(struct shash_desc *desc, 217 + MPI m, unsigned int pbytes) 224 218 { 225 219 static const unsigned char zero[32]; 226 220 unsigned char *in; 227 221 unsigned int inlen; 222 + int err; 228 223 229 224 in = mpi_get_buffer(m, &inlen, NULL); 230 225 if (!in) ··· 233 226 234 227 if (inlen < pbytes) { 235 228 /* padding with zero */ 236 - sm3_update(sctx, zero, pbytes - inlen); 237 - sm3_update(sctx, in, inlen); 229 + err = crypto_shash_update(desc, zero, pbytes - inlen) ?: 230 + crypto_shash_update(desc, in, inlen); 238 231 } else if (inlen > pbytes) { 239 232 /* skip the starting zero */ 240 - sm3_update(sctx, in + inlen - pbytes, pbytes); 233 + err = crypto_shash_update(desc, in + inlen - pbytes, pbytes); 241 234 } else { 242 - sm3_update(sctx, in, inlen); 235 + err = crypto_shash_update(desc, in, inlen); 243 236 } 244 237 245 238 kfree(in); 246 - return 0; 239 + return err; 247 240 } 248 241 249 - static int sm2_z_digest_update_point(struct sm3_state *sctx, 250 - MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes) 242 + static int 
sm2_z_digest_update_point(struct shash_desc *desc, 243 + MPI_POINT point, struct mpi_ec_ctx *ec, 244 + unsigned int pbytes) 251 245 { 252 246 MPI x, y; 253 247 int ret = -EINVAL; ··· 256 248 x = mpi_new(0); 257 249 y = mpi_new(0); 258 250 259 - if (!mpi_ec_get_affine(x, y, point, ec) && 260 - !sm2_z_digest_update(sctx, x, pbytes) && 261 - !sm2_z_digest_update(sctx, y, pbytes)) 262 - ret = 0; 251 + ret = mpi_ec_get_affine(x, y, point, ec) ? -EINVAL : 252 + sm2_z_digest_update(desc, x, pbytes) ?: 253 + sm2_z_digest_update(desc, y, pbytes); 263 254 264 255 mpi_free(x); 265 256 mpi_free(y); 266 257 return ret; 267 258 } 268 259 269 - int sm2_compute_z_digest(struct crypto_akcipher *tfm, 270 - const unsigned char *id, size_t id_len, 271 - unsigned char dgst[SM3_DIGEST_SIZE]) 260 + int sm2_compute_z_digest(struct shash_desc *desc, 261 + const void *key, unsigned int keylen, void *dgst) 272 262 { 273 - struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); 274 - uint16_t bits_len; 275 - unsigned char entl[2]; 276 - struct sm3_state sctx; 263 + struct mpi_ec_ctx *ec; 264 + unsigned int bits_len; 277 265 unsigned int pbytes; 266 + u8 entl[2]; 267 + int err; 278 268 279 - if (id_len > (USHRT_MAX / 8) || !ec->Q) 280 - return -EINVAL; 269 + ec = kmalloc(sizeof(*ec), GFP_KERNEL); 270 + if (!ec) 271 + return -ENOMEM; 281 272 282 - bits_len = (uint16_t)(id_len * 8); 273 + err = __sm2_set_pub_key(ec, key, keylen); 274 + if (err) 275 + goto out_free_ec; 276 + 277 + bits_len = SM2_DEFAULT_USERID_LEN * 8; 283 278 entl[0] = bits_len >> 8; 284 279 entl[1] = bits_len & 0xff; 285 280 286 281 pbytes = MPI_NBYTES(ec->p); 287 282 288 283 /* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */ 289 - sm3_init(&sctx); 290 - sm3_update(&sctx, entl, 2); 291 - sm3_update(&sctx, id, id_len); 284 + err = crypto_shash_init(desc); 285 + if (err) 286 + goto out_deinit_ec; 292 287 293 - if (sm2_z_digest_update(&sctx, ec->a, pbytes) || 294 - sm2_z_digest_update(&sctx, ec->b, pbytes) || 295 - 
sm2_z_digest_update_point(&sctx, ec->G, ec, pbytes) || 296 - sm2_z_digest_update_point(&sctx, ec->Q, ec, pbytes)) 297 - return -EINVAL; 288 + err = crypto_shash_update(desc, entl, 2); 289 + if (err) 290 + goto out_deinit_ec; 298 291 299 - sm3_final(&sctx, dgst); 300 - return 0; 292 + err = crypto_shash_update(desc, SM2_DEFAULT_USERID, 293 + SM2_DEFAULT_USERID_LEN); 294 + if (err) 295 + goto out_deinit_ec; 296 + 297 + err = sm2_z_digest_update(desc, ec->a, pbytes) ?: 298 + sm2_z_digest_update(desc, ec->b, pbytes) ?: 299 + sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ?: 300 + sm2_z_digest_update_point(desc, ec->Q, ec, pbytes); 301 + if (err) 302 + goto out_deinit_ec; 303 + 304 + err = crypto_shash_final(desc, dgst); 305 + 306 + out_deinit_ec: 307 + sm2_ec_ctx_deinit(ec); 308 + out_free_ec: 309 + kfree(ec); 310 + return err; 301 311 } 302 - EXPORT_SYMBOL(sm2_compute_z_digest); 312 + EXPORT_SYMBOL_GPL(sm2_compute_z_digest); 303 313 304 314 static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s) 305 315 { ··· 417 391 const void *key, unsigned int keylen) 418 392 { 419 393 struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); 394 + 395 + return __sm2_set_pub_key(ec, key, keylen); 396 + 397 + } 398 + 399 + static int __sm2_set_pub_key(struct mpi_ec_ctx *ec, 400 + const void *key, unsigned int keylen) 401 + { 420 402 MPI a; 421 403 int rc; 422 404
+20 -7
drivers/char/hw_random/Kconfig
··· 335 335 336 336 If unsure, say Y. 337 337 338 + config HW_RANDOM_HISTB 339 + tristate "Hisilicon STB Random Number Generator support" 340 + depends on ARCH_HISI || COMPILE_TEST 341 + default ARCH_HISI 342 + help 343 + This driver provides kernel-side support for the Random Number 344 + Generator hardware found on Hisilicon Hi37xx SoC. 345 + 346 + To compile this driver as a module, choose M here: the 347 + module will be called histb-rng. 348 + 338 349 config HW_RANDOM_ST 339 350 tristate "ST Microelectronics HW Random Number Generator support" 340 - depends on HW_RANDOM && ARCH_STI 351 + depends on HW_RANDOM && (ARCH_STI || COMPILE_TEST) 341 352 help 342 353 This driver provides kernel-side support for the Random Number 343 354 Generator hardware found on STi series of SoCs. ··· 411 400 412 401 config HW_RANDOM_MESON 413 402 tristate "Amlogic Meson Random Number Generator support" 414 - depends on HW_RANDOM 415 403 depends on ARCH_MESON || COMPILE_TEST 416 - default y 404 + depends on HAS_IOMEM && OF 405 + default HW_RANDOM if ARCH_MESON 417 406 help 418 407 This driver provides kernel-side support for the Random Number 419 408 Generator hardware found on Amlogic Meson SoCs. ··· 438 427 439 428 config HW_RANDOM_MTK 440 429 tristate "Mediatek Random Number Generator support" 441 - depends on HW_RANDOM 442 430 depends on ARCH_MEDIATEK || COMPILE_TEST 443 - default y 431 + depends on HAS_IOMEM && OF 432 + default HW_RANDOM if ARCH_MEDIATEK 444 433 help 445 434 This driver provides kernel-side support for the Random Number 446 435 Generator hardware found on Mediatek SoCs. ··· 467 456 config HW_RANDOM_EXYNOS 468 457 tristate "Samsung Exynos True Random Number Generator support" 469 458 depends on ARCH_EXYNOS || COMPILE_TEST 470 - default HW_RANDOM 459 + depends on HAS_IOMEM 460 + default HW_RANDOM if ARCH_EXYNOS 471 461 help 472 462 This driver provides support for the True Random Number 473 463 Generator available in Exynos SoCs. 
··· 495 483 config HW_RANDOM_NPCM 496 484 tristate "NPCM Random Number Generator support" 497 485 depends on ARCH_NPCM || COMPILE_TEST 498 - default HW_RANDOM 486 + depends on HAS_IOMEM 487 + default HW_RANDOM if ARCH_NPCM 499 488 help 500 489 This driver provides support for the Random Number 501 490 Generator hardware available in Nuvoton NPCM SoCs.
+1
drivers/char/hw_random/Makefile
··· 29 29 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o 30 30 obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o 31 31 obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o 32 + obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o 32 33 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o 33 34 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o 34 35 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
+59 -4
drivers/char/hw_random/cn10k-rng.c
··· 23 23 #define RNM_PF_RANDOM 0x400 24 24 #define RNM_TRNG_RESULT 0x408 25 25 26 + /* Extended TRNG Read and Status Registers */ 27 + #define RNM_PF_TRNG_DAT 0x1000 28 + #define RNM_PF_TRNG_RES 0x1008 29 + 26 30 struct cn10k_rng { 27 31 void __iomem *reg_base; 28 32 struct hwrng ops; 29 33 struct pci_dev *pdev; 34 + /* Octeon CN10K-A A0/A1, CNF10K-A A0/A1 and CNF10K-B A0/B0 35 + * does not support extended TRNG registers 36 + */ 37 + bool extended_trng_regs; 30 38 }; 31 39 32 40 #define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f 41 + 42 + #define PCI_SUBSYS_DEVID_CN10K_A_RNG 0xB900 43 + #define PCI_SUBSYS_DEVID_CNF10K_A_RNG 0xBA00 44 + #define PCI_SUBSYS_DEVID_CNF10K_B_RNG 0xBC00 45 + 46 + static bool cn10k_is_extended_trng_regs_supported(struct pci_dev *pdev) 47 + { 48 + /* CN10K-A A0/A1 */ 49 + if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RNG) && 50 + (!pdev->revision || (pdev->revision & 0xff) == 0x50 || 51 + (pdev->revision & 0xff) == 0x51)) 52 + return false; 53 + 54 + /* CNF10K-A A0 */ 55 + if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_RNG) && 56 + (!pdev->revision || (pdev->revision & 0xff) == 0x60 || 57 + (pdev->revision & 0xff) == 0x61)) 58 + return false; 59 + 60 + /* CNF10K-B A0/B0 */ 61 + if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_RNG) && 62 + (!pdev->revision || (pdev->revision & 0xff) == 0x70 || 63 + (pdev->revision & 0xff) == 0x74)) 64 + return false; 65 + 66 + return true; 67 + } 33 68 34 69 static unsigned long reset_rng_health_state(struct cn10k_rng *rng) 35 70 { ··· 98 63 return 0; 99 64 } 100 65 101 - static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value) 66 + /* Returns true when valid data available otherwise return false */ 67 + static bool cn10k_read_trng(struct cn10k_rng *rng, u64 *value) 102 68 { 69 + u16 retry_count = 0; 103 70 u64 upper, lower; 71 + u64 status; 72 + 73 + if (rng->extended_trng_regs) { 74 + do { 75 + *value = readq(rng->reg_base + RNM_PF_TRNG_DAT); 76 + if (*value) 77 
+ return true; 78 + status = readq(rng->reg_base + RNM_PF_TRNG_RES); 79 + if (!status && (retry_count++ > 0x1000)) 80 + return false; 81 + } while (!status); 82 + } 104 83 105 84 *value = readq(rng->reg_base + RNM_PF_RANDOM); 106 85 ··· 131 82 132 83 *value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF); 133 84 } 85 + return true; 134 86 } 135 87 136 88 static int cn10k_rng_read(struct hwrng *hwrng, void *data, ··· 150 100 size = max; 151 101 152 102 while (size >= 8) { 153 - cn10k_read_trng(rng, &value); 103 + if (!cn10k_read_trng(rng, &value)) 104 + goto out; 154 105 155 106 *((u64 *)pos) = value; 156 107 size -= 8; ··· 159 108 } 160 109 161 110 if (size > 0) { 162 - cn10k_read_trng(rng, &value); 111 + if (!cn10k_read_trng(rng, &value)) 112 + goto out; 163 113 164 114 while (size > 0) { 165 115 *pos = (u8)value; ··· 170 118 } 171 119 } 172 120 121 + out: 173 122 return max - size; 174 123 } 175 124 ··· 197 144 if (!rng->ops.name) 198 145 return -ENOMEM; 199 146 200 - rng->ops.read = cn10k_rng_read; 147 + rng->ops.read = cn10k_rng_read; 201 148 rng->ops.priv = (unsigned long)rng; 149 + 150 + rng->extended_trng_regs = cn10k_is_extended_trng_regs_supported(pdev); 202 151 203 152 reset_rng_health_state(rng); 204 153
+23 -30
drivers/char/hw_random/imx-rngc.c
··· 17 17 #include <linux/hw_random.h> 18 18 #include <linux/completion.h> 19 19 #include <linux/io.h> 20 + #include <linux/bitfield.h> 20 21 21 22 #define RNGC_VER_ID 0x0000 22 23 #define RNGC_COMMAND 0x0004 ··· 27 26 #define RNGC_FIFO 0x0014 28 27 29 28 /* the fields in the ver id register */ 30 - #define RNGC_TYPE_SHIFT 28 29 + #define RNG_TYPE GENMASK(31, 28) 31 30 #define RNGC_VER_MAJ_SHIFT 8 32 31 33 32 /* the rng_type field */ ··· 35 34 #define RNGC_TYPE_RNGC 0x2 36 35 37 36 38 - #define RNGC_CMD_CLR_ERR 0x00000020 39 - #define RNGC_CMD_CLR_INT 0x00000010 40 - #define RNGC_CMD_SEED 0x00000002 41 - #define RNGC_CMD_SELF_TEST 0x00000001 37 + #define RNGC_CMD_CLR_ERR BIT(5) 38 + #define RNGC_CMD_CLR_INT BIT(4) 39 + #define RNGC_CMD_SEED BIT(1) 40 + #define RNGC_CMD_SELF_TEST BIT(0) 42 41 43 - #define RNGC_CTRL_MASK_ERROR 0x00000040 44 - #define RNGC_CTRL_MASK_DONE 0x00000020 45 - #define RNGC_CTRL_AUTO_SEED 0x00000010 42 + #define RNGC_CTRL_MASK_ERROR BIT(6) 43 + #define RNGC_CTRL_MASK_DONE BIT(5) 44 + #define RNGC_CTRL_AUTO_SEED BIT(4) 46 45 47 - #define RNGC_STATUS_ERROR 0x00010000 48 - #define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00 49 - #define RNGC_STATUS_FIFO_LEVEL_SHIFT 8 50 - #define RNGC_STATUS_SEED_DONE 0x00000020 51 - #define RNGC_STATUS_ST_DONE 0x00000010 46 + #define RNGC_STATUS_ERROR BIT(16) 47 + #define RNGC_STATUS_FIFO_LEVEL_MASK GENMASK(11, 8) 48 + #define RNGC_STATUS_SEED_DONE BIT(5) 49 + #define RNGC_STATUS_ST_DONE BIT(4) 52 50 53 51 #define RNGC_ERROR_STATUS_STAT_ERR 0x00000008 54 52 ··· 110 110 cmd = readl(rngc->base + RNGC_COMMAND); 111 111 writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND); 112 112 113 - ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT); 113 + ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); 114 114 imx_rngc_irq_mask_clear(rngc); 115 115 if (!ret) 116 116 return -ETIMEDOUT; ··· 122 122 { 123 123 struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); 
124 124 unsigned int status; 125 - unsigned int level; 126 125 int retval = 0; 127 126 128 127 while (max >= sizeof(u32)) { ··· 131 132 if (status & RNGC_STATUS_ERROR) 132 133 break; 133 134 134 - /* how many random numbers are in FIFO? [0-16] */ 135 - level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >> 136 - RNGC_STATUS_FIFO_LEVEL_SHIFT; 137 - 138 - if (level) { 135 + if (status & RNGC_STATUS_FIFO_LEVEL_MASK) { 139 136 /* retrieve a random number from FIFO */ 140 137 *(u32 *)data = readl(rngc->base + RNGC_FIFO); 141 138 ··· 182 187 cmd = readl(rngc->base + RNGC_COMMAND); 183 188 writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND); 184 189 185 - ret = wait_for_completion_timeout(&rngc->rng_op_done, 186 - RNGC_TIMEOUT); 187 - 190 + ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); 188 191 if (!ret) { 189 192 ret = -ETIMEDOUT; 190 193 goto err; ··· 222 229 imx_rngc_irq_mask_clear(rngc); 223 230 } 224 231 225 - static int imx_rngc_probe(struct platform_device *pdev) 232 + static int __init imx_rngc_probe(struct platform_device *pdev) 226 233 { 227 234 struct imx_rngc *rngc; 228 235 int ret; ··· 249 256 return irq; 250 257 251 258 ver_id = readl(rngc->base + RNGC_VER_ID); 252 - rng_type = ver_id >> RNGC_TYPE_SHIFT; 259 + rng_type = FIELD_GET(RNG_TYPE, ver_id); 253 260 /* 254 261 * This driver supports only RNGC and RNGB. (There's a different 255 262 * driver for RNGA.) 
··· 298 305 return 0; 299 306 } 300 307 301 - static int __maybe_unused imx_rngc_suspend(struct device *dev) 308 + static int imx_rngc_suspend(struct device *dev) 302 309 { 303 310 struct imx_rngc *rngc = dev_get_drvdata(dev); 304 311 ··· 307 314 return 0; 308 315 } 309 316 310 - static int __maybe_unused imx_rngc_resume(struct device *dev) 317 + static int imx_rngc_resume(struct device *dev) 311 318 { 312 319 struct imx_rngc *rngc = dev_get_drvdata(dev); 313 320 ··· 316 323 return 0; 317 324 } 318 325 319 - static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); 326 + static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); 320 327 321 328 static const struct of_device_id imx_rngc_dt_ids[] = { 322 - { .compatible = "fsl,imx25-rngb", .data = NULL, }, 329 + { .compatible = "fsl,imx25-rngb" }, 323 330 { /* sentinel */ } 324 331 }; 325 332 MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); ··· 327 334 static struct platform_driver imx_rngc_driver = { 328 335 .driver = { 329 336 .name = KBUILD_MODNAME, 330 - .pm = &imx_rngc_pm_ops, 337 + .pm = pm_sleep_ptr(&imx_rngc_pm_ops), 331 338 .of_match_table = imx_rngc_dt_ids, 332 339 }, 333 340 };
+1 -20
drivers/char/hw_random/st-rng.c
··· 42 42 43 43 struct st_rng_data { 44 44 void __iomem *base; 45 - struct clk *clk; 46 45 struct hwrng ops; 47 46 }; 48 47 ··· 84 85 if (IS_ERR(base)) 85 86 return PTR_ERR(base); 86 87 87 - clk = devm_clk_get(&pdev->dev, NULL); 88 + clk = devm_clk_get_enabled(&pdev->dev, NULL); 88 89 if (IS_ERR(clk)) 89 90 return PTR_ERR(clk); 90 - 91 - ret = clk_prepare_enable(clk); 92 - if (ret) 93 - return ret; 94 91 95 92 ddata->ops.priv = (unsigned long)ddata; 96 93 ddata->ops.read = st_rng_read; 97 94 ddata->ops.name = pdev->name; 98 95 ddata->base = base; 99 - ddata->clk = clk; 100 - 101 - dev_set_drvdata(&pdev->dev, ddata); 102 96 103 97 ret = devm_hwrng_register(&pdev->dev, &ddata->ops); 104 98 if (ret) { 105 99 dev_err(&pdev->dev, "Failed to register HW RNG\n"); 106 - clk_disable_unprepare(clk); 107 100 return ret; 108 101 } 109 102 110 103 dev_info(&pdev->dev, "Successfully registered HW RNG\n"); 111 - 112 - return 0; 113 - } 114 - 115 - static int st_rng_remove(struct platform_device *pdev) 116 - { 117 - struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev); 118 - 119 - clk_disable_unprepare(ddata->clk); 120 104 121 105 return 0; 122 106 } ··· 116 134 .of_match_table = of_match_ptr(st_rng_match), 117 135 }, 118 136 .probe = st_rng_probe, 119 - .remove = st_rng_remove 120 137 }; 121 138 122 139 module_platform_driver(st_rng_driver);
+5 -5
drivers/char/hw_random/virtio-rng.c
··· 4 4 * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation 5 5 */ 6 6 7 + #include <asm/barrier.h> 7 8 #include <linux/err.h> 8 9 #include <linux/hw_random.h> 9 10 #include <linux/scatterlist.h> ··· 38 37 static void random_recv_done(struct virtqueue *vq) 39 38 { 40 39 struct virtrng_info *vi = vq->vdev->priv; 40 + unsigned int len; 41 41 42 42 /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ 43 - if (!virtqueue_get_buf(vi->vq, &vi->data_avail)) 43 + if (!virtqueue_get_buf(vi->vq, &len)) 44 44 return; 45 45 46 - vi->data_idx = 0; 47 - 46 + smp_store_release(&vi->data_avail, len); 48 47 complete(&vi->have_data); 49 48 } 50 49 ··· 53 52 struct scatterlist sg; 54 53 55 54 reinit_completion(&vi->have_data); 56 - vi->data_avail = 0; 57 55 vi->data_idx = 0; 58 56 59 57 sg_init_one(&sg, vi->data, sizeof(vi->data)); ··· 88 88 read = 0; 89 89 90 90 /* copy available data */ 91 - if (vi->data_avail) { 91 + if (smp_load_acquire(&vi->data_avail)) { 92 92 chunk = copy_data(vi, buf, size); 93 93 size -= chunk; 94 94 read += chunk;
+1
drivers/crypto/Kconfig
··· 807 807 acceleration for cryptographic algorithms on these devices. 808 808 809 809 source "drivers/crypto/aspeed/Kconfig" 810 + source "drivers/crypto/starfive/Kconfig" 810 811 811 812 endif # CRYPTO_HW
+1
drivers/crypto/Makefile
··· 50 50 obj-y += hisilicon/ 51 51 obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ 52 52 obj-y += intel/ 53 + obj-y += starfive/
+1 -1
drivers/crypto/atmel-ecc.c
··· 389 389 .name = "atmel-ecc", 390 390 .of_match_table = of_match_ptr(atmel_ecc_dt_ids), 391 391 }, 392 - .probe_new = atmel_ecc_probe, 392 + .probe = atmel_ecc_probe, 393 393 .remove = atmel_ecc_remove, 394 394 .id_table = atmel_ecc_id, 395 395 };
+1 -1
drivers/crypto/atmel-sha204a.c
··· 141 141 MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id); 142 142 143 143 static struct i2c_driver atmel_sha204a_driver = { 144 - .probe_new = atmel_sha204a_probe, 144 + .probe = atmel_sha204a_probe, 145 145 .remove = atmel_sha204a_remove, 146 146 .id_table = atmel_sha204a_id, 147 147
+9
drivers/crypto/caam/Kconfig
··· 162 162 config CRYPTO_DEV_FSL_CAAM_BLOB_GEN 163 163 bool 164 164 165 + config CRYPTO_DEV_FSL_CAAM_RNG_TEST 166 + bool "Test caam rng" 167 + select CRYPTO_DEV_FSL_CAAM_RNG_API 168 + help 169 + Selecting this will enable a self-test to run for the 170 + caam RNG. 171 + This test is several minutes long and executes 172 + just before the RNG is registered with the hw_random API. 173 + 165 174 endif # CRYPTO_DEV_FSL_CAAM_JR 166 175 167 176 endif # CRYPTO_DEV_FSL_CAAM
+48
drivers/crypto/caam/caamrng.c
··· 172 172 kfifo_free(&ctx->fifo); 173 173 } 174 174 175 + #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST 176 + static inline void test_len(struct hwrng *rng, size_t len, bool wait) 177 + { 178 + u8 *buf; 179 + int read_len; 180 + struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); 181 + struct device *dev = ctx->ctrldev; 182 + 183 + buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL); 184 + 185 + while (len > 0) { 186 + read_len = rng->read(rng, buf, len, wait); 187 + 188 + if (read_len < 0 || (read_len == 0 && wait)) { 189 + dev_err(dev, "RNG Read FAILED received %d bytes\n", 190 + read_len); 191 + kfree(buf); 192 + return; 193 + } 194 + 195 + print_hex_dump_debug("random bytes@: ", 196 + DUMP_PREFIX_ADDRESS, 16, 4, 197 + buf, read_len, 1); 198 + 199 + len = len - read_len; 200 + } 201 + 202 + kfree(buf); 203 + } 204 + 205 + static inline void test_mode_once(struct hwrng *rng, bool wait) 206 + { 207 + test_len(rng, 32, wait); 208 + test_len(rng, 64, wait); 209 + test_len(rng, 128, wait); 210 + } 211 + 212 + static void self_test(struct hwrng *rng) 213 + { 214 + pr_info("Executing RNG SELF-TEST with wait\n"); 215 + test_mode_once(rng, true); 216 + } 217 + #endif 218 + 175 219 static int caam_init(struct hwrng *rng) 176 220 { 177 221 struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); ··· 301 257 caam_rng_exit(ctrldev); 302 258 return ret; 303 259 } 260 + 261 + #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST 262 + self_test(&ctx->rng); 263 + #endif 304 264 305 265 devres_close_group(ctrldev, caam_rng_init); 306 266 return 0;
+158 -114
drivers/crypto/caam/ctrl.c
··· 79 79 append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); 80 80 } 81 81 82 + static const struct of_device_id imx8m_machine_match[] = { 83 + { .compatible = "fsl,imx8mm", }, 84 + { .compatible = "fsl,imx8mn", }, 85 + { .compatible = "fsl,imx8mp", }, 86 + { .compatible = "fsl,imx8mq", }, 87 + { .compatible = "fsl,imx8ulp", }, 88 + { } 89 + }; 90 + 82 91 /* 83 92 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of 84 93 * the software (no JR/QI used). ··· 114 105 * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1 115 106 * and the following steps should be performed regardless 116 107 */ 117 - of_machine_is_compatible("fsl,imx8mq") || 118 - of_machine_is_compatible("fsl,imx8mm") || 119 - of_machine_is_compatible("fsl,imx8mn") || 120 - of_machine_is_compatible("fsl,imx8mp")) { 108 + of_match_node(imx8m_machine_match, of_root)) { 121 109 clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); 122 110 123 111 while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && ··· 350 344 /* 351 345 * kick_trng - sets the various parameters for enabling the initialization 352 346 * of the RNG4 block in CAAM 353 - * @pdev - pointer to the platform device 347 + * @dev - pointer to the controller device 354 348 * @ent_delay - Defines the length (in system clocks) of each entropy sample. 
355 349 */ 356 - static void kick_trng(struct platform_device *pdev, int ent_delay) 350 + static void kick_trng(struct device *dev, int ent_delay) 357 351 { 358 - struct device *ctrldev = &pdev->dev; 359 - struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 352 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); 360 353 struct caam_ctrl __iomem *ctrl; 361 354 struct rng4tst __iomem *r4tst; 362 - u32 val; 355 + u32 val, rtsdctl; 363 356 364 357 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; 365 358 r4tst = &ctrl->r4tst[0]; ··· 374 369 * Performance-wise, it does not make sense to 375 370 * set the delay to a value that is lower 376 371 * than the last one that worked (i.e. the state handles 377 - * were instantiated properly. Thus, instead of wasting 378 - * time trying to set the values controlling the sample 379 - * frequency, the function simply returns. 372 + * were instantiated properly). 380 373 */ 381 - val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) 382 - >> RTSDCTL_ENT_DLY_SHIFT; 383 - if (ent_delay <= val) 384 - goto start_rng; 374 + rtsdctl = rd_reg32(&r4tst->rtsdctl); 375 + val = (rtsdctl & RTSDCTL_ENT_DLY_MASK) >> RTSDCTL_ENT_DLY_SHIFT; 376 + if (ent_delay > val) { 377 + val = ent_delay; 378 + /* min. freq. count, equal to 1/4 of the entropy sample length */ 379 + wr_reg32(&r4tst->rtfrqmin, val >> 2); 380 + /* max. freq. count, equal to 16 times the entropy sample length */ 381 + wr_reg32(&r4tst->rtfrqmax, val << 4); 382 + } 385 383 386 - val = rd_reg32(&r4tst->rtsdctl); 387 - val = (val & ~RTSDCTL_ENT_DLY_MASK) | 388 - (ent_delay << RTSDCTL_ENT_DLY_SHIFT); 389 - wr_reg32(&r4tst->rtsdctl, val); 390 - /* min. freq. 
count, equal to 1/4 of the entropy sample length */ 391 - wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2); 392 - /* disable maximum frequency count */ 393 - wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); 394 - /* read the control register */ 395 - val = rd_reg32(&r4tst->rtmctl); 396 - start_rng: 384 + wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) | 385 + RTSDCTL_SAMP_SIZE_VAL); 386 + 387 + /* 388 + * To avoid reprogramming the self-test parameters over and over again, 389 + * use RTSDCTL[SAMP_SIZE] as an indicator. 390 + */ 391 + if ((rtsdctl & RTSDCTL_SAMP_SIZE_MASK) != RTSDCTL_SAMP_SIZE_VAL) { 392 + wr_reg32(&r4tst->rtscmisc, (2 << 16) | 32); 393 + wr_reg32(&r4tst->rtpkrrng, 570); 394 + wr_reg32(&r4tst->rtpkrmax, 1600); 395 + wr_reg32(&r4tst->rtscml, (122 << 16) | 317); 396 + wr_reg32(&r4tst->rtscrl[0], (80 << 16) | 107); 397 + wr_reg32(&r4tst->rtscrl[1], (57 << 16) | 62); 398 + wr_reg32(&r4tst->rtscrl[2], (39 << 16) | 39); 399 + wr_reg32(&r4tst->rtscrl[3], (27 << 16) | 26); 400 + wr_reg32(&r4tst->rtscrl[4], (19 << 16) | 18); 401 + wr_reg32(&r4tst->rtscrl[5], (18 << 16) | 17); 402 + } 403 + 397 404 /* 398 405 * select raw sampling in both entropy shifter 399 406 * and statistical checker; ; put RNG4 into run mode ··· 635 618 return false; 636 619 } 637 620 621 + static int caam_ctrl_rng_init(struct device *dev) 622 + { 623 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); 624 + struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; 625 + int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 626 + u8 rng_vid; 627 + 628 + if (ctrlpriv->era < 10) { 629 + struct caam_perfmon __iomem *perfmon; 630 + 631 + perfmon = ctrlpriv->total_jobrs ? 632 + (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon : 633 + (struct caam_perfmon __iomem *)&ctrl->perfmon; 634 + 635 + rng_vid = (rd_reg32(&perfmon->cha_id_ls) & 636 + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; 637 + } else { 638 + struct version_regs __iomem *vreg; 639 + 640 + vreg = ctrlpriv->total_jobrs ? 
641 + (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg : 642 + (struct version_regs __iomem *)&ctrl->vreg; 643 + 644 + rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >> 645 + CHA_VER_VID_SHIFT; 646 + } 647 + 648 + /* 649 + * If SEC has RNG version >= 4 and RNG state handle has not been 650 + * already instantiated, do RNG instantiation 651 + * In case of SoCs with Management Complex, RNG is managed by MC f/w. 652 + */ 653 + if (!(ctrlpriv->mc_en && ctrlpriv->pr_support) && rng_vid >= 4) { 654 + ctrlpriv->rng4_sh_init = 655 + rd_reg32(&ctrl->r4tst[0].rdsta); 656 + /* 657 + * If the secure keys (TDKEK, JDKEK, TDSK), were already 658 + * generated, signal this to the function that is instantiating 659 + * the state handles. An error would occur if RNG4 attempts 660 + * to regenerate these keys before the next POR. 661 + */ 662 + gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; 663 + ctrlpriv->rng4_sh_init &= RDSTA_MASK; 664 + do { 665 + int inst_handles = 666 + rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK; 667 + /* 668 + * If either SH were instantiated by somebody else 669 + * (e.g. u-boot) then it is assumed that the entropy 670 + * parameters are properly set and thus the function 671 + * setting these (kick_trng(...)) is skipped. 672 + * Also, if a handle was instantiated, do not change 673 + * the TRNG parameters. 674 + */ 675 + if (needs_entropy_delay_adjustment()) 676 + ent_delay = 12000; 677 + if (!(ctrlpriv->rng4_sh_init || inst_handles)) { 678 + dev_info(dev, 679 + "Entropy delay = %u\n", 680 + ent_delay); 681 + kick_trng(dev, ent_delay); 682 + ent_delay += 400; 683 + } 684 + /* 685 + * if instantiate_rng(...) fails, the loop will rerun 686 + * and the kick_trng(...) function will modify the 687 + * upper and lower limits of the entropy sampling 688 + * interval, leading to a successful initialization of 689 + * the RNG. 
690 + */ 691 + ret = instantiate_rng(dev, inst_handles, 692 + gen_sk); 693 + /* 694 + * Entropy delay is determined via TRNG characterization. 695 + * TRNG characterization is run across different voltages 696 + * and temperatures. 697 + * If worst case value for ent_dly is identified, 698 + * the loop can be skipped for that platform. 699 + */ 700 + if (needs_entropy_delay_adjustment()) 701 + break; 702 + if (ret == -EAGAIN) 703 + /* 704 + * if here, the loop will rerun, 705 + * so don't hog the CPU 706 + */ 707 + cpu_relax(); 708 + } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); 709 + if (ret) { 710 + dev_err(dev, "failed to instantiate RNG"); 711 + return ret; 712 + } 713 + /* 714 + * Set handles initialized by this module as the complement of 715 + * the already initialized ones 716 + */ 717 + ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK; 718 + 719 + /* Enable RDB bit so that RNG works faster */ 720 + clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE); 721 + } 722 + 723 + return 0; 724 + } 725 + 638 726 /* Probe routine for CAAM top (controller) level */ 639 727 static int caam_probe(struct platform_device *pdev) 640 728 { 641 - int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 729 + int ret, ring; 642 730 u64 caam_id; 643 731 const struct soc_device_attribute *imx_soc_match; 644 732 struct device *dev; ··· 753 631 struct caam_perfmon __iomem *perfmon; 754 632 struct dentry *dfs_root; 755 633 u32 scfgr, comp_params; 756 - u8 rng_vid; 757 634 int pg_size; 758 635 int BLOCK_OFFSET = 0; 759 - bool pr_support = false; 760 636 bool reg_access = true; 761 637 762 638 ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL); ··· 766 646 nprop = pdev->dev.of_node; 767 647 768 648 imx_soc_match = soc_device_match(caam_imx_soc_table); 649 + if (!imx_soc_match && of_match_node(imx8m_machine_match, of_root)) 650 + return -EPROBE_DEFER; 651 + 769 652 caam_imx = (bool)imx_soc_match; 770 653 771 654 if (imx_soc_match) { ··· 893 
770 894 771 mc_version = fsl_mc_get_version(); 895 772 if (mc_version) 896 - pr_support = check_version(mc_version, 10, 20, 0); 773 + ctrlpriv->pr_support = check_version(mc_version, 10, 20, 774 + 0); 897 775 else 898 776 return -EPROBE_DEFER; 899 777 } ··· 985 861 return -ENOMEM; 986 862 } 987 863 988 - if (!reg_access) 989 - goto report_live; 990 - 991 864 comp_params = rd_reg32(&perfmon->comp_parms_ls); 992 865 ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB); 993 866 ··· 994 873 * check both here. 995 874 */ 996 875 if (ctrlpriv->era < 10) { 997 - rng_vid = (rd_reg32(&perfmon->cha_id_ls) & 998 - CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; 999 876 ctrlpriv->blob_present = ctrlpriv->blob_present && 1000 877 (rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK); 1001 878 } else { ··· 1003 884 (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg : 1004 885 (struct version_regs __iomem *)&ctrl->vreg; 1005 886 1006 - rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >> 1007 - CHA_VER_VID_SHIFT; 1008 887 ctrlpriv->blob_present = ctrlpriv->blob_present && 1009 888 (rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK); 1010 889 } 1011 890 1012 - /* 1013 - * If SEC has RNG version >= 4 and RNG state handle has not been 1014 - * already instantiated, do RNG instantiation 1015 - * In case of SoCs with Management Complex, RNG is managed by MC f/w. 1016 - */ 1017 - if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) { 1018 - ctrlpriv->rng4_sh_init = 1019 - rd_reg32(&ctrl->r4tst[0].rdsta); 1020 - /* 1021 - * If the secure keys (TDKEK, JDKEK, TDSK), were already 1022 - * generated, signal this to the function that is instantiating 1023 - * the state handles. An error would occur if RNG4 attempts 1024 - * to regenerate these keys before the next POR. 1025 - */ 1026 - gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 
0 : 1; 1027 - ctrlpriv->rng4_sh_init &= RDSTA_MASK; 1028 - do { 1029 - int inst_handles = 1030 - rd_reg32(&ctrl->r4tst[0].rdsta) & 1031 - RDSTA_MASK; 1032 - /* 1033 - * If either SH were instantiated by somebody else 1034 - * (e.g. u-boot) then it is assumed that the entropy 1035 - * parameters are properly set and thus the function 1036 - * setting these (kick_trng(...)) is skipped. 1037 - * Also, if a handle was instantiated, do not change 1038 - * the TRNG parameters. 1039 - */ 1040 - if (needs_entropy_delay_adjustment()) 1041 - ent_delay = 12000; 1042 - if (!(ctrlpriv->rng4_sh_init || inst_handles)) { 1043 - dev_info(dev, 1044 - "Entropy delay = %u\n", 1045 - ent_delay); 1046 - kick_trng(pdev, ent_delay); 1047 - ent_delay += 400; 1048 - } 1049 - /* 1050 - * if instantiate_rng(...) fails, the loop will rerun 1051 - * and the kick_trng(...) function will modify the 1052 - * upper and lower limits of the entropy sampling 1053 - * interval, leading to a successful initialization of 1054 - * the RNG. 1055 - */ 1056 - ret = instantiate_rng(dev, inst_handles, 1057 - gen_sk); 1058 - /* 1059 - * Entropy delay is determined via TRNG characterization. 1060 - * TRNG characterization is run across different voltages 1061 - * and temperatures. 1062 - * If worst case value for ent_dly is identified, 1063 - * the loop can be skipped for that platform. 
1064 - */ 1065 - if (needs_entropy_delay_adjustment()) 1066 - break; 1067 - if (ret == -EAGAIN) 1068 - /* 1069 - * if here, the loop will rerun, 1070 - * so don't hog the CPU 1071 - */ 1072 - cpu_relax(); 1073 - } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); 1074 - if (ret) { 1075 - dev_err(dev, "failed to instantiate RNG"); 891 + if (reg_access) { 892 + ret = caam_ctrl_rng_init(dev); 893 + if (ret) 1076 894 return ret; 1077 - } 1078 - /* 1079 - * Set handles initialized by this module as the complement of 1080 - * the already initialized ones 1081 - */ 1082 - ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK; 1083 - 1084 - /* Enable RDB bit so that RNG works faster */ 1085 - clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE); 1086 895 } 1087 - 1088 - report_live: 1089 - /* NOTE: RTIC detection ought to go here, around Si time */ 1090 896 1091 897 caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 | 1092 898 (u64)rd_reg32(&perfmon->caam_id_ls);
+1
drivers/crypto/caam/intern.h
··· 95 95 u8 blob_present; /* Nonzero if BLOB support present in device */ 96 96 u8 mc_en; /* Nonzero if MC f/w is active */ 97 97 u8 optee_en; /* Nonzero if OP-TEE f/w is active */ 98 + bool pr_support; /* RNG prediction resistance available */ 98 99 int secvio_irq; /* Security violation interrupt number */ 99 100 int virt_en; /* Virtualization enabled in CAAM */ 100 101 int era; /* CAAM Era (internal HW revision) */
+12 -2
drivers/crypto/caam/regs.h
··· 3 3 * CAAM hardware register-level view 4 4 * 5 5 * Copyright 2008-2011 Freescale Semiconductor, Inc. 6 - * Copyright 2018 NXP 6 + * Copyright 2018, 2023 NXP 7 7 */ 8 8 9 9 #ifndef REGS_H ··· 523 523 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) 524 524 #define RTSDCTL_ENT_DLY_MIN 3200 525 525 #define RTSDCTL_ENT_DLY_MAX 12800 526 + #define RTSDCTL_SAMP_SIZE_MASK 0xffff 527 + #define RTSDCTL_SAMP_SIZE_VAL 512 526 528 u32 rtsdctl; /* seed control register */ 527 529 union { 528 530 u32 rtsblim; /* PRGM=1: sparse bit limit register */ ··· 536 534 u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */ 537 535 u32 rtfrqcnt; /* PRGM=0: freq. count register */ 538 536 }; 539 - u32 rsvd1[40]; 537 + union { 538 + u32 rtscmc; /* statistical check run monobit count */ 539 + u32 rtscml; /* statistical check run monobit limit */ 540 + }; 541 + union { 542 + u32 rtscrc[6]; /* statistical check run length count */ 543 + u32 rtscrl[6]; /* statistical check run length limit */ 544 + }; 545 + u32 rsvd1[33]; 540 546 #define RDSTA_SKVT 0x80000000 541 547 #define RDSTA_SKVN 0x40000000 542 548 #define RDSTA_PR0 BIT(4)
+5
drivers/crypto/ccp/platform-access.c
··· 67 67 return -ENODEV; 68 68 69 69 pa_dev = psp->platform_access_data; 70 + 71 + if (!pa_dev->vdata->cmdresp_reg || !pa_dev->vdata->cmdbuff_addr_lo_reg || 72 + !pa_dev->vdata->cmdbuff_addr_hi_reg) 73 + return -ENODEV; 74 + 70 75 cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg; 71 76 lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg; 72 77 hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
+43
drivers/crypto/ccp/sp-pci.c
··· 361 361 .ring_rptr_reg = 0x10554, /* C2PMSG_21 */ 362 362 }; 363 363 364 + static const struct tee_vdata teev2 = { 365 + .cmdresp_reg = 0x10944, /* C2PMSG_17 */ 366 + .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */ 367 + .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */ 368 + .ring_wptr_reg = 0x10950, /* C2PMSG_20 */ 369 + .ring_rptr_reg = 0x10954, /* C2PMSG_21 */ 370 + }; 371 + 364 372 static const struct platform_access_vdata pa_v1 = { 365 373 .cmdresp_reg = 0x10570, /* C2PMSG_28 */ 366 374 .cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */ 367 375 .cmdbuff_addr_hi_reg = 0x10578, /* C2PMSG_30 */ 376 + .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */ 377 + .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */ 378 + }; 379 + 380 + static const struct platform_access_vdata pa_v2 = { 368 381 .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */ 369 382 .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */ 370 383 }; ··· 410 397 .feature_reg = 0x109fc, /* C2PMSG_63 */ 411 398 .inten_reg = 0x10690, /* P2CMSG_INTEN */ 412 399 .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ 400 + }; 401 + 402 + static const struct psp_vdata pspv5 = { 403 + .tee = &teev2, 404 + .platform_access = &pa_v2, 405 + .feature_reg = 0x109fc, /* C2PMSG_63 */ 406 + .inten_reg = 0x10510, /* P2CMSG_INTEN */ 407 + .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ 408 + }; 409 + 410 + static const struct psp_vdata pspv6 = { 411 + .sev = &sevv2, 412 + .tee = &teev2, 413 + .feature_reg = 0x109fc, /* C2PMSG_63 */ 414 + .inten_reg = 0x10510, /* P2CMSG_INTEN */ 415 + .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ 413 416 }; 414 417 415 418 #endif ··· 482 453 .psp_vdata = &pspv3, 483 454 #endif 484 455 }, 456 + { /* 7 */ 457 + .bar = 2, 458 + #ifdef CONFIG_CRYPTO_DEV_SP_PSP 459 + .psp_vdata = &pspv5, 460 + #endif 461 + }, 462 + { /* 8 */ 463 + .bar = 2, 464 + #ifdef CONFIG_CRYPTO_DEV_SP_PSP 465 + .psp_vdata = &pspv6, 466 + #endif 467 + }, 485 468 }; 486 469 static const struct pci_device_id sp_pci_table[] = { 487 470 { PCI_VDEVICE(AMD, 0x1537), 
(kernel_ulong_t)&dev_vdata[0] }, ··· 504 463 { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] }, 505 464 { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] }, 506 465 { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, 466 + { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, 467 + { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, 507 468 /* Last entry must be zero */ 508 469 { 0, } 509 470 };
-7
drivers/crypto/hisilicon/Kconfig
··· 82 82 select CRYPTO_RNG 83 83 help 84 84 Support for HiSilicon TRNG Driver. 85 - 86 - config CRYPTO_DEV_HISTB_TRNG 87 - tristate "Support for HiSTB TRNG Driver" 88 - depends on ARCH_HISI || COMPILE_TEST 89 - select HW_RANDOM 90 - help 91 - Support for HiSTB TRNG Driver.
+1 -1
drivers/crypto/hisilicon/Makefile
··· 5 5 obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o 6 6 hisi_qm-objs = qm.o sgl.o debugfs.o 7 7 obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/ 8 - obj-y += trng/ 8 + obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
-3
drivers/crypto/hisilicon/trng/Makefile
··· 1 1 obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += hisi-trng-v2.o 2 2 hisi-trng-v2-objs = trng.o 3 - 4 - obj-$(CONFIG_CRYPTO_DEV_HISTB_TRNG) += histb-trng.o 5 - histb-trng-objs += trng-stb.o
+40 -43
drivers/crypto/hisilicon/trng/trng-stb.c drivers/char/hw_random/histb-rng.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later OR MIT 2 2 /* 3 - * Device driver for True RNG in HiSTB SoCs 4 - * 5 3 * Copyright (c) 2023 David Yang 6 4 */ 7 5 8 - #include <crypto/internal/rng.h> 9 - #include <linux/device.h> 10 6 #include <linux/err.h> 11 7 #include <linux/hw_random.h> 12 8 #include <linux/io.h> 13 9 #include <linux/iopoll.h> 14 10 #include <linux/kernel.h> 11 + #include <linux/mod_devicetable.h> 15 12 #include <linux/module.h> 16 - #include <linux/mutex.h> 17 - #include <linux/of_device.h> 13 + #include <linux/platform_device.h> 18 14 19 - #define HISTB_TRNG_CTRL 0x0 15 + #define RNG_CTRL 0x0 20 16 #define RNG_SOURCE GENMASK(1, 0) 21 17 #define DROP_ENABLE BIT(5) 22 18 #define POST_PROCESS_ENABLE BIT(7) 23 19 #define POST_PROCESS_DEPTH GENMASK(15, 8) 24 - #define HISTB_TRNG_NUMBER 0x4 25 - #define HISTB_TRNG_STAT 0x8 20 + #define RNG_NUMBER 0x4 21 + #define RNG_STAT 0x8 26 22 #define DATA_COUNT GENMASK(2, 0) /* max 4 */ 27 23 28 - struct histb_trng_priv { 24 + struct histb_rng_priv { 29 25 struct hwrng rng; 30 26 void __iomem *base; 31 27 }; ··· 31 35 * depth = 1 -> ~1ms 32 36 * depth = 255 -> ~16ms 33 37 */ 34 - static int histb_trng_wait(void __iomem *base) 38 + static int histb_rng_wait(void __iomem *base) 35 39 { 36 40 u32 val; 37 41 38 - return readl_relaxed_poll_timeout(base + HISTB_TRNG_STAT, val, 42 + return readl_relaxed_poll_timeout(base + RNG_STAT, val, 39 43 val & DATA_COUNT, 1000, 30 * 1000); 40 44 } 41 45 42 - static void histb_trng_init(void __iomem *base, unsigned int depth) 46 + static void histb_rng_init(void __iomem *base, unsigned int depth) 43 47 { 44 48 u32 val; 45 49 46 - val = readl_relaxed(base + HISTB_TRNG_CTRL); 50 + val = readl_relaxed(base + RNG_CTRL); 47 51 48 52 val &= ~RNG_SOURCE; 49 53 val |= 2; ··· 54 58 val |= POST_PROCESS_ENABLE; 55 59 val |= DROP_ENABLE; 56 60 57 - writel_relaxed(val, base + HISTB_TRNG_CTRL); 61 + writel_relaxed(val, base + RNG_CTRL); 58 62 } 59 63 60 - static int 
histb_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) 64 + static int histb_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) 61 65 { 62 - struct histb_trng_priv *priv = container_of(rng, typeof(*priv), rng); 66 + struct histb_rng_priv *priv = container_of(rng, typeof(*priv), rng); 63 67 void __iomem *base = priv->base; 64 68 65 69 for (int i = 0; i < max; i += sizeof(u32)) { 66 - if (!(readl_relaxed(base + HISTB_TRNG_STAT) & DATA_COUNT)) { 70 + if (!(readl_relaxed(base + RNG_STAT) & DATA_COUNT)) { 67 71 if (!wait) 68 72 return i; 69 - if (histb_trng_wait(base)) { 73 + if (histb_rng_wait(base)) { 70 74 pr_err("failed to generate random number, generated %d\n", 71 75 i); 72 76 return i ? i : -ETIMEDOUT; 73 77 } 74 78 } 75 - *(u32 *) (data + i) = readl_relaxed(base + HISTB_TRNG_NUMBER); 79 + *(u32 *) (data + i) = readl_relaxed(base + RNG_NUMBER); 76 80 } 77 81 78 82 return max; 79 83 } 80 84 81 - static unsigned int histb_trng_get_depth(void __iomem *base) 85 + static unsigned int histb_rng_get_depth(void __iomem *base) 82 86 { 83 - return (readl_relaxed(base + HISTB_TRNG_CTRL) & POST_PROCESS_DEPTH) >> 8; 87 + return (readl_relaxed(base + RNG_CTRL) & POST_PROCESS_DEPTH) >> 8; 84 88 } 85 89 86 90 static ssize_t 87 91 depth_show(struct device *dev, struct device_attribute *attr, char *buf) 88 92 { 89 - struct histb_trng_priv *priv = dev_get_drvdata(dev); 93 + struct histb_rng_priv *priv = dev_get_drvdata(dev); 90 94 void __iomem *base = priv->base; 91 95 92 - return sprintf(buf, "%d\n", histb_trng_get_depth(base)); 96 + return sprintf(buf, "%d\n", histb_rng_get_depth(base)); 93 97 } 94 98 95 99 static ssize_t 96 100 depth_store(struct device *dev, struct device_attribute *attr, 97 101 const char *buf, size_t count) 98 102 { 99 - struct histb_trng_priv *priv = dev_get_drvdata(dev); 103 + struct histb_rng_priv *priv = dev_get_drvdata(dev); 100 104 void __iomem *base = priv->base; 101 105 unsigned int depth; 102 106 103 107 if (kstrtouint(buf, 
0, &depth)) 104 108 return -ERANGE; 105 109 106 - histb_trng_init(base, depth); 110 + histb_rng_init(base, depth); 107 111 return count; 108 112 } 109 113 110 114 static DEVICE_ATTR_RW(depth); 111 115 112 - static struct attribute *histb_trng_attrs[] = { 116 + static struct attribute *histb_rng_attrs[] = { 113 117 &dev_attr_depth.attr, 114 118 NULL, 115 119 }; 116 120 117 - ATTRIBUTE_GROUPS(histb_trng); 121 + ATTRIBUTE_GROUPS(histb_rng); 118 122 119 - static int histb_trng_probe(struct platform_device *pdev) 123 + static int histb_rng_probe(struct platform_device *pdev) 120 124 { 121 125 struct device *dev = &pdev->dev; 122 - struct histb_trng_priv *priv; 126 + struct histb_rng_priv *priv; 123 127 void __iomem *base; 124 128 int ret; 125 129 ··· 129 133 130 134 base = devm_platform_ioremap_resource(pdev, 0); 131 135 if (IS_ERR(base)) 132 - return -ENOMEM; 136 + return PTR_ERR(base); 133 137 134 - histb_trng_init(base, 144); 135 - if (histb_trng_wait(base)) { 138 + histb_rng_init(base, 144); 139 + if (histb_rng_wait(base)) { 136 140 dev_err(dev, "cannot bring up device\n"); 137 141 return -ENODEV; 138 142 } 139 143 140 144 priv->base = base; 141 145 priv->rng.name = pdev->name; 142 - priv->rng.read = histb_trng_read; 146 + priv->rng.read = histb_rng_read; 143 147 ret = devm_hwrng_register(dev, &priv->rng); 144 148 if (ret) { 145 149 dev_err(dev, "failed to register hwrng: %d\n", ret); ··· 151 155 return 0; 152 156 } 153 157 154 - static const struct of_device_id histb_trng_of_match[] = { 155 - { .compatible = "hisilicon,histb-trng", }, 158 + static const struct of_device_id histb_rng_of_match[] = { 159 + { .compatible = "hisilicon,histb-rng", }, 156 160 { } 157 161 }; 162 + MODULE_DEVICE_TABLE(of, histb_rng_of_match); 158 163 159 - static struct platform_driver histb_trng_driver = { 160 - .probe = histb_trng_probe, 164 + static struct platform_driver histb_rng_driver = { 165 + .probe = histb_rng_probe, 161 166 .driver = { 162 - .name = "histb-trng", 163 - 
.of_match_table = histb_trng_of_match, 164 - .dev_groups = histb_trng_groups, 167 + .name = "histb-rng", 168 + .of_match_table = histb_rng_of_match, 169 + .dev_groups = histb_rng_groups, 165 170 }, 166 171 }; 167 172 168 - module_platform_driver(histb_trng_driver); 173 + module_platform_driver(histb_rng_driver); 169 174 170 - MODULE_DESCRIPTION("HiSTB True RNG"); 175 + MODULE_DESCRIPTION("Hisilicon STB random number generator driver"); 171 176 MODULE_LICENSE("Dual MIT/GPL"); 172 177 MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
+1 -1
drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
··· 1175 1175 /* The 12 hmac bytes are scattered, 1176 1176 * we need to copy them into a safe buffer */ 1177 1177 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma); 1178 - crypt->icv_rev_aes = dma; 1179 1178 if (unlikely(!req_ctx->hmac_virt)) 1180 1179 goto free_buf_dst; 1180 + crypt->icv_rev_aes = dma; 1181 1181 if (!encrypt) { 1182 1182 scatterwalk_map_and_copy(req_ctx->hmac_virt, 1183 1183 req->src, cryptlen, authsize, 0);
+166 -63
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
··· 11 11 #include "adf_4xxx_hw_data.h" 12 12 #include "icp_qat_hw.h" 13 13 14 + enum adf_fw_objs { 15 + ADF_FW_SYM_OBJ, 16 + ADF_FW_ASYM_OBJ, 17 + ADF_FW_DC_OBJ, 18 + ADF_FW_ADMIN_OBJ, 19 + }; 20 + 21 + static const char * const adf_4xxx_fw_objs[] = { 22 + [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, 23 + [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ, 24 + [ADF_FW_DC_OBJ] = ADF_4XXX_DC_OBJ, 25 + [ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ, 26 + }; 27 + 28 + static const char * const adf_402xx_fw_objs[] = { 29 + [ADF_FW_SYM_OBJ] = ADF_402XX_SYM_OBJ, 30 + [ADF_FW_ASYM_OBJ] = ADF_402XX_ASYM_OBJ, 31 + [ADF_FW_DC_OBJ] = ADF_402XX_DC_OBJ, 32 + [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ, 33 + }; 34 + 14 35 struct adf_fw_config { 15 36 u32 ae_mask; 16 - char *obj_name; 37 + enum adf_fw_objs obj; 17 38 }; 18 39 19 - static struct adf_fw_config adf_4xxx_fw_cy_config[] = { 20 - {0xF0, ADF_4XXX_SYM_OBJ}, 21 - {0xF, ADF_4XXX_ASYM_OBJ}, 22 - {0x100, ADF_4XXX_ADMIN_OBJ}, 40 + static const struct adf_fw_config adf_fw_cy_config[] = { 41 + {0xF0, ADF_FW_SYM_OBJ}, 42 + {0xF, ADF_FW_ASYM_OBJ}, 43 + {0x100, ADF_FW_ADMIN_OBJ}, 23 44 }; 24 45 25 - static struct adf_fw_config adf_4xxx_fw_dc_config[] = { 26 - {0xF0, ADF_4XXX_DC_OBJ}, 27 - {0xF, ADF_4XXX_DC_OBJ}, 28 - {0x100, ADF_4XXX_ADMIN_OBJ}, 46 + static const struct adf_fw_config adf_fw_dc_config[] = { 47 + {0xF0, ADF_FW_DC_OBJ}, 48 + {0xF, ADF_FW_DC_OBJ}, 49 + {0x100, ADF_FW_ADMIN_OBJ}, 29 50 }; 30 51 31 - static struct adf_fw_config adf_402xx_fw_cy_config[] = { 32 - {0xF0, ADF_402XX_SYM_OBJ}, 33 - {0xF, ADF_402XX_ASYM_OBJ}, 34 - {0x100, ADF_402XX_ADMIN_OBJ}, 52 + static const struct adf_fw_config adf_fw_sym_config[] = { 53 + {0xF0, ADF_FW_SYM_OBJ}, 54 + {0xF, ADF_FW_SYM_OBJ}, 55 + {0x100, ADF_FW_ADMIN_OBJ}, 35 56 }; 36 57 37 - static struct adf_fw_config adf_402xx_fw_dc_config[] = { 38 - {0xF0, ADF_402XX_DC_OBJ}, 39 - {0xF, ADF_402XX_DC_OBJ}, 40 - {0x100, ADF_402XX_ADMIN_OBJ}, 58 + static const struct adf_fw_config adf_fw_asym_config[] = { 59 + {0xF0, 
ADF_FW_ASYM_OBJ}, 60 + {0xF, ADF_FW_ASYM_OBJ}, 61 + {0x100, ADF_FW_ADMIN_OBJ}, 41 62 }; 63 + 64 + static const struct adf_fw_config adf_fw_asym_dc_config[] = { 65 + {0xF0, ADF_FW_ASYM_OBJ}, 66 + {0xF, ADF_FW_DC_OBJ}, 67 + {0x100, ADF_FW_ADMIN_OBJ}, 68 + }; 69 + 70 + static const struct adf_fw_config adf_fw_sym_dc_config[] = { 71 + {0xF0, ADF_FW_SYM_OBJ}, 72 + {0xF, ADF_FW_DC_OBJ}, 73 + {0x100, ADF_FW_ADMIN_OBJ}, 74 + }; 75 + 76 + static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config)); 77 + static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config)); 78 + static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config)); 79 + static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config)); 80 + static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config)); 42 81 43 82 /* Worker thread to service arbiter mappings */ 44 - static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = { 83 + static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 45 84 0x5555555, 0x5555555, 0x5555555, 0x5555555, 46 85 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 47 86 0x0 ··· 100 61 101 62 enum dev_services { 102 63 SVC_CY = 0, 64 + SVC_CY2, 103 65 SVC_DC, 66 + SVC_SYM, 67 + SVC_ASYM, 68 + SVC_DC_ASYM, 69 + SVC_ASYM_DC, 70 + SVC_DC_SYM, 71 + SVC_SYM_DC, 104 72 }; 105 73 106 74 static const char *const dev_cfg_services[] = { 107 75 [SVC_CY] = ADF_CFG_CY, 76 + [SVC_CY2] = ADF_CFG_ASYM_SYM, 108 77 [SVC_DC] = ADF_CFG_DC, 78 + [SVC_SYM] = ADF_CFG_SYM, 79 + [SVC_ASYM] = ADF_CFG_ASYM, 80 + [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, 81 + [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, 82 + [SVC_DC_SYM] = ADF_CFG_DC_SYM, 83 + [SVC_SYM_DC] = ADF_CFG_SYM_DC, 109 84 }; 110 85 111 86 static int get_service_enabled(struct adf_accel_dev *accel_dev) ··· 209 156 static u32 get_accel_cap(struct adf_accel_dev *accel_dev) 210 157 { 211 158 struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; 212 - u32 capabilities_cy, 
capabilities_dc; 159 + u32 capabilities_sym, capabilities_asym, capabilities_dc; 213 160 u32 fusectl1; 214 161 215 162 /* Read accelerator capabilities mask */ 216 163 pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1); 217 164 218 - capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | 219 - ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | 165 + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | 220 166 ICP_ACCEL_CAPABILITIES_CIPHER | 221 167 ICP_ACCEL_CAPABILITIES_AUTHENTICATION | 222 168 ICP_ACCEL_CAPABILITIES_SHA3 | 223 169 ICP_ACCEL_CAPABILITIES_SHA3_EXT | 224 170 ICP_ACCEL_CAPABILITIES_HKDF | 225 - ICP_ACCEL_CAPABILITIES_ECEDMONT | 226 171 ICP_ACCEL_CAPABILITIES_CHACHA_POLY | 227 172 ICP_ACCEL_CAPABILITIES_AESGCM_SPC | 228 173 ICP_ACCEL_CAPABILITIES_AES_V2; 229 174 230 175 /* A set bit in fusectl1 means the feature is OFF in this SKU */ 231 176 if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { 232 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; 233 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF; 234 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 177 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; 178 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; 179 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 235 180 } 181 + 236 182 if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) { 237 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; 238 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; 239 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2; 240 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 183 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; 184 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; 185 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; 186 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 241 187 } 188 + 242 189 if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) { 243 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; 244 - 
capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3; 245 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; 246 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 190 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; 191 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; 192 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; 193 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 247 194 } 195 + 196 + capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | 197 + ICP_ACCEL_CAPABILITIES_CIPHER | 198 + ICP_ACCEL_CAPABILITIES_ECEDMONT; 199 + 248 200 if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { 249 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; 250 - capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; 201 + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; 202 + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; 251 203 } 252 204 253 205 capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | ··· 269 211 270 212 switch (get_service_enabled(accel_dev)) { 271 213 case SVC_CY: 272 - return capabilities_cy; 214 + case SVC_CY2: 215 + return capabilities_sym | capabilities_asym; 273 216 case SVC_DC: 274 217 return capabilities_dc; 218 + case SVC_SYM: 219 + return capabilities_sym; 220 + case SVC_ASYM: 221 + return capabilities_asym; 222 + case SVC_ASYM_DC: 223 + case SVC_DC_ASYM: 224 + return capabilities_asym | capabilities_dc; 225 + case SVC_SYM_DC: 226 + case SVC_DC_SYM: 227 + return capabilities_sym | capabilities_dc; 228 + default: 229 + return 0; 275 230 } 276 - 277 - return 0; 278 231 } 279 232 280 233 static enum dev_sku_info get_sku(struct adf_hw_device_data *self) ··· 296 227 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) 297 228 { 298 229 switch (get_service_enabled(accel_dev)) { 299 - case SVC_CY: 300 - return thrd_to_arb_map_cy; 301 230 case SVC_DC: 302 231 return thrd_to_arb_map_dc; 232 + default: 233 + return default_thrd_to_arb_map; 303 234 } 304 - 305 - return NULL; 
306 235 } 307 236 308 237 static void get_arb_info(struct arb_info *arb_info) ··· 371 304 372 305 static u32 uof_get_num_objs(void) 373 306 { 374 - BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) != 375 - ARRAY_SIZE(adf_4xxx_fw_dc_config), 376 - "Size mismatch between adf_4xxx_fw_*_config arrays"); 377 - 378 - return ARRAY_SIZE(adf_4xxx_fw_cy_config); 307 + return ARRAY_SIZE(adf_fw_cy_config); 379 308 } 380 309 381 - static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num) 310 + static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, 311 + const char * const fw_objs[], int num_objs) 382 312 { 313 + int id; 314 + 383 315 switch (get_service_enabled(accel_dev)) { 384 316 case SVC_CY: 385 - return adf_4xxx_fw_cy_config[obj_num].obj_name; 317 + case SVC_CY2: 318 + id = adf_fw_cy_config[obj_num].obj; 319 + break; 386 320 case SVC_DC: 387 - return adf_4xxx_fw_dc_config[obj_num].obj_name; 321 + id = adf_fw_dc_config[obj_num].obj; 322 + break; 323 + case SVC_SYM: 324 + id = adf_fw_sym_config[obj_num].obj; 325 + break; 326 + case SVC_ASYM: 327 + id = adf_fw_asym_config[obj_num].obj; 328 + break; 329 + case SVC_ASYM_DC: 330 + case SVC_DC_ASYM: 331 + id = adf_fw_asym_dc_config[obj_num].obj; 332 + break; 333 + case SVC_SYM_DC: 334 + case SVC_DC_SYM: 335 + id = adf_fw_sym_dc_config[obj_num].obj; 336 + break; 337 + default: 338 + id = -EINVAL; 339 + break; 388 340 } 389 341 390 - return NULL; 342 + if (id < 0 || id > num_objs) 343 + return NULL; 344 + 345 + return fw_objs[id]; 391 346 } 392 347 393 - static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num) 348 + static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num) 394 349 { 395 - switch (get_service_enabled(accel_dev)) { 396 - case SVC_CY: 397 - return adf_402xx_fw_cy_config[obj_num].obj_name; 398 - case SVC_DC: 399 - return adf_402xx_fw_dc_config[obj_num].obj_name; 400 - } 350 + int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs); 401 351 
402 - return NULL; 352 + return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs); 353 + } 354 + 355 + static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num) 356 + { 357 + int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs); 358 + 359 + return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs); 403 360 } 404 361 405 362 static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) 406 363 { 407 364 switch (get_service_enabled(accel_dev)) { 408 365 case SVC_CY: 409 - return adf_4xxx_fw_cy_config[obj_num].ae_mask; 366 + return adf_fw_cy_config[obj_num].ae_mask; 410 367 case SVC_DC: 411 - return adf_4xxx_fw_dc_config[obj_num].ae_mask; 368 + return adf_fw_dc_config[obj_num].ae_mask; 369 + case SVC_CY2: 370 + return adf_fw_cy_config[obj_num].ae_mask; 371 + case SVC_SYM: 372 + return adf_fw_sym_config[obj_num].ae_mask; 373 + case SVC_ASYM: 374 + return adf_fw_asym_config[obj_num].ae_mask; 375 + case SVC_ASYM_DC: 376 + case SVC_DC_ASYM: 377 + return adf_fw_asym_dc_config[obj_num].ae_mask; 378 + case SVC_SYM_DC: 379 + case SVC_DC_SYM: 380 + return adf_fw_sym_dc_config[obj_num].ae_mask; 381 + default: 382 + return 0; 412 383 } 413 - 414 - return 0; 415 384 } 416 385 417 386 void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+1 -1
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
··· 72 72 ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3), 73 73 ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4), 74 74 ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5), 75 - ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6), 75 + ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7), 76 76 }; 77 77 78 78 void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
+37 -8
drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
··· 7 7 #include <adf_accel_devices.h> 8 8 #include <adf_cfg.h> 9 9 #include <adf_common_drv.h> 10 + #include <adf_dbgfs.h> 10 11 11 12 #include "adf_4xxx_hw_data.h" 12 13 #include "qat_compression.h" ··· 25 24 enum configs { 26 25 DEV_CFG_CY = 0, 27 26 DEV_CFG_DC, 27 + DEV_CFG_SYM, 28 + DEV_CFG_ASYM, 29 + DEV_CFG_ASYM_SYM, 30 + DEV_CFG_ASYM_DC, 31 + DEV_CFG_DC_ASYM, 32 + DEV_CFG_SYM_DC, 33 + DEV_CFG_DC_SYM, 28 34 }; 29 35 30 36 static const char * const services_operations[] = { 31 37 ADF_CFG_CY, 32 38 ADF_CFG_DC, 39 + ADF_CFG_SYM, 40 + ADF_CFG_ASYM, 41 + ADF_CFG_ASYM_SYM, 42 + ADF_CFG_ASYM_DC, 43 + ADF_CFG_DC_ASYM, 44 + ADF_CFG_SYM_DC, 45 + ADF_CFG_DC_SYM, 33 46 }; 34 47 35 48 static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) ··· 52 37 adf_clean_hw_data_4xxx(accel_dev->hw_device); 53 38 accel_dev->hw_device = NULL; 54 39 } 40 + adf_dbgfs_exit(accel_dev); 55 41 adf_cfg_dev_remove(accel_dev); 56 - debugfs_remove(accel_dev->debugfs_dir); 57 42 adf_devmgr_rm_dev(accel_dev, NULL); 58 43 } 59 44 ··· 256 241 return ret; 257 242 } 258 243 244 + static int adf_no_dev_config(struct adf_accel_dev *accel_dev) 245 + { 246 + unsigned long val; 247 + int ret; 248 + 249 + val = 0; 250 + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, 251 + &val, ADF_DEC); 252 + if (ret) 253 + return ret; 254 + 255 + return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, 256 + &val, ADF_DEC); 257 + } 258 + 259 259 int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) 260 260 { 261 261 char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; ··· 295 265 296 266 switch (ret) { 297 267 case DEV_CFG_CY: 268 + case DEV_CFG_ASYM_SYM: 298 269 ret = adf_crypto_dev_config(accel_dev); 299 270 break; 300 271 case DEV_CFG_DC: 301 272 ret = adf_comp_dev_config(accel_dev); 273 + break; 274 + default: 275 + ret = adf_no_dev_config(accel_dev); 302 276 break; 303 277 } 304 278 ··· 323 289 struct adf_accel_dev *accel_dev; 324 290 struct adf_accel_pci 
*accel_pci_dev; 325 291 struct adf_hw_device_data *hw_data; 326 - char name[ADF_DEVICE_NAME_LENGTH]; 327 292 unsigned int i, bar_nr; 328 293 unsigned long bar_mask; 329 294 struct adf_bar *bar; ··· 381 348 goto out_err; 382 349 } 383 350 384 - /* Create dev top level debugfs entry */ 385 - snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 386 - hw_data->dev_class->name, pci_name(pdev)); 387 - 388 - accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 389 - 390 351 /* Create device configuration table */ 391 352 ret = adf_cfg_dev_add(accel_dev); 392 353 if (ret) ··· 436 409 ret = -ENOMEM; 437 410 goto out_err; 438 411 } 412 + 413 + adf_dbgfs_init(accel_dev); 439 414 440 415 ret = adf_dev_up(accel_dev, true); 441 416 if (ret)
+4 -8
drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
··· 16 16 #include <adf_accel_devices.h> 17 17 #include <adf_common_drv.h> 18 18 #include <adf_cfg.h> 19 + #include <adf_dbgfs.h> 19 20 #include "adf_c3xxx_hw_data.h" 20 21 21 22 static const struct pci_device_id adf_pci_tbl[] = { ··· 66 65 kfree(accel_dev->hw_device); 67 66 accel_dev->hw_device = NULL; 68 67 } 68 + adf_dbgfs_exit(accel_dev); 69 69 adf_cfg_dev_remove(accel_dev); 70 - debugfs_remove(accel_dev->debugfs_dir); 71 70 adf_devmgr_rm_dev(accel_dev, NULL); 72 71 } 73 72 ··· 76 75 struct adf_accel_dev *accel_dev; 77 76 struct adf_accel_pci *accel_pci_dev; 78 77 struct adf_hw_device_data *hw_data; 79 - char name[ADF_DEVICE_NAME_LENGTH]; 80 78 unsigned int i, bar_nr; 81 79 unsigned long bar_mask; 82 80 int ret; ··· 142 142 goto out_err; 143 143 } 144 144 145 - /* Create dev top level debugfs entry */ 146 - snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 147 - hw_data->dev_class->name, pci_name(pdev)); 148 - 149 - accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 150 - 151 145 /* Create device configuration table */ 152 146 ret = adf_cfg_dev_add(accel_dev); 153 147 if (ret) ··· 192 198 ret = -ENOMEM; 193 199 goto out_err_free_reg; 194 200 } 201 + 202 + adf_dbgfs_init(accel_dev); 195 203 196 204 ret = adf_dev_up(accel_dev, true); 197 205 if (ret)
+4 -8
drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
··· 16 16 #include <adf_accel_devices.h> 17 17 #include <adf_common_drv.h> 18 18 #include <adf_cfg.h> 19 + #include <adf_dbgfs.h> 19 20 #include "adf_c3xxxvf_hw_data.h" 20 21 21 22 static const struct pci_device_id adf_pci_tbl[] = { ··· 65 64 kfree(accel_dev->hw_device); 66 65 accel_dev->hw_device = NULL; 67 66 } 67 + adf_dbgfs_exit(accel_dev); 68 68 adf_cfg_dev_remove(accel_dev); 69 - debugfs_remove(accel_dev->debugfs_dir); 70 69 pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); 71 70 adf_devmgr_rm_dev(accel_dev, pf); 72 71 } ··· 77 76 struct adf_accel_dev *pf; 78 77 struct adf_accel_pci *accel_pci_dev; 79 78 struct adf_hw_device_data *hw_data; 80 - char name[ADF_DEVICE_NAME_LENGTH]; 81 79 unsigned int i, bar_nr; 82 80 unsigned long bar_mask; 83 81 int ret; ··· 123 123 hw_data->ae_mask = hw_data->get_ae_mask(hw_data); 124 124 accel_pci_dev->sku = hw_data->get_sku(hw_data); 125 125 126 - /* Create dev top level debugfs entry */ 127 - snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 128 - hw_data->dev_class->name, pci_name(pdev)); 129 - 130 - accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 131 - 132 126 /* Create device configuration table */ 133 127 ret = adf_cfg_dev_add(accel_dev); 134 128 if (ret) ··· 166 172 pci_set_master(pdev); 167 173 /* Completion for VF2PF request/response message exchange */ 168 174 init_completion(&accel_dev->vf.msg_received); 175 + 176 + adf_dbgfs_init(accel_dev); 169 177 170 178 ret = adf_dev_up(accel_dev, false); 171 179 if (ret)
+4 -8
drivers/crypto/intel/qat/qat_c62x/adf_drv.c
··· 16 16 #include <adf_accel_devices.h> 17 17 #include <adf_common_drv.h> 18 18 #include <adf_cfg.h> 19 + #include <adf_dbgfs.h> 19 20 #include "adf_c62x_hw_data.h" 20 21 21 22 static const struct pci_device_id adf_pci_tbl[] = { ··· 66 65 kfree(accel_dev->hw_device); 67 66 accel_dev->hw_device = NULL; 68 67 } 68 + adf_dbgfs_exit(accel_dev); 69 69 adf_cfg_dev_remove(accel_dev); 70 - debugfs_remove(accel_dev->debugfs_dir); 71 70 adf_devmgr_rm_dev(accel_dev, NULL); 72 71 } 73 72 ··· 76 75 struct adf_accel_dev *accel_dev; 77 76 struct adf_accel_pci *accel_pci_dev; 78 77 struct adf_hw_device_data *hw_data; 79 - char name[ADF_DEVICE_NAME_LENGTH]; 80 78 unsigned int i, bar_nr; 81 79 unsigned long bar_mask; 82 80 int ret; ··· 142 142 goto out_err; 143 143 } 144 144 145 - /* Create dev top level debugfs entry */ 146 - snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 147 - hw_data->dev_class->name, pci_name(pdev)); 148 - 149 - accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 150 - 151 145 /* Create device configuration table */ 152 146 ret = adf_cfg_dev_add(accel_dev); 153 147 if (ret) ··· 192 198 ret = -ENOMEM; 193 199 goto out_err_free_reg; 194 200 } 201 + 202 + adf_dbgfs_init(accel_dev); 195 203 196 204 ret = adf_dev_up(accel_dev, true); 197 205 if (ret)
+4 -8
drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
··· 16 16 #include <adf_accel_devices.h> 17 17 #include <adf_common_drv.h> 18 18 #include <adf_cfg.h> 19 + #include <adf_dbgfs.h> 19 20 #include "adf_c62xvf_hw_data.h" 20 21 21 22 static const struct pci_device_id adf_pci_tbl[] = { ··· 65 64 kfree(accel_dev->hw_device); 66 65 accel_dev->hw_device = NULL; 67 66 } 67 + adf_dbgfs_exit(accel_dev); 68 68 adf_cfg_dev_remove(accel_dev); 69 - debugfs_remove(accel_dev->debugfs_dir); 70 69 pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); 71 70 adf_devmgr_rm_dev(accel_dev, pf); 72 71 } ··· 77 76 struct adf_accel_dev *pf; 78 77 struct adf_accel_pci *accel_pci_dev; 79 78 struct adf_hw_device_data *hw_data; 80 - char name[ADF_DEVICE_NAME_LENGTH]; 81 79 unsigned int i, bar_nr; 82 80 unsigned long bar_mask; 83 81 int ret; ··· 123 123 hw_data->ae_mask = hw_data->get_ae_mask(hw_data); 124 124 accel_pci_dev->sku = hw_data->get_sku(hw_data); 125 125 126 - /* Create dev top level debugfs entry */ 127 - snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 128 - hw_data->dev_class->name, pci_name(pdev)); 129 - 130 - accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 131 - 132 126 /* Create device configuration table */ 133 127 ret = adf_cfg_dev_add(accel_dev); 134 128 if (ret) ··· 166 172 pci_set_master(pdev); 167 173 /* Completion for VF2PF request/response message exchange */ 168 174 init_completion(&accel_dev->vf.msg_received); 175 + 176 + adf_dbgfs_init(accel_dev); 169 177 170 178 ret = adf_dev_up(accel_dev, false); 171 179 if (ret)
+3 -1
drivers/crypto/intel/qat/qat_common/Makefile
··· 27 27 qat_hal.o \ 28 28 qat_bl.o 29 29 30 - intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o 30 + intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ 31 + adf_dbgfs.o 32 + 31 33 intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ 32 34 adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ 33 35 adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
+1 -1
drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
··· 202 202 int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr); 203 203 void (*reset_device)(struct adf_accel_dev *accel_dev); 204 204 void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); 205 - char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); 205 + const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); 206 206 u32 (*uof_get_num_objs)(void); 207 207 u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); 208 208 int (*dev_config)(struct adf_accel_dev *accel_dev);
+1 -1
drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
··· 13 13 struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; 14 14 struct adf_hw_device_data *hw_device = accel_dev->hw_device; 15 15 struct icp_qat_fw_loader_handle *loader; 16 - char *obj_name; 16 + const char *obj_name; 17 17 u32 num_objs; 18 18 u32 ae_mask; 19 19 int i;
-1
drivers/crypto/intel/qat/qat_common/adf_admin.c
··· 286 286 287 287 return adf_send_admin(accel_dev, &req, &resp, ae_mask); 288 288 } 289 - EXPORT_SYMBOL_GPL(adf_init_admin_pm); 290 289 291 290 int adf_init_admin_comms(struct adf_accel_dev *accel_dev) 292 291 {
+21 -7
drivers/crypto/intel/qat/qat_common/adf_cfg.c
··· 74 74 INIT_LIST_HEAD(&dev_cfg_data->sec_list); 75 75 init_rwsem(&dev_cfg_data->lock); 76 76 accel_dev->cfg = dev_cfg_data; 77 - 78 - /* accel_dev->debugfs_dir should always be non-NULL here */ 79 - dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR, 80 - accel_dev->debugfs_dir, 81 - dev_cfg_data, 82 - &qat_dev_cfg_fops); 83 77 return 0; 84 78 } 85 79 EXPORT_SYMBOL_GPL(adf_cfg_dev_add); 80 + 81 + void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev) 82 + { 83 + struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; 84 + 85 + dev_cfg_data->debug = debugfs_create_file("dev_cfg", 0400, 86 + accel_dev->debugfs_dir, 87 + dev_cfg_data, 88 + &qat_dev_cfg_fops); 89 + } 90 + 91 + void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev) 92 + { 93 + struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; 94 + 95 + if (!dev_cfg_data) 96 + return; 97 + 98 + debugfs_remove(dev_cfg_data->debug); 99 + dev_cfg_data->debug = NULL; 100 + } 86 101 87 102 static void adf_cfg_section_del_all(struct list_head *head); 88 103 ··· 131 116 down_write(&dev_cfg_data->lock); 132 117 adf_cfg_section_del_all(&dev_cfg_data->sec_list); 133 118 up_write(&dev_cfg_data->lock); 134 - debugfs_remove(dev_cfg_data->debug); 135 119 kfree(dev_cfg_data); 136 120 accel_dev->cfg = NULL; 137 121 }
+2
drivers/crypto/intel/qat/qat_common/adf_cfg.h
··· 31 31 32 32 int adf_cfg_dev_add(struct adf_accel_dev *accel_dev); 33 33 void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev); 34 + void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev); 35 + void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev); 34 36 int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); 35 37 void adf_cfg_del_all(struct adf_accel_dev *accel_dev); 36 38 int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+8
drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
··· 25 25 #define ADF_DC "Dc" 26 26 #define ADF_CFG_DC "dc" 27 27 #define ADF_CFG_CY "sym;asym" 28 + #define ADF_CFG_SYM "sym" 29 + #define ADF_CFG_ASYM "asym" 30 + #define ADF_CFG_ASYM_SYM "asym;sym" 31 + #define ADF_CFG_ASYM_DC "asym;dc" 32 + #define ADF_CFG_DC_ASYM "dc;asym" 33 + #define ADF_CFG_SYM_DC "sym;dc" 34 + #define ADF_CFG_DC_SYM "dc;sym" 28 35 #define ADF_SERVICES_ENABLED "ServicesEnabled" 36 + #define ADF_PM_IDLE_SUPPORT "PmIdleSupport" 29 37 #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" 30 38 #define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ 31 39 ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
+1 -1
drivers/crypto/intel/qat/qat_common/adf_common_drv.h
··· 187 187 int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, 188 188 int mem_size); 189 189 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, 190 - void *addr_ptr, u32 mem_size, char *obj_name); 190 + void *addr_ptr, u32 mem_size, const char *obj_name); 191 191 int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, 192 192 unsigned int cfg_ae_mask); 193 193 int adf_init_misc_wq(void);
+69
drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright(c) 2023 Intel Corporation */ 3 + 4 + #include <linux/debugfs.h> 5 + #include "adf_accel_devices.h" 6 + #include "adf_cfg.h" 7 + #include "adf_common_drv.h" 8 + #include "adf_dbgfs.h" 9 + 10 + /** 11 + * adf_dbgfs_init() - add persistent debugfs entries 12 + * @accel_dev: Pointer to acceleration device. 13 + * 14 + * This function creates debugfs entries that are persistent through a device 15 + * state change (from up to down or vice versa). 16 + */ 17 + void adf_dbgfs_init(struct adf_accel_dev *accel_dev) 18 + { 19 + char name[ADF_DEVICE_NAME_LENGTH]; 20 + void *ret; 21 + 22 + /* Create dev top level debugfs entry */ 23 + snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 24 + accel_dev->hw_device->dev_class->name, 25 + pci_name(accel_dev->accel_pci_dev.pci_dev)); 26 + 27 + ret = debugfs_create_dir(name, NULL); 28 + if (IS_ERR_OR_NULL(ret)) 29 + return; 30 + 31 + accel_dev->debugfs_dir = ret; 32 + 33 + adf_cfg_dev_dbgfs_add(accel_dev); 34 + } 35 + EXPORT_SYMBOL_GPL(adf_dbgfs_init); 36 + 37 + /** 38 + * adf_dbgfs_exit() - remove persistent debugfs entries 39 + * @accel_dev: Pointer to acceleration device. 40 + */ 41 + void adf_dbgfs_exit(struct adf_accel_dev *accel_dev) 42 + { 43 + adf_cfg_dev_dbgfs_rm(accel_dev); 44 + debugfs_remove(accel_dev->debugfs_dir); 45 + } 46 + EXPORT_SYMBOL_GPL(adf_dbgfs_exit); 47 + 48 + /** 49 + * adf_dbgfs_add() - add non-persistent debugfs entries 50 + * @accel_dev: Pointer to acceleration device. 51 + * 52 + * This function creates debugfs entries that are not persistent through 53 + * a device state change (from up to down or vice versa). 54 + */ 55 + void adf_dbgfs_add(struct adf_accel_dev *accel_dev) 56 + { 57 + if (!accel_dev->debugfs_dir) 58 + return; 59 + } 60 + 61 + /** 62 + * adf_dbgfs_rm() - remove non-persistent debugfs entries 63 + * @accel_dev: Pointer to acceleration device. 
64 + */ 65 + void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) 66 + { 67 + if (!accel_dev->debugfs_dir) 68 + return; 69 + }
+29
drivers/crypto/intel/qat/qat_common/adf_dbgfs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* Copyright(c) 2023 Intel Corporation */ 3 + 4 + #ifndef ADF_DBGFS_H 5 + #define ADF_DBGFS_H 6 + 7 + #ifdef CONFIG_DEBUG_FS 8 + void adf_dbgfs_init(struct adf_accel_dev *accel_dev); 9 + void adf_dbgfs_add(struct adf_accel_dev *accel_dev); 10 + void adf_dbgfs_rm(struct adf_accel_dev *accel_dev); 11 + void adf_dbgfs_exit(struct adf_accel_dev *accel_dev); 12 + #else 13 + static inline void adf_dbgfs_init(struct adf_accel_dev *accel_dev) 14 + { 15 + } 16 + 17 + static inline void adf_dbgfs_add(struct adf_accel_dev *accel_dev) 18 + { 19 + } 20 + 21 + static inline void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) 22 + { 23 + } 24 + 25 + static inline void adf_dbgfs_exit(struct adf_accel_dev *accel_dev) 26 + { 27 + } 28 + #endif 29 + #endif
+11 -1
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
··· 23 23 24 24 static int send_host_msg(struct adf_accel_dev *accel_dev) 25 25 { 26 + char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {}; 26 27 void __iomem *pmisc = adf_get_pmisc_base(accel_dev); 28 + bool pm_idle_support; 27 29 u32 msg; 30 + int ret; 28 31 29 32 msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG); 30 33 if (msg & ADF_GEN4_PM_MSG_PENDING) 31 34 return -EBUSY; 32 35 36 + adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, 37 + ADF_PM_IDLE_SUPPORT, pm_idle_support_cfg); 38 + ret = kstrtobool(pm_idle_support_cfg, &pm_idle_support); 39 + if (ret) 40 + pm_idle_support = true; 41 + 33 42 /* Send HOST_MSG */ 34 - msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN); 43 + msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, 44 + pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE); 35 45 msg |= ADF_GEN4_PM_MSG_PENDING; 36 46 ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg); 37 47
+1
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
··· 37 37 38 38 #define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0) 39 39 #define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7) 40 + #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1) 40 41 41 42 int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev); 42 43 bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+6
drivers/crypto/intel/qat/qat_common/adf_init.c
··· 7 7 #include "adf_accel_devices.h" 8 8 #include "adf_cfg.h" 9 9 #include "adf_common_drv.h" 10 + #include "adf_dbgfs.h" 10 11 11 12 static LIST_HEAD(service_table); 12 13 static DEFINE_MUTEX(service_lock); ··· 217 216 clear_bit(ADF_STATUS_STARTED, &accel_dev->status); 218 217 return -EFAULT; 219 218 } 219 + 220 + adf_dbgfs_add(accel_dev); 221 + 220 222 return 0; 221 223 } 222 224 ··· 243 239 if (!adf_dev_started(accel_dev) && 244 240 !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) 245 241 return; 242 + 243 + adf_dbgfs_rm(accel_dev); 246 244 247 245 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); 248 246 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+60
drivers/crypto/intel/qat/qat_common/adf_sysfs.c
··· 78 78 static const char * const services_operations[] = { 79 79 ADF_CFG_CY, 80 80 ADF_CFG_DC, 81 + ADF_CFG_SYM, 82 + ADF_CFG_ASYM, 83 + ADF_CFG_ASYM_SYM, 84 + ADF_CFG_ASYM_DC, 85 + ADF_CFG_DC_ASYM, 86 + ADF_CFG_SYM_DC, 87 + ADF_CFG_DC_SYM, 81 88 }; 82 89 83 90 static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr, ··· 152 145 return count; 153 146 } 154 147 148 + static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr, 149 + char *buf) 150 + { 151 + char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {}; 152 + struct adf_accel_dev *accel_dev; 153 + int ret; 154 + 155 + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); 156 + if (!accel_dev) 157 + return -EINVAL; 158 + 159 + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, 160 + ADF_PM_IDLE_SUPPORT, pm_idle_enabled); 161 + if (ret) 162 + return sysfs_emit(buf, "1\n"); 163 + 164 + return sysfs_emit(buf, "%s\n", pm_idle_enabled); 165 + } 166 + 167 + static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr, 168 + const char *buf, size_t count) 169 + { 170 + unsigned long pm_idle_enabled_cfg_val; 171 + struct adf_accel_dev *accel_dev; 172 + bool pm_idle_enabled; 173 + int ret; 174 + 175 + ret = kstrtobool(buf, &pm_idle_enabled); 176 + if (ret) 177 + return ret; 178 + 179 + pm_idle_enabled_cfg_val = pm_idle_enabled; 180 + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); 181 + if (!accel_dev) 182 + return -EINVAL; 183 + 184 + if (adf_dev_started(accel_dev)) { 185 + dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n", 186 + accel_dev->accel_id); 187 + return -EINVAL; 188 + } 189 + 190 + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, 191 + ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val, 192 + ADF_DEC); 193 + if (ret) 194 + return ret; 195 + 196 + return count; 197 + } 198 + static DEVICE_ATTR_RW(pm_idle_enabled); 199 + 155 200 static DEVICE_ATTR_RW(state); 156 201 static 
DEVICE_ATTR_RW(cfg_services); 157 202 158 203 static struct attribute *qat_attrs[] = { 159 204 &dev_attr_state.attr, 160 205 &dev_attr_cfg_services.attr, 206 + &dev_attr_pm_idle_enabled.attr, 161 207 NULL, 162 208 }; 163 209
+1 -2
drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
··· 87 87 ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3), 88 88 ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4), 89 89 ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5), 90 - ICP_ACCEL_CAPABILITIES_LZS_COMPRESSION = BIT(6), 91 - ICP_ACCEL_CAPABILITIES_RAND = BIT(7), 90 + /* Bits 6-7 are currently reserved */ 92 91 ICP_ACCEL_CAPABILITIES_ZUC = BIT(8), 93 92 ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9), 94 93 /* Bits 10-11 are currently reserved */
-1
drivers/crypto/intel/qat/qat_common/qat_algs.c
··· 106 106 default: 107 107 return -EFAULT; 108 108 } 109 - return -EFAULT; 110 109 } 111 110 112 111 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+6 -8
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
··· 170 170 } 171 171 172 172 areq->dst_len = req->ctx.dh->p_size; 173 + dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, 174 + DMA_FROM_DEVICE); 173 175 if (req->dst_align) { 174 176 scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, 175 177 areq->dst_len, 1); 176 178 kfree_sensitive(req->dst_align); 177 179 } 178 - 179 - dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, 180 - DMA_FROM_DEVICE); 181 180 182 181 dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params), 183 182 DMA_TO_DEVICE); ··· 520 521 521 522 err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; 522 523 523 - kfree_sensitive(req->src_align); 524 - 525 524 dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, 526 525 DMA_TO_DEVICE); 527 526 527 + kfree_sensitive(req->src_align); 528 + 528 529 areq->dst_len = req->ctx.rsa->key_sz; 530 + dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, 531 + DMA_FROM_DEVICE); 529 532 if (req->dst_align) { 530 533 scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, 531 534 areq->dst_len, 1); 532 535 533 536 kfree_sensitive(req->dst_align); 534 537 } 535 - 536 - dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, 537 - DMA_FROM_DEVICE); 538 538 539 539 dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), 540 540 DMA_TO_DEVICE);
+4 -4
drivers/crypto/intel/qat/qat_common/qat_uclo.c
··· 1685 1685 } 1686 1686 1687 1687 static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, 1688 - char *obj_name, char **obj_ptr, 1688 + const char *obj_name, char **obj_ptr, 1689 1689 unsigned int *obj_size) 1690 1690 { 1691 1691 struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr; ··· 1837 1837 1838 1838 static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle, 1839 1839 struct icp_qat_mof_file_hdr *mof_ptr, 1840 - u32 mof_size, char *obj_name, char **obj_ptr, 1841 - unsigned int *obj_size) 1840 + u32 mof_size, const char *obj_name, 1841 + char **obj_ptr, unsigned int *obj_size) 1842 1842 { 1843 1843 struct icp_qat_mof_chunkhdr *mof_chunkhdr; 1844 1844 unsigned int file_id = mof_ptr->file_id; ··· 1888 1888 } 1889 1889 1890 1890 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, 1891 - void *addr_ptr, u32 mem_size, char *obj_name) 1891 + void *addr_ptr, u32 mem_size, const char *obj_name) 1892 1892 { 1893 1893 char *obj_addr; 1894 1894 u32 obj_size;
+4 -8
drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
··· 16 16 #include <adf_accel_devices.h> 17 17 #include <adf_common_drv.h> 18 18 #include <adf_cfg.h> 19 + #include <adf_dbgfs.h> 19 20 #include "adf_dh895xcc_hw_data.h" 20 21 21 22 static const struct pci_device_id adf_pci_tbl[] = { ··· 66 65 kfree(accel_dev->hw_device); 67 66 accel_dev->hw_device = NULL; 68 67 } 68 + adf_dbgfs_exit(accel_dev); 69 69 adf_cfg_dev_remove(accel_dev); 70 - debugfs_remove(accel_dev->debugfs_dir); 71 70 adf_devmgr_rm_dev(accel_dev, NULL); 72 71 } 73 72 ··· 76 75 struct adf_accel_dev *accel_dev; 77 76 struct adf_accel_pci *accel_pci_dev; 78 77 struct adf_hw_device_data *hw_data; 79 - char name[ADF_DEVICE_NAME_LENGTH]; 80 78 unsigned int i, bar_nr; 81 79 unsigned long bar_mask; 82 80 int ret; ··· 140 140 goto out_err; 141 141 } 142 142 143 - /* Create dev top level debugfs entry */ 144 - snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 145 - hw_data->dev_class->name, pci_name(pdev)); 146 - 147 - accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 148 - 149 143 /* Create device configuration table */ 150 144 ret = adf_cfg_dev_add(accel_dev); 151 145 if (ret) ··· 192 198 ret = -ENOMEM; 193 199 goto out_err_free_reg; 194 200 } 201 + 202 + adf_dbgfs_init(accel_dev); 195 203 196 204 ret = adf_dev_up(accel_dev, true); 197 205 if (ret)
+4 -8
drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
··· 16 16 #include <adf_accel_devices.h> 17 17 #include <adf_common_drv.h> 18 18 #include <adf_cfg.h> 19 + #include <adf_dbgfs.h> 19 20 #include "adf_dh895xccvf_hw_data.h" 20 21 21 22 static const struct pci_device_id adf_pci_tbl[] = { ··· 65 64 kfree(accel_dev->hw_device); 66 65 accel_dev->hw_device = NULL; 67 66 } 67 + adf_dbgfs_exit(accel_dev); 68 68 adf_cfg_dev_remove(accel_dev); 69 - debugfs_remove(accel_dev->debugfs_dir); 70 69 pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); 71 70 adf_devmgr_rm_dev(accel_dev, pf); 72 71 } ··· 77 76 struct adf_accel_dev *pf; 78 77 struct adf_accel_pci *accel_pci_dev; 79 78 struct adf_hw_device_data *hw_data; 80 - char name[ADF_DEVICE_NAME_LENGTH]; 81 79 unsigned int i, bar_nr; 82 80 unsigned long bar_mask; 83 81 int ret; ··· 123 123 hw_data->ae_mask = hw_data->get_ae_mask(hw_data); 124 124 accel_pci_dev->sku = hw_data->get_sku(hw_data); 125 125 126 - /* Create dev top level debugfs entry */ 127 - snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX, 128 - hw_data->dev_class->name, pci_name(pdev)); 129 - 130 - accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 131 - 132 126 /* Create device configuration table */ 133 127 ret = adf_cfg_dev_add(accel_dev); 134 128 if (ret) ··· 166 172 pci_set_master(pdev); 167 173 /* Completion for VF2PF request/response message exchange */ 168 174 init_completion(&accel_dev->vf.msg_received); 175 + 176 + adf_dbgfs_init(accel_dev); 169 177 170 178 ret = adf_dev_up(accel_dev, false); 171 179 if (ret)
+1 -1
drivers/crypto/marvell/cesa/cipher.c
··· 297 297 static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher, 298 298 const u8 *key, unsigned int len) 299 299 { 300 - struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); 300 + struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher); 301 301 int err; 302 302 303 303 err = verify_skcipher_des3_key(cipher, key);
+15
drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
··· 40 40 }; 41 41 42 42 /* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */ 43 + #define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE 43 44 #define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF 44 45 #define MBOX_MSG_GET_CAPS 0xBFD 45 46 #define MBOX_MSG_GET_KVF_LIMITS 0xBFC 47 + 48 + /* 49 + * Message request to config cpt lf for inline inbound ipsec. 50 + * This message is only used between CPT PF <-> CPT VF 51 + */ 52 + struct otx2_cpt_rx_inline_lf_cfg { 53 + struct mbox_msghdr hdr; 54 + u16 sso_pf_func; 55 + u16 param1; 56 + u16 param2; 57 + u16 opcode; 58 + u32 credit; 59 + u32 reserved; 60 + }; 46 61 47 62 /* 48 63 * Message request and response to get engine group number
+3
drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
··· 141 141 req->hdr.sig = OTX2_MBOX_REQ_SIG; 142 142 req->hdr.pcifunc = 0; 143 143 req->cptlfs = lfs->lfs_num; 144 + req->cpt_blkaddr = lfs->blkaddr; 145 + req->modify = 1; 144 146 ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev); 145 147 if (ret) 146 148 return ret; ··· 170 168 req->hdr.id = MBOX_MSG_DETACH_RESOURCES; 171 169 req->hdr.sig = OTX2_MBOX_REQ_SIG; 172 170 req->hdr.pcifunc = 0; 171 + req->cptlfs = 1; 173 172 ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev); 174 173 if (ret) 175 174 return ret;
+17 -17
drivers/crypto/marvell/octeontx2/otx2_cptlf.c
··· 13 13 { 14 14 union otx2_cptx_lf_done_wait done_wait; 15 15 16 - done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 17 - OTX2_CPT_LF_DONE_WAIT); 16 + done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, 17 + lf->slot, OTX2_CPT_LF_DONE_WAIT); 18 18 done_wait.s.time_wait = time_wait; 19 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 19 + otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot, 20 20 OTX2_CPT_LF_DONE_WAIT, done_wait.u); 21 21 } 22 22 ··· 24 24 { 25 25 union otx2_cptx_lf_done_wait done_wait; 26 26 27 - done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 28 - OTX2_CPT_LF_DONE_WAIT); 27 + done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, 28 + lf->slot, OTX2_CPT_LF_DONE_WAIT); 29 29 done_wait.s.num_wait = num_wait; 30 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 30 + otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot, 31 31 OTX2_CPT_LF_DONE_WAIT, done_wait.u); 32 32 } 33 33 ··· 147 147 irq_misc.s.nwrp = 0x1; 148 148 149 149 for (slot = 0; slot < lfs->lfs_num; slot++) 150 - otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg, 150 + otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg, 151 151 irq_misc.u); 152 152 } 153 153 ··· 157 157 158 158 /* Enable done interrupts */ 159 159 for (slot = 0; slot < lfs->lfs_num; slot++) 160 - otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, 160 + otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, 161 161 OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1); 162 162 /* Enable Misc interrupts */ 163 163 cptlf_set_misc_intrs(lfs, true); ··· 168 168 int slot; 169 169 170 170 for (slot = 0; slot < lfs->lfs_num; slot++) 171 - otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, 171 + otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, 172 172 OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1); 173 173 cptlf_set_misc_intrs(lfs, false); 174 174 } ··· 177 177 { 178 178 union otx2_cptx_lf_done irq_cnt; 179 179 180 - irq_cnt.u = 
otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 180 + irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot, 181 181 OTX2_CPT_LF_DONE); 182 182 return irq_cnt.s.done; 183 183 } ··· 189 189 struct device *dev; 190 190 191 191 dev = &lf->lfs->pdev->dev; 192 - irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 193 - OTX2_CPT_LF_MISC_INT); 192 + irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, 193 + lf->slot, OTX2_CPT_LF_MISC_INT); 194 194 irq_misc_ack.u = 0x0; 195 195 196 196 if (irq_misc.s.fault) { ··· 222 222 } 223 223 224 224 /* Acknowledge interrupts */ 225 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 225 + otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot, 226 226 OTX2_CPT_LF_MISC_INT, irq_misc_ack.u); 227 227 228 228 return IRQ_HANDLED; ··· 237 237 /* Read the number of completed requests */ 238 238 irq_cnt = cptlf_read_done_cnt(lf); 239 239 if (irq_cnt) { 240 - done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, 240 + done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, 241 241 lf->slot, OTX2_CPT_LF_DONE_WAIT); 242 242 /* Acknowledge the number of completed requests */ 243 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 243 + otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot, 244 244 OTX2_CPT_LF_DONE_ACK, irq_cnt); 245 245 246 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 246 + otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot, 247 247 OTX2_CPT_LF_DONE_WAIT, done_wait.u); 248 248 if (unlikely(!lf->wqe)) { 249 249 dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n", ··· 393 393 OTX2_CPT_LMT_LF_LMTLINEX(0)); 394 394 395 395 lfs->lf[slot].ioreg = lfs->reg_base + 396 - OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot, 396 + OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot, 397 397 OTX2_CPT_LF_NQX(0)); 398 398 } 399 399 /* Send request to attach LFs */
+24 -9
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
··· 180 180 181 181 for (slot = 0; slot < lfs->lfs_num; slot++) { 182 182 lf_q_base.u = lfs->lf[slot].iqueue.dma_addr; 183 - otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, 183 + otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, 184 184 OTX2_CPT_LF_Q_BASE, lf_q_base.u); 185 185 } 186 186 } ··· 191 191 192 192 lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 + 193 193 OTX2_CPT_EXTRA_SIZE_DIV40; 194 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 194 + otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot, 195 195 OTX2_CPT_LF_Q_SIZE, lf_q_size.u); 196 196 } 197 197 ··· 207 207 { 208 208 union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 }; 209 209 union otx2_cptx_lf_inprog lf_inprog; 210 + u8 blkaddr = lf->lfs->blkaddr; 210 211 int timeout = 20; 211 212 212 213 /* Disable instructions enqueuing */ 213 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 214 + otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot, 214 215 OTX2_CPT_LF_CTL, lf_ctl.u); 215 216 216 217 /* Wait for instruction queue to become empty */ 217 218 do { 218 - lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, 219 + lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, 219 220 lf->slot, OTX2_CPT_LF_INPROG); 220 221 if (!lf_inprog.s.inflight) 221 222 break; ··· 235 234 * the queue should be empty at this point 236 235 */ 237 236 lf_inprog.s.eena = 0x0; 238 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 237 + otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot, 239 238 OTX2_CPT_LF_INPROG, lf_inprog.u); 240 239 } 241 240 ··· 250 249 static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf, 251 250 bool enable) 252 251 { 252 + u8 blkaddr = lf->lfs->blkaddr; 253 253 union otx2_cptx_lf_ctl lf_ctl; 254 254 255 - lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 255 + lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot, 256 256 OTX2_CPT_LF_CTL); 257 257 258 258 /* Set iqueue's enqueuing */ 259 259 
lf_ctl.s.ena = enable ? 0x1 : 0x0; 260 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 260 + otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot, 261 261 OTX2_CPT_LF_CTL, lf_ctl.u); 262 262 } 263 263 ··· 271 269 bool enable) 272 270 { 273 271 union otx2_cptx_lf_inprog lf_inprog; 272 + u8 blkaddr = lf->lfs->blkaddr; 274 273 275 - lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 274 + lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot, 276 275 OTX2_CPT_LF_INPROG); 277 276 278 277 /* Set iqueue's execution */ 279 278 lf_inprog.s.eena = enable ? 0x1 : 0x0; 280 - otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, 279 + otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot, 281 280 OTX2_CPT_LF_INPROG, lf_inprog.u); 282 281 } 283 282 ··· 365 362 static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs) 366 363 { 367 364 return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED; 365 + } 366 + 367 + static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs, 368 + struct pci_dev *pdev, 369 + void __iomem *reg_base, 370 + struct otx2_mbox *mbox, 371 + int blkaddr) 372 + { 373 + lfs->pdev = pdev; 374 + lfs->reg_base = reg_base; 375 + lfs->mbox = mbox; 376 + lfs->blkaddr = blkaddr; 368 377 } 369 378 370 379 int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
+7
drivers/crypto/marvell/octeontx2/otx2_cptpf.h
··· 31 31 struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM]; 32 32 struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */ 33 33 struct otx2_cptlfs_info lfs; /* CPT LFs attached to this PF */ 34 + struct otx2_cptlfs_info cpt1_lfs; /* CPT1 LFs attached to this PF */ 34 35 /* HW capabilities for each engine type */ 35 36 union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES]; 36 37 bool is_eng_caps_discovered; ··· 40 39 struct otx2_mbox afpf_mbox; 41 40 struct work_struct afpf_mbox_work; 42 41 struct workqueue_struct *afpf_mbox_wq; 42 + 43 + struct otx2_mbox afpf_mbox_up; 44 + struct work_struct afpf_mbox_up_work; 43 45 44 46 /* VF <=> PF mbox */ 45 47 struct otx2_mbox vfpf_mbox; ··· 56 52 u8 pf_id; /* RVU PF number */ 57 53 u8 max_vfs; /* Maximum number of VFs supported by CPT */ 58 54 u8 enabled_vfs; /* Number of enabled VFs */ 55 + u8 sso_pf_func_ovrd; /* SSO PF_FUNC override bit */ 59 56 u8 kvf_limits; /* Kernel crypto limits */ 60 57 bool has_cpt1; 58 + u8 rsrc_req_blkaddr; 61 59 62 60 /* Devlink */ 63 61 struct devlink *dl; ··· 67 61 68 62 irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg); 69 63 void otx2_cptpf_afpf_mbox_handler(struct work_struct *work); 64 + void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work); 70 65 irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg); 71 66 void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work); 72 67
+41
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
··· 13 13 #define OTX2_CPT_DRV_NAME "rvu_cptpf" 14 14 #define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver" 15 15 16 + #define CPT_UC_RID_CN9K_B0 1 17 + 16 18 static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf, 17 19 int num_vfs) 18 20 { ··· 475 473 if (err) 476 474 goto error; 477 475 476 + err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base, 477 + pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1); 478 + if (err) 479 + goto mbox_cleanup; 480 + 478 481 INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler); 482 + INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler); 479 483 mutex_init(&cptpf->lock); 484 + 480 485 return 0; 481 486 487 + mbox_cleanup: 488 + otx2_mbox_destroy(&cptpf->afpf_mbox); 482 489 error: 483 490 destroy_workqueue(cptpf->afpf_mbox_wq); 484 491 return err; ··· 497 486 { 498 487 destroy_workqueue(cptpf->afpf_mbox_wq); 499 488 otx2_mbox_destroy(&cptpf->afpf_mbox); 489 + otx2_mbox_destroy(&cptpf->afpf_mbox_up); 490 + } 491 + 492 + static ssize_t sso_pf_func_ovrd_show(struct device *dev, 493 + struct device_attribute *attr, char *buf) 494 + { 495 + struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev); 496 + 497 + return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd); 498 + } 499 + 500 + static ssize_t sso_pf_func_ovrd_store(struct device *dev, 501 + struct device_attribute *attr, 502 + const char *buf, size_t count) 503 + { 504 + struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev); 505 + u8 sso_pf_func_ovrd; 506 + 507 + if (!(cptpf->pdev->revision == CPT_UC_RID_CN9K_B0)) 508 + return count; 509 + 510 + if (kstrtou8(buf, 0, &sso_pf_func_ovrd)) 511 + return -EINVAL; 512 + 513 + cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd; 514 + 515 + return count; 500 516 } 501 517 502 518 static ssize_t kvf_limits_show(struct device *dev, ··· 556 518 } 557 519 558 520 static DEVICE_ATTR_RW(kvf_limits); 521 + static DEVICE_ATTR_RW(sso_pf_func_ovrd); 522 + 559 523 static struct attribute *cptpf_attrs[] = { 
560 524 &dev_attr_kvf_limits.attr, 525 + &dev_attr_sso_pf_func_ovrd.attr, 561 526 NULL 562 527 }; 563 528
+243 -4
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
··· 5 5 #include "otx2_cptpf.h" 6 6 #include "rvu_reg.h" 7 7 8 + /* Fastpath ipsec opcode with inplace processing */ 9 + #define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6)) 10 + #define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6)) 11 + 12 + #define cpt_inline_rx_opcode(pdev) \ 13 + ({ \ 14 + u8 opcode; \ 15 + if (is_dev_otx2(pdev)) \ 16 + opcode = CPT_INLINE_RX_OPCODE; \ 17 + else \ 18 + opcode = CN10K_CPT_INLINE_RX_OPCODE; \ 19 + (opcode); \ 20 + }) 21 + 8 22 /* 9 23 * CPT PF driver version, It will be incremented by 1 for every feature 10 24 * addition in CPT mailbox messages. ··· 126 112 return 0; 127 113 } 128 114 115 + static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf, 116 + int sso_pf_func, u8 slot) 117 + { 118 + struct cpt_inline_ipsec_cfg_msg *req; 119 + struct pci_dev *pdev = cptpf->pdev; 120 + 121 + req = (struct cpt_inline_ipsec_cfg_msg *) 122 + otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0, 123 + sizeof(*req), sizeof(struct msg_rsp)); 124 + if (req == NULL) { 125 + dev_err(&pdev->dev, "RVU MBOX failed to get message.\n"); 126 + return -EFAULT; 127 + } 128 + memset(req, 0, sizeof(*req)); 129 + req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG; 130 + req->hdr.sig = OTX2_MBOX_REQ_SIG; 131 + req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0); 132 + req->dir = CPT_INLINE_INBOUND; 133 + req->slot = slot; 134 + req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd; 135 + req->sso_pf_func = sso_pf_func; 136 + req->enable = 1; 137 + 138 + return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev); 139 + } 140 + 141 + static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp, 142 + struct otx2_cpt_rx_inline_lf_cfg *req) 143 + { 144 + struct nix_inline_ipsec_cfg *nix_req; 145 + struct pci_dev *pdev = cptpf->pdev; 146 + int ret; 147 + 148 + nix_req = (struct nix_inline_ipsec_cfg *) 149 + otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0, 150 + sizeof(*nix_req), 151 + sizeof(struct msg_rsp)); 152 + if (nix_req == NULL) { 153 + dev_err(&pdev->dev, "RVU 
MBOX failed to get message.\n"); 154 + return -EFAULT; 155 + } 156 + memset(nix_req, 0, sizeof(*nix_req)); 157 + nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG; 158 + nix_req->hdr.sig = OTX2_MBOX_REQ_SIG; 159 + nix_req->enable = 1; 160 + if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS) 161 + nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1; 162 + else 163 + nix_req->cpt_credit = req->credit - 1; 164 + nix_req->gen_cfg.egrp = egrp; 165 + if (req->opcode) 166 + nix_req->gen_cfg.opcode = req->opcode; 167 + else 168 + nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev); 169 + nix_req->gen_cfg.param1 = req->param1; 170 + nix_req->gen_cfg.param2 = req->param2; 171 + nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0); 172 + nix_req->inst_qsel.cpt_slot = 0; 173 + ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev); 174 + if (ret) 175 + return ret; 176 + 177 + if (cptpf->has_cpt1) { 178 + ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1); 179 + if (ret) 180 + return ret; 181 + } 182 + 183 + return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0); 184 + } 185 + 186 + static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, 187 + struct mbox_msghdr *req) 188 + { 189 + struct otx2_cpt_rx_inline_lf_cfg *cfg_req; 190 + u8 egrp; 191 + int ret; 192 + 193 + cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req; 194 + if (cptpf->lfs.lfs_num) { 195 + dev_err(&cptpf->pdev->dev, 196 + "LF is already configured for RX inline ipsec.\n"); 197 + return -EEXIST; 198 + } 199 + /* 200 + * Allow LFs to execute requests destined to only grp IE_TYPES and 201 + * set queue priority of each LF to high 202 + */ 203 + egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES); 204 + if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) { 205 + dev_err(&cptpf->pdev->dev, 206 + "Engine group for inline ipsec is not available\n"); 207 + return -ENOENT; 208 + } 209 + 210 + otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, 
cptpf->reg_base, 211 + &cptpf->afpf_mbox, BLKADDR_CPT0); 212 + ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, 213 + 1); 214 + if (ret) { 215 + dev_err(&cptpf->pdev->dev, 216 + "LF configuration failed for RX inline ipsec.\n"); 217 + return ret; 218 + } 219 + 220 + if (cptpf->has_cpt1) { 221 + cptpf->rsrc_req_blkaddr = BLKADDR_CPT1; 222 + otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev, 223 + cptpf->reg_base, &cptpf->afpf_mbox, 224 + BLKADDR_CPT1); 225 + ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp, 226 + OTX2_CPT_QUEUE_HI_PRIO, 1); 227 + if (ret) { 228 + dev_err(&cptpf->pdev->dev, 229 + "LF configuration failed for RX inline ipsec.\n"); 230 + goto lf_cleanup; 231 + } 232 + cptpf->rsrc_req_blkaddr = 0; 233 + } 234 + 235 + ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req); 236 + if (ret) 237 + goto lf1_cleanup; 238 + 239 + return 0; 240 + 241 + lf1_cleanup: 242 + otx2_cptlf_shutdown(&cptpf->cpt1_lfs); 243 + lf_cleanup: 244 + otx2_cptlf_shutdown(&cptpf->lfs); 245 + return ret; 246 + } 247 + 129 248 static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf, 130 249 struct otx2_cptvf_info *vf, 131 250 struct mbox_msghdr *req, int size) ··· 279 132 case MBOX_MSG_GET_KVF_LIMITS: 280 133 err = handle_msg_kvf_limits(cptpf, vf, req); 281 134 break; 135 + case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG: 136 + err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req); 137 + break; 138 + 282 139 default: 283 140 err = forward_to_af(cptpf, vf, req, size); 284 141 break; ··· 375 224 irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg) 376 225 { 377 226 struct otx2_cptpf_dev *cptpf = arg; 227 + struct otx2_mbox_dev *mdev; 228 + struct otx2_mbox *mbox; 229 + struct mbox_hdr *hdr; 378 230 u64 intr; 379 231 380 232 /* Read the interrupt bits */ 381 233 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT); 382 234 383 235 if (intr & 0x1ULL) { 384 - /* Schedule work queue function to process the MBOX request */ 385 - 
queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work); 236 + mbox = &cptpf->afpf_mbox; 237 + mdev = &mbox->dev[0]; 238 + hdr = mdev->mbase + mbox->rx_start; 239 + if (hdr->num_msgs) 240 + /* Schedule work queue function to process the MBOX request */ 241 + queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work); 242 + 243 + mbox = &cptpf->afpf_mbox_up; 244 + mdev = &mbox->dev[0]; 245 + hdr = mdev->mbase + mbox->rx_start; 246 + if (hdr->num_msgs) 247 + /* Schedule work queue function to process the MBOX request */ 248 + queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work); 386 249 /* Clear and ack the interrupt */ 387 250 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 388 251 0x1ULL); ··· 407 242 static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf, 408 243 struct mbox_msghdr *msg) 409 244 { 245 + struct otx2_cptlfs_info *lfs = &cptpf->lfs; 410 246 struct device *dev = &cptpf->pdev->dev; 411 247 struct cpt_rd_wr_reg_msg *rsp_rd_wr; 412 248 ··· 420 254 msg->sig, msg->id); 421 255 return; 422 256 } 257 + if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1) 258 + lfs = &cptpf->cpt1_lfs; 423 259 424 260 switch (msg->id) { 425 261 case MBOX_MSG_READY: ··· 441 273 break; 442 274 case MBOX_MSG_ATTACH_RESOURCES: 443 275 if (!msg->rc) 444 - cptpf->lfs.are_lfs_attached = 1; 276 + lfs->are_lfs_attached = 1; 445 277 break; 446 278 case MBOX_MSG_DETACH_RESOURCES: 447 279 if (!msg->rc) 448 - cptpf->lfs.are_lfs_attached = 0; 280 + lfs->are_lfs_attached = 0; 281 + break; 282 + case MBOX_MSG_CPT_INLINE_IPSEC_CFG: 283 + case MBOX_MSG_NIX_INLINE_IPSEC_CFG: 449 284 break; 450 285 451 286 default: ··· 537 366 mdev->msgs_acked++; 538 367 } 539 368 otx2_mbox_reset(afpf_mbox, 0); 369 + } 370 + 371 + static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf, 372 + struct mbox_msghdr *msg) 373 + { 374 + struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg; 375 + struct otx2_cptlfs_info *lfs = &cptpf->lfs; 376 + struct msg_rsp *rsp; 377 + 378 + 
if (cptpf->lfs.lfs_num) 379 + lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1, 380 + &lfs->lf[0]); 381 + 382 + rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0, 383 + sizeof(*rsp)); 384 + if (!rsp) 385 + return; 386 + 387 + rsp->hdr.id = msg->id; 388 + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; 389 + rsp->hdr.pcifunc = 0; 390 + rsp->hdr.rc = 0; 391 + } 392 + 393 + static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf, 394 + struct mbox_msghdr *msg) 395 + { 396 + if (msg->id >= MBOX_MSG_MAX) { 397 + dev_err(&cptpf->pdev->dev, 398 + "MBOX msg with unknown ID %d\n", msg->id); 399 + return; 400 + } 401 + 402 + switch (msg->id) { 403 + case MBOX_MSG_CPT_INST_LMTST: 404 + handle_msg_cpt_inst_lmtst(cptpf, msg); 405 + break; 406 + default: 407 + otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id); 408 + } 409 + } 410 + 411 + void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work) 412 + { 413 + struct otx2_cptpf_dev *cptpf; 414 + struct otx2_mbox_dev *mdev; 415 + struct mbox_hdr *rsp_hdr; 416 + struct mbox_msghdr *msg; 417 + struct otx2_mbox *mbox; 418 + int offset, i; 419 + 420 + cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work); 421 + mbox = &cptpf->afpf_mbox_up; 422 + mdev = &mbox->dev[0]; 423 + /* Sync mbox data into memory */ 424 + smp_wmb(); 425 + 426 + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 427 + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 428 + 429 + for (i = 0; i < rsp_hdr->num_msgs; i++) { 430 + msg = (struct mbox_msghdr *)(mdev->mbase + offset); 431 + 432 + process_afpf_mbox_up_msg(cptpf, msg); 433 + 434 + offset = mbox->rx_start + msg->next_msgoff; 435 + } 436 + otx2_mbox_msg_send(mbox, 0); 540 437 }
+4 -6
drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
··· 1504 1504 if (ret) 1505 1505 goto delete_grps; 1506 1506 1507 - lfs->pdev = pdev; 1508 - lfs->reg_base = cptpf->reg_base; 1509 - lfs->mbox = &cptpf->afpf_mbox; 1510 - lfs->blkaddr = BLKADDR_CPT0; 1511 - ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK, 1507 + otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base, 1508 + &cptpf->afpf_mbox, BLKADDR_CPT0); 1509 + ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK, 1512 1510 OTX2_CPT_QUEUE_HI_PRIO, 1); 1513 1511 if (ret) 1514 1512 goto delete_grps; ··· 1560 1562 free_result: 1561 1563 kfree(result); 1562 1564 lf_cleanup: 1563 - otx2_cptlf_shutdown(&cptpf->lfs); 1565 + otx2_cptlf_shutdown(lfs); 1564 1566 delete_grps: 1565 1567 delete_engine_grps(pdev, &cptpf->eng_grps); 1566 1568
+1
drivers/crypto/marvell/octeontx2/otx2_cptvf.h
··· 19 19 struct otx2_mbox pfvf_mbox; 20 20 struct work_struct pfvf_mbox_work; 21 21 struct workqueue_struct *pfvf_mbox_wq; 22 + int blkaddr; 22 23 void *bbuf_base; 23 24 unsigned long cap_flag; 24 25 };
+4 -4
drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
··· 277 277 if (ret) 278 278 return ret; 279 279 280 - lfs->reg_base = cptvf->reg_base; 281 - lfs->pdev = cptvf->pdev; 282 - lfs->mbox = &cptvf->pfvf_mbox; 283 - 284 280 lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits : 285 281 num_online_cpus(); 282 + 283 + otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base, 284 + &cptvf->pfvf_mbox, cptvf->blkaddr); 286 285 ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO, 287 286 lfs_num); 288 287 if (ret) ··· 379 380 if (ret) 380 381 goto destroy_pfvf_mbox; 381 382 383 + cptvf->blkaddr = BLKADDR_CPT0; 382 384 /* Initialize CPT LFs */ 383 385 ret = cptvf_lf_init(cptvf); 384 386 if (ret)
+4 -5
drivers/crypto/n2_core.c
··· 9 9 #include <linux/kernel.h> 10 10 #include <linux/module.h> 11 11 #include <linux/of.h> 12 + #include <linux/of_address.h> 12 13 #include <linux/of_device.h> 13 14 #include <linux/cpumask.h> 14 15 #include <linux/slab.h> ··· 1796 1795 struct spu_mdesc_info *ip, 1797 1796 const char *node_name) 1798 1797 { 1799 - const unsigned int *reg; 1800 - u64 node; 1798 + u64 node, reg; 1801 1799 1802 - reg = of_get_property(dev->dev.of_node, "reg", NULL); 1803 - if (!reg) 1800 + if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0) 1804 1801 return -ENODEV; 1805 1802 1806 1803 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { ··· 1809 1810 if (!name || strcmp(name, node_name)) 1810 1811 continue; 1811 1812 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); 1812 - if (!chdl || (*chdl != *reg)) 1813 + if (!chdl || (*chdl != reg)) 1813 1814 continue; 1814 1815 ip->cfg_handle = *chdl; 1815 1816 return get_irq_props(mdesc, node, ip);
+1 -1
drivers/crypto/nx/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o 3 3 nx-crypto-objs := nx.o \ 4 - nx_debugfs.o \ 5 4 nx-aes-cbc.o \ 6 5 nx-aes-ecb.o \ 7 6 nx-aes-gcm.o \ ··· 10 11 nx-sha256.o \ 11 12 nx-sha512.o 12 13 14 + nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o 13 15 obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o 14 16 obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o 15 17 nx-compress-objs := nx-842.o
+2 -2
drivers/crypto/nx/nx.h
··· 170 170 void nx_debugfs_init(struct nx_crypto_driver *); 171 171 void nx_debugfs_fini(struct nx_crypto_driver *); 172 172 #else 173 - #define NX_DEBUGFS_INIT(drv) (0) 174 - #define NX_DEBUGFS_FINI(drv) (0) 173 + #define NX_DEBUGFS_INIT(drv) do {} while (0) 174 + #define NX_DEBUGFS_FINI(drv) do {} while (0) 175 175 #endif 176 176 177 177 #define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
+1 -1
drivers/crypto/sa2ul.h
··· 170 170 * the following range, so avoid using it. 171 171 */ 172 172 #define SA_UNSAFE_DATA_SZ_MIN 240 173 - #define SA_UNSAFE_DATA_SZ_MAX 256 173 + #define SA_UNSAFE_DATA_SZ_MAX 255 174 174 175 175 struct sa_match_data; 176 176
+20
drivers/crypto/starfive/Kconfig
··· 1 + # 2 + # StarFive crypto drivers configuration 3 + # 4 + 5 + config CRYPTO_DEV_JH7110 6 + tristate "StarFive JH7110 cryptographic engine driver" 7 + depends on SOC_STARFIVE || AMBA_PL08X || COMPILE_TEST 8 + depends on HAS_DMA 9 + select CRYPTO_ENGINE 10 + select CRYPTO_HMAC 11 + select CRYPTO_SHA256 12 + select CRYPTO_SHA512 13 + select CRYPTO_SM3_GENERIC 14 + select CRYPTO_RSA 15 + help 16 + Support for StarFive JH7110 crypto hardware acceleration engine. 17 + This module provides acceleration for public key algo, 18 + skciphers, AEAD and hash functions. 19 + 20 + If you choose 'M' here, this module will be called jh7110-crypto.
+4
drivers/crypto/starfive/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + obj-$(CONFIG_CRYPTO_DEV_JH7110) += jh7110-crypto.o 4 + jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o
+258
drivers/crypto/starfive/jh7110-cryp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Cryptographic API. 4 + * 5 + * Support for StarFive hardware cryptographic engine. 6 + * Copyright (c) 2022 StarFive Technology 7 + * 8 + */ 9 + 10 + #include <linux/clk.h> 11 + #include <linux/delay.h> 12 + #include <linux/interrupt.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/module.h> 15 + #include <linux/of_device.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/pm_runtime.h> 18 + #include <linux/reset.h> 19 + 20 + #include "jh7110-cryp.h" 21 + 22 + #define DRIVER_NAME "jh7110-crypto" 23 + 24 + struct starfive_dev_list { 25 + struct list_head dev_list; 26 + spinlock_t lock; /* protect dev_list */ 27 + }; 28 + 29 + static struct starfive_dev_list dev_list = { 30 + .dev_list = LIST_HEAD_INIT(dev_list.dev_list), 31 + .lock = __SPIN_LOCK_UNLOCKED(dev_list.lock), 32 + }; 33 + 34 + struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx) 35 + { 36 + struct starfive_cryp_dev *cryp = NULL, *tmp; 37 + 38 + spin_lock_bh(&dev_list.lock); 39 + if (!ctx->cryp) { 40 + list_for_each_entry(tmp, &dev_list.dev_list, list) { 41 + cryp = tmp; 42 + break; 43 + } 44 + ctx->cryp = cryp; 45 + } else { 46 + cryp = ctx->cryp; 47 + } 48 + 49 + spin_unlock_bh(&dev_list.lock); 50 + 51 + return cryp; 52 + } 53 + 54 + static int starfive_dma_init(struct starfive_cryp_dev *cryp) 55 + { 56 + dma_cap_mask_t mask; 57 + 58 + dma_cap_zero(mask); 59 + dma_cap_set(DMA_SLAVE, mask); 60 + 61 + cryp->tx = dma_request_chan(cryp->dev, "tx"); 62 + if (IS_ERR(cryp->tx)) 63 + return dev_err_probe(cryp->dev, PTR_ERR(cryp->tx), 64 + "Error requesting tx dma channel.\n"); 65 + 66 + cryp->rx = dma_request_chan(cryp->dev, "rx"); 67 + if (IS_ERR(cryp->rx)) { 68 + dma_release_channel(cryp->tx); 69 + return dev_err_probe(cryp->dev, PTR_ERR(cryp->rx), 70 + "Error requesting rx dma channel.\n"); 71 + } 72 + 73 + return 0; 74 + } 75 + 76 + static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp) 77 + { 78 + 
dma_release_channel(cryp->tx); 79 + dma_release_channel(cryp->rx); 80 + } 81 + 82 + static irqreturn_t starfive_cryp_irq(int irq, void *priv) 83 + { 84 + u32 status; 85 + struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv; 86 + 87 + status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET); 88 + if (status & STARFIVE_IE_FLAG_HASH_DONE) { 89 + status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); 90 + status |= STARFIVE_IE_MASK_HASH_DONE; 91 + writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET); 92 + tasklet_schedule(&cryp->hash_done); 93 + } 94 + 95 + if (status & STARFIVE_IE_FLAG_PKA_DONE) { 96 + status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); 97 + status |= STARFIVE_IE_MASK_PKA_DONE; 98 + writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET); 99 + complete(&cryp->pka_done); 100 + } 101 + 102 + return IRQ_HANDLED; 103 + } 104 + 105 + static int starfive_cryp_probe(struct platform_device *pdev) 106 + { 107 + struct starfive_cryp_dev *cryp; 108 + struct resource *res; 109 + int irq; 110 + int ret; 111 + 112 + cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL); 113 + if (!cryp) 114 + return -ENOMEM; 115 + 116 + platform_set_drvdata(pdev, cryp); 117 + cryp->dev = &pdev->dev; 118 + 119 + cryp->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 120 + if (IS_ERR(cryp->base)) 121 + return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base), 122 + "Error remapping memory for platform device\n"); 123 + 124 + tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp); 125 + 126 + cryp->phys_base = res->start; 127 + cryp->dma_maxburst = 32; 128 + 129 + cryp->hclk = devm_clk_get(&pdev->dev, "hclk"); 130 + if (IS_ERR(cryp->hclk)) 131 + return dev_err_probe(&pdev->dev, PTR_ERR(cryp->hclk), 132 + "Error getting hardware reference clock\n"); 133 + 134 + cryp->ahb = devm_clk_get(&pdev->dev, "ahb"); 135 + if (IS_ERR(cryp->ahb)) 136 + return dev_err_probe(&pdev->dev, PTR_ERR(cryp->ahb), 137 + "Error getting ahb reference clock\n"); 138 + 
139 + cryp->rst = devm_reset_control_get_shared(cryp->dev, NULL); 140 + if (IS_ERR(cryp->rst)) 141 + return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst), 142 + "Error getting hardware reset line\n"); 143 + 144 + init_completion(&cryp->pka_done); 145 + 146 + irq = platform_get_irq(pdev, 0); 147 + if (irq < 0) 148 + return irq; 149 + 150 + ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name, 151 + (void *)cryp); 152 + if (ret) 153 + return dev_err_probe(&pdev->dev, irq, 154 + "Failed to register interrupt handler\n"); 155 + 156 + clk_prepare_enable(cryp->hclk); 157 + clk_prepare_enable(cryp->ahb); 158 + reset_control_deassert(cryp->rst); 159 + 160 + spin_lock(&dev_list.lock); 161 + list_add(&cryp->list, &dev_list.dev_list); 162 + spin_unlock(&dev_list.lock); 163 + 164 + ret = starfive_dma_init(cryp); 165 + if (ret) { 166 + if (ret == -EPROBE_DEFER) 167 + goto err_probe_defer; 168 + else 169 + goto err_dma_init; 170 + } 171 + 172 + /* Initialize crypto engine */ 173 + cryp->engine = crypto_engine_alloc_init(&pdev->dev, 1); 174 + if (!cryp->engine) { 175 + ret = -ENOMEM; 176 + goto err_engine; 177 + } 178 + 179 + ret = crypto_engine_start(cryp->engine); 180 + if (ret) 181 + goto err_engine_start; 182 + 183 + ret = starfive_hash_register_algs(); 184 + if (ret) 185 + goto err_algs_hash; 186 + 187 + ret = starfive_rsa_register_algs(); 188 + if (ret) 189 + goto err_algs_rsa; 190 + 191 + return 0; 192 + 193 + err_algs_rsa: 194 + starfive_hash_unregister_algs(); 195 + err_algs_hash: 196 + crypto_engine_stop(cryp->engine); 197 + err_engine_start: 198 + crypto_engine_exit(cryp->engine); 199 + err_engine: 200 + starfive_dma_cleanup(cryp); 201 + err_dma_init: 202 + spin_lock(&dev_list.lock); 203 + list_del(&cryp->list); 204 + spin_unlock(&dev_list.lock); 205 + 206 + clk_disable_unprepare(cryp->hclk); 207 + clk_disable_unprepare(cryp->ahb); 208 + reset_control_assert(cryp->rst); 209 + 210 + tasklet_kill(&cryp->hash_done); 211 + err_probe_defer: 212 + return 
ret; 213 + } 214 + 215 + static int starfive_cryp_remove(struct platform_device *pdev) 216 + { 217 + struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev); 218 + 219 + starfive_hash_unregister_algs(); 220 + starfive_rsa_unregister_algs(); 221 + 222 + tasklet_kill(&cryp->hash_done); 223 + 224 + crypto_engine_stop(cryp->engine); 225 + crypto_engine_exit(cryp->engine); 226 + 227 + starfive_dma_cleanup(cryp); 228 + 229 + spin_lock(&dev_list.lock); 230 + list_del(&cryp->list); 231 + spin_unlock(&dev_list.lock); 232 + 233 + clk_disable_unprepare(cryp->hclk); 234 + clk_disable_unprepare(cryp->ahb); 235 + reset_control_assert(cryp->rst); 236 + 237 + return 0; 238 + } 239 + 240 + static const struct of_device_id starfive_dt_ids[] __maybe_unused = { 241 + { .compatible = "starfive,jh7110-crypto", .data = NULL}, 242 + {}, 243 + }; 244 + MODULE_DEVICE_TABLE(of, starfive_dt_ids); 245 + 246 + static struct platform_driver starfive_cryp_driver = { 247 + .probe = starfive_cryp_probe, 248 + .remove = starfive_cryp_remove, 249 + .driver = { 250 + .name = DRIVER_NAME, 251 + .of_match_table = starfive_dt_ids, 252 + }, 253 + }; 254 + 255 + module_platform_driver(starfive_cryp_driver); 256 + 257 + MODULE_LICENSE("GPL"); 258 + MODULE_DESCRIPTION("StarFive JH7110 Cryptographic Module");
+172
drivers/crypto/starfive/jh7110-cryp.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __STARFIVE_STR_H__
#define __STARFIVE_STR_H__

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

#include <crypto/engine.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>

/* MMIO register offsets of the shared crypto block (relative to cryp->base). */
#define STARFIVE_ALG_CR_OFFSET			0x0
#define STARFIVE_ALG_FIFO_OFFSET		0x4
#define STARFIVE_IE_MASK_OFFSET			0x8
#define STARFIVE_IE_FLAG_OFFSET			0xc
#define STARFIVE_DMA_IN_LEN_OFFSET		0x10
#define STARFIVE_DMA_OUT_LEN_OFFSET		0x14

/* Interrupt-enable mask / flag bits; mask and flag registers share bit layout. */
#define STARFIVE_IE_MASK_HASH_DONE		0x4
#define STARFIVE_IE_MASK_PKA_DONE		0x8
#define STARFIVE_IE_FLAG_HASH_DONE		0x4
#define STARFIVE_IE_FLAG_PKA_DONE		0x8

#define STARFIVE_MSG_BUFFER_SIZE		SZ_16K
/* Largest HMAC key kept inline in the tfm context (one SHA-512 block). */
#define MAX_KEY_SIZE				SHA512_BLOCK_SIZE

/*
 * Hash control/status register (SHACSR). Bitfield layout mirrors the
 * hardware register; access the whole word through .v.
 */
union starfive_hash_csr {
	u32 v;
	struct {
		u32 start			:1;
		u32 reset			:1;
		u32 ie				:1;	/* interrupt enable */
		u32 firstb			:1;	/* first block of a message */
#define STARFIVE_HASH_SM3			0x0
#define STARFIVE_HASH_SHA224			0x3
#define STARFIVE_HASH_SHA256			0x4
#define STARFIVE_HASH_SHA384			0x5
#define STARFIVE_HASH_SHA512			0x6
#define STARFIVE_HASH_MODE_MASK			0x7
		u32 mode			:3;
		u32 rsvd_1			:1;
		u32 final			:1;	/* last block of a message */
		u32 rsvd_2			:2;
#define STARFIVE_HASH_HMAC_FLAGS		0x800
		u32 hmac			:1;
		u32 rsvd_3			:1;
#define STARFIVE_HASH_KEY_DONE			BIT(13)
		u32 key_done			:1;
		u32 key_flag			:1;
		u32 hmac_done			:1;
#define STARFIVE_HASH_BUSY			BIT(16)
		u32 busy			:1;
		u32 hashdone			:1;
		u32 rsvd_4			:14;
	};
};

/* PKA (public-key accelerator) control register. */
union starfive_pka_cacr {
	u32 v;
	struct {
		u32 start			:1;
		u32 reset			:1;
		u32 ie				:1;
		u32 rsvd_0			:1;
		u32 fifo_mode			:1;
		u32 not_r2			:1;
		u32 ecc_sub			:1;
		u32 pre_expf			:1;
		u32 cmd				:4;
		u32 rsvd_1			:1;
		u32 ctrl_dummy			:1;
		u32 ctrl_false			:1;
		u32 cln_done			:1;
		u32 opsize			:6;
		u32 rsvd_2			:2;
		u32 exposize			:6;
		u32 rsvd_3			:1;
		u32 bigendian			:1;
	};
};

/* RSA key material: modulus n, public exponent e, private exponent d. */
struct starfive_rsa_key {
	u8	*n;
	u8	*e;
	u8	*d;
	int	e_bitlen;
	int	d_bitlen;
	int	bitlen;
	size_t	key_sz;		/* modulus size in bytes */
};

/* Top-level algorithm control register. */
union starfive_alg_cr {
	u32 v;
	struct {
		u32 start			:1;
		u32 aes_dma_en			:1;
		u32 rsvd_0			:1;
		u32 hash_dma_en			:1;
		u32 alg_done			:1;
		u32 rsvd_1			:3;
		u32 clear			:1;
		u32 rsvd_2			:23;
	};
};

/* Per-tfm context shared by the hash and RSA implementations. */
struct starfive_cryp_ctx {
	struct crypto_engine_ctx		enginectx;
	struct starfive_cryp_dev		*cryp;		/* bound device */
	struct starfive_cryp_request_ctx	*rctx;		/* in-flight request */

	unsigned int				hash_mode;	/* STARFIVE_HASH_* */
	u8					key[MAX_KEY_SIZE];
	int					keylen;
	bool					is_hmac;
	struct starfive_rsa_key			rsa_key;
	struct crypto_akcipher			*akcipher_fbk;	/* software fallback */
	struct crypto_ahash			*ahash_fbk;	/* software fallback */
};

/* Per-device state; instances are linked on a global dev_list. */
struct starfive_cryp_dev {
	struct list_head			list;
	struct device				*dev;
	struct clk				*hclk;
	struct clk				*ahb;
	struct reset_control			*rst;

	void __iomem				*base;
	phys_addr_t				phys_base;	/* for DMA FIFO address */

	u32					dma_maxburst;
	struct dma_chan				*tx;
	struct dma_chan				*rx;
	struct dma_slave_config			cfg_in;
	struct dma_slave_config			cfg_out;
	struct crypto_engine			*engine;
	struct tasklet_struct			hash_done;
	struct completion			pka_done;
	int					err;		/* last hardware error */
	union starfive_alg_cr			alg_cr;
	union {
		struct ahash_request		*hreq;		/* current hash request */
	} req;
};

/* Per-request context; lives in the ahash request's private area. */
struct starfive_cryp_request_ctx {
	union {
		union starfive_hash_csr		hash;
		union starfive_pka_cacr		pka;
	} csr;

	struct scatterlist			*in_sg;
	struct scatterlist			*out_sg;
	struct ahash_request			ahash_fbk_req;	/* fallback request */
	size_t					total;		/* bytes to process */
	size_t					nents;
	unsigned int				blksize;
	unsigned int				digsize;
	unsigned long				in_sg_len;
	u8					rsa_data[] __aligned(sizeof(u32));
};

struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);

int starfive_hash_register_algs(void);
void starfive_hash_unregister_algs(void);

int starfive_rsa_register_algs(void);
void starfive_rsa_unregister_algs(void);

void starfive_hash_done_task(unsigned long param);
#endif
+899
drivers/crypto/starfive/jh7110-hash.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Hash function and HMAC support for StarFive driver
 *
 * Copyright (c) 2022 StarFive Technology
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-direct.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/amba/pl080.h>	/* NOTE(review): nothing here uses PL080 symbols — confirm and drop */

#include <crypto/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "jh7110-cryp.h"

/* Hash engine register bank, offset from the crypto block base. */
#define STARFIVE_HASH_REGS_OFFSET	0x300
#define STARFIVE_HASH_SHACSR		(STARFIVE_HASH_REGS_OFFSET + 0x0)
#define STARFIVE_HASH_SHAWDR		(STARFIVE_HASH_REGS_OFFSET + 0x4)
#define STARFIVE_HASH_SHARDR		(STARFIVE_HASH_REGS_OFFSET + 0x8)
#define STARFIVE_HASH_SHAWSR		(STARFIVE_HASH_REGS_OFFSET + 0xC)
#define STARFIVE_HASH_SHAWLEN3		(STARFIVE_HASH_REGS_OFFSET + 0x10)
#define STARFIVE_HASH_SHAWLEN2		(STARFIVE_HASH_REGS_OFFSET + 0x14)
#define STARFIVE_HASH_SHAWLEN1		(STARFIVE_HASH_REGS_OFFSET + 0x18)
#define STARFIVE_HASH_SHAWLEN0		(STARFIVE_HASH_REGS_OFFSET + 0x1C)
#define STARFIVE_HASH_SHAWKR		(STARFIVE_HASH_REGS_OFFSET + 0x20)
#define STARFIVE_HASH_SHAWKLEN		(STARFIVE_HASH_REGS_OFFSET + 0x24)

#define STARFIVE_HASH_BUFLEN		SHA512_BLOCK_SIZE
#define STARFIVE_HASH_RESET		0x2

/* Poll SHACSR until the busy bit clears (10us period, 100ms timeout). */
static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx)
{
	struct starfive_cryp_dev *cryp = ctx->cryp;
	u32 status;

	return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
					  !(status & STARFIVE_HASH_BUSY), 10, 100000);
}

/* Poll SHACSR until the HMAC key has been absorbed by the engine. */
static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx)
{
	struct starfive_cryp_dev *cryp = ctx->cryp;
	u32 status;

	return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
					  (status & STARFIVE_HASH_KEY_DONE), 10, 100000);
}

/*
 * Load the HMAC key into the engine: whole words through SHAWKR, then any
 * trailing bytes one at a time, and wait for the hardware key-done flag.
 * Returns 0 or -ETIMEDOUT.
 */
static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx)
{
	struct starfive_cryp_request_ctx *rctx = ctx->rctx;
	struct starfive_cryp_dev *cryp = ctx->cryp;
	int klen = ctx->keylen, loop;
	unsigned int *key = (unsigned int *)ctx->key;
	unsigned char *cl;

	writel(ctx->keylen, cryp->base + STARFIVE_HASH_SHAWKLEN);

	rctx->csr.hash.hmac = 1;
	rctx->csr.hash.key_flag = 1;

	writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);

	/* Full 32-bit words of the key. */
	for (loop = 0; loop < klen / sizeof(unsigned int); loop++, key++)
		writel(*key, cryp->base + STARFIVE_HASH_SHAWKR);

	/* Remaining 1-3 tail bytes, written byte-wise. */
	if (klen & 0x3) {
		cl = (unsigned char *)key;
		for (loop = 0; loop < (klen & 0x3); loop++, cl++)
			writeb(*cl, cryp->base + STARFIVE_HASH_SHAWKR);
	}

	if (starfive_hash_wait_key_done(ctx))
		return dev_err_probe(cryp->dev, -ETIMEDOUT, "starfive_hash_wait_key_done error\n");

	return 0;
}

/*
 * DMA completion callback (also called directly for zero-length input):
 * unmap the input scatterlist, clear the DMA path, mark the final block,
 * and unmask the hash-done interrupt before kicking the last round.
 */
static void starfive_hash_start(void *param)
{
	struct starfive_cryp_ctx *ctx = param;
	struct starfive_cryp_request_ctx *rctx = ctx->rctx;
	struct starfive_cryp_dev *cryp = ctx->cryp;
	union starfive_alg_cr alg_cr;
	union starfive_hash_csr csr;
	u32 stat;

	dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);

	alg_cr.v = 0;
	alg_cr.clear = 1;

	writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);

	csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR);
	csr.firstb = 0;
	csr.final = 1;

	/* Unmask HASH_DONE so the IRQ fires on completion. */
	stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
	stat &= ~STARFIVE_IE_MASK_HASH_DONE;
	writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
	writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);
}
116 + } 117 + 118 + static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx) 119 + { 120 + struct starfive_cryp_request_ctx *rctx = ctx->rctx; 121 + struct starfive_cryp_dev *cryp = ctx->cryp; 122 + struct dma_async_tx_descriptor *in_desc; 123 + union starfive_alg_cr alg_cr; 124 + int total_len; 125 + int ret; 126 + 127 + if (!rctx->total) { 128 + starfive_hash_start(ctx); 129 + return 0; 130 + } 131 + 132 + writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET); 133 + 134 + total_len = rctx->total; 135 + total_len = (total_len & 0x3) ? (((total_len >> 2) + 1) << 2) : total_len; 136 + sg_dma_len(rctx->in_sg) = total_len; 137 + 138 + alg_cr.v = 0; 139 + alg_cr.start = 1; 140 + alg_cr.hash_dma_en = 1; 141 + 142 + writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); 143 + 144 + ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE); 145 + if (!ret) 146 + return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n"); 147 + 148 + cryp->cfg_in.direction = DMA_MEM_TO_DEV; 149 + cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 150 + cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 151 + cryp->cfg_in.src_maxburst = cryp->dma_maxburst; 152 + cryp->cfg_in.dst_maxburst = cryp->dma_maxburst; 153 + cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET; 154 + 155 + dmaengine_slave_config(cryp->tx, &cryp->cfg_in); 156 + 157 + in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg, 158 + ret, DMA_MEM_TO_DEV, 159 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 160 + 161 + if (!in_desc) 162 + return -EINVAL; 163 + 164 + in_desc->callback = starfive_hash_start; 165 + in_desc->callback_param = ctx; 166 + 167 + dmaengine_submit(in_desc); 168 + dma_async_issue_pending(cryp->tx); 169 + 170 + return 0; 171 + } 172 + 173 + static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx) 174 + { 175 + struct starfive_cryp_request_ctx *rctx = ctx->rctx; 176 + struct starfive_cryp_dev *cryp = ctx->cryp; 177 + int ret = 0; 178 + 179 + 
rctx->csr.hash.v = 0; 180 + rctx->csr.hash.reset = 1; 181 + writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); 182 + 183 + if (starfive_hash_wait_busy(ctx)) 184 + return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n"); 185 + 186 + rctx->csr.hash.v = 0; 187 + rctx->csr.hash.mode = ctx->hash_mode; 188 + rctx->csr.hash.ie = 1; 189 + 190 + if (ctx->is_hmac) { 191 + ret = starfive_hash_hmac_key(ctx); 192 + if (ret) 193 + return ret; 194 + } else { 195 + rctx->csr.hash.start = 1; 196 + rctx->csr.hash.firstb = 1; 197 + writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); 198 + } 199 + 200 + return starfive_hash_xmit_dma(ctx); 201 + } 202 + 203 + static int starfive_hash_copy_hash(struct ahash_request *req) 204 + { 205 + struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); 206 + struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); 207 + int count, *data; 208 + int mlen; 209 + 210 + if (!req->result) 211 + return 0; 212 + 213 + mlen = rctx->digsize / sizeof(u32); 214 + data = (u32 *)req->result; 215 + 216 + for (count = 0; count < mlen; count++) 217 + data[count] = readl(ctx->cryp->base + STARFIVE_HASH_SHARDR); 218 + 219 + return 0; 220 + } 221 + 222 + void starfive_hash_done_task(unsigned long param) 223 + { 224 + struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param; 225 + int err = cryp->err; 226 + 227 + if (!err) 228 + err = starfive_hash_copy_hash(cryp->req.hreq); 229 + 230 + /* Reset to clear hash_done in irq register*/ 231 + writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR); 232 + 233 + crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err); 234 + } 235 + 236 + static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align) 237 + { 238 + int len = 0; 239 + 240 + if (!total) 241 + return 0; 242 + 243 + if (!IS_ALIGNED(total, align)) 244 + return -EINVAL; 245 + 246 + while (sg) { 247 + if (!IS_ALIGNED(sg->offset, sizeof(u32))) 248 + 
/* crypto_engine entry point: dispatch one queued ahash request to hardware. */
static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct starfive_cryp_dev *cryp = ctx->cryp;

	if (!cryp)
		return -ENODEV;

	return starfive_hash_xmit(ctx);
}

/*
 * init/update/final/finup are delegated wholesale to the software fallback;
 * only the one-shot digest path (below) uses the hardware engine.
 */
static int starfive_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
	ahash_request_set_callback(&rctx->ahash_fbk_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
				req->result, req->nbytes);

	return crypto_ahash_init(&rctx->ahash_fbk_req);
}

/* Forward an update step to the fallback tfm. */
static int starfive_hash_update(struct ahash_request *req)
{
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
	ahash_request_set_callback(&rctx->ahash_fbk_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
				req->result, req->nbytes);

	return crypto_ahash_update(&rctx->ahash_fbk_req);
}

/* Forward finalization to the fallback tfm. */
static int starfive_hash_final(struct ahash_request *req)
{
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
	ahash_request_set_callback(&rctx->ahash_fbk_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
				req->result, req->nbytes);

	return crypto_ahash_final(&rctx->ahash_fbk_req);
}

/* Forward a combined update+final to the fallback tfm. */
static int starfive_hash_finup(struct ahash_request *req)
{
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
	ahash_request_set_callback(&rctx->ahash_fbk_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
				req->result, req->nbytes);

	return crypto_ahash_finup(&rctx->ahash_fbk_req);
}

/* Software one-shot digest, used when the input is not DMA-alignable. */
static int starfive_hash_digest_fb(struct ahash_request *req)
{
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
	ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags,
				   req->base.complete, req->base.data);

	ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
				req->result, req->nbytes);

	return crypto_ahash_digest(&rctx->ahash_fbk_req);
}

/*
 * One-shot digest: populate the request context and hand the request to
 * the crypto engine if the scatterlist is DMA-aligned, otherwise fall back
 * to the software implementation.
 */
static int starfive_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct starfive_cryp_dev *cryp = ctx->cryp;

	memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx));

	cryp->req.hreq = req;
	rctx->total = req->nbytes;
	rctx->in_sg = req->src;
	rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->digsize = crypto_ahash_digestsize(tfm);
	rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
	ctx->rctx = rctx;

	if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize))
		return starfive_hash_digest_fb(req);

	return crypto_transfer_hash_request_to_engine(cryp->engine, req);
}
/* Export partial-hash state via the fallback tfm (hw path is one-shot only). */
static int starfive_hash_export(struct ahash_request *req, void *out)
{
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
	ahash_request_set_callback(&rctx->ahash_fbk_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	return crypto_ahash_export(&rctx->ahash_fbk_req, out);
}

/* Import partial-hash state via the fallback tfm. */
static int starfive_hash_import(struct ahash_request *req, const void *in)
{
	struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
	ahash_request_set_callback(&rctx->ahash_fbk_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	return crypto_ahash_import(&rctx->ahash_fbk_req, in);
}

/*
 * Common tfm init: bind a device, allocate the software fallback named
 * @alg_name, mirror its state/request sizes, record the hardware @mode,
 * and hook up the crypto_engine callbacks.
 */
static int starfive_hash_init_tfm(struct crypto_ahash *hash,
				  const char *alg_name,
				  unsigned int mode)
{
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);

	ctx->cryp = starfive_cryp_find_dev(ctx);

	if (!ctx->cryp)
		return -ENODEV;

	ctx->ahash_fbk = crypto_alloc_ahash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->ahash_fbk))
		return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->ahash_fbk),
				     "starfive_hash: Could not load fallback driver.\n");

	crypto_ahash_set_statesize(hash, crypto_ahash_statesize(ctx->ahash_fbk));
	crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) +
				 crypto_ahash_reqsize(ctx->ahash_fbk));

	ctx->keylen = 0;
	ctx->hash_mode = mode;

	ctx->enginectx.op.do_one_request = starfive_hash_one_request;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;

	return 0;
}

/* Tfm teardown: release the fallback and clear the engine callbacks. */
static void starfive_hash_exit_tfm(struct crypto_ahash *hash)
{
	struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);

	crypto_free_ahash(ctx->ahash_fbk);

	ctx->ahash_fbk = NULL;
	ctx->enginectx.op.do_one_request = NULL;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
}

/*
 * HMAC keys longer than the block size must be replaced by their digest
 * (per RFC 2104): hash @key synchronously with @alg_name (the hardware
 * driver itself) and store the result in ctx->key.
 */
static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx,
				     const u8 *key, unsigned int keylen,
				     const char *alg_name)
{
	struct crypto_wait wait;
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	/* Padded copy so the DMA path sees a block-aligned buffer. */
	buf = kzalloc(keylen + STARFIVE_HASH_BUFLEN, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->key, keylen);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
+ 476 + crypto_init_wait(&wait); 477 + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 478 + crypto_req_done, &wait); 479 + crypto_ahash_clear_flags(ahash_tfm, ~0); 480 + 481 + buf = kzalloc(keylen + STARFIVE_HASH_BUFLEN, GFP_KERNEL); 482 + if (!buf) { 483 + ret = -ENOMEM; 484 + goto err_free_req; 485 + } 486 + 487 + memcpy(buf, key, keylen); 488 + sg_init_one(&sg, buf, keylen); 489 + ahash_request_set_crypt(req, &sg, ctx->key, keylen); 490 + 491 + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); 492 + 493 + kfree(buf); 494 + err_free_req: 495 + ahash_request_free(req); 496 + err_free_ahash: 497 + crypto_free_ahash(ahash_tfm); 498 + return ret; 499 + } 500 + 501 + static int starfive_hash_setkey(struct crypto_ahash *hash, 502 + const u8 *key, unsigned int keylen) 503 + { 504 + struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); 505 + unsigned int digestsize = crypto_ahash_digestsize(hash); 506 + unsigned int blocksize = crypto_ahash_blocksize(hash); 507 + const char *alg_name; 508 + 509 + crypto_ahash_setkey(ctx->ahash_fbk, key, keylen); 510 + 511 + if (keylen <= blocksize) { 512 + memcpy(ctx->key, key, keylen); 513 + ctx->keylen = keylen; 514 + return 0; 515 + } 516 + 517 + ctx->keylen = digestsize; 518 + 519 + switch (digestsize) { 520 + case SHA224_DIGEST_SIZE: 521 + alg_name = "sha224-starfive"; 522 + break; 523 + case SHA256_DIGEST_SIZE: 524 + if (ctx->hash_mode == STARFIVE_HASH_SM3) 525 + alg_name = "sm3-starfive"; 526 + else 527 + alg_name = "sha256-starfive"; 528 + break; 529 + case SHA384_DIGEST_SIZE: 530 + alg_name = "sha384-starfive"; 531 + break; 532 + case SHA512_DIGEST_SIZE: 533 + alg_name = "sha512-starfive"; 534 + break; 535 + default: 536 + return -EINVAL; 537 + } 538 + 539 + return starfive_hash_long_setkey(ctx, key, keylen, alg_name); 540 + } 541 + 542 + static int starfive_sha224_init_tfm(struct crypto_ahash *hash) 543 + { 544 + return starfive_hash_init_tfm(hash, "sha224-generic", 545 + STARFIVE_HASH_SHA224); 546 + } 547 
+ 548 + static int starfive_sha256_init_tfm(struct crypto_ahash *hash) 549 + { 550 + return starfive_hash_init_tfm(hash, "sha256-generic", 551 + STARFIVE_HASH_SHA256); 552 + } 553 + 554 + static int starfive_sha384_init_tfm(struct crypto_ahash *hash) 555 + { 556 + return starfive_hash_init_tfm(hash, "sha384-generic", 557 + STARFIVE_HASH_SHA384); 558 + } 559 + 560 + static int starfive_sha512_init_tfm(struct crypto_ahash *hash) 561 + { 562 + return starfive_hash_init_tfm(hash, "sha512-generic", 563 + STARFIVE_HASH_SHA512); 564 + } 565 + 566 + static int starfive_sm3_init_tfm(struct crypto_ahash *hash) 567 + { 568 + return starfive_hash_init_tfm(hash, "sm3-generic", 569 + STARFIVE_HASH_SM3); 570 + } 571 + 572 + static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash) 573 + { 574 + struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); 575 + 576 + ctx->is_hmac = true; 577 + 578 + return starfive_hash_init_tfm(hash, "hmac(sha224-generic)", 579 + STARFIVE_HASH_SHA224); 580 + } 581 + 582 + static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash) 583 + { 584 + struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); 585 + 586 + ctx->is_hmac = true; 587 + 588 + return starfive_hash_init_tfm(hash, "hmac(sha256-generic)", 589 + STARFIVE_HASH_SHA256); 590 + } 591 + 592 + static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash) 593 + { 594 + struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); 595 + 596 + ctx->is_hmac = true; 597 + 598 + return starfive_hash_init_tfm(hash, "hmac(sha384-generic)", 599 + STARFIVE_HASH_SHA384); 600 + } 601 + 602 + static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash) 603 + { 604 + struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); 605 + 606 + ctx->is_hmac = true; 607 + 608 + return starfive_hash_init_tfm(hash, "hmac(sha512-generic)", 609 + STARFIVE_HASH_SHA512); 610 + } 611 + 612 + static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash) 613 + { 614 + struct starfive_cryp_ctx *ctx = 
/*
 * Algorithm table: SHA-224/256/384/512 and SM3, each with an HMAC variant.
 * All entries share the same ops; only init_tfm, digest size, state size
 * and names differ. Priority 200 with CRYPTO_ALG_NEED_FALLBACK so the
 * software implementation handles non-DMA-aligned or multi-step requests.
 */
static struct ahash_alg algs_sha2_sm3[] = {
{	/* sha224 */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_sha224_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.halg = {
		.digestsize = SHA224_DIGEST_SIZE,
		.statesize  = sizeof(struct sha256_state),
		.base = {
			.cra_name		= "sha224",
			.cra_driver_name	= "sha224-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* hmac(sha224) */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_hmac_sha224_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.setkey   = starfive_hash_setkey,
	.halg = {
		.digestsize = SHA224_DIGEST_SIZE,
		.statesize  = sizeof(struct sha256_state),
		.base = {
			.cra_name		= "hmac(sha224)",
			.cra_driver_name	= "sha224-hmac-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* sha256 */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_sha256_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize  = sizeof(struct sha256_state),
		.base = {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* hmac(sha256) */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_hmac_sha256_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.setkey   = starfive_hash_setkey,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize  = sizeof(struct sha256_state),
		.base = {
			.cra_name		= "hmac(sha256)",
			.cra_driver_name	= "sha256-hmac-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* sha384 */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_sha384_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.halg = {
		.digestsize = SHA384_DIGEST_SIZE,
		.statesize  = sizeof(struct sha512_state),
		.base = {
			.cra_name		= "sha384",
			.cra_driver_name	= "sha384-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* hmac(sha384) */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_hmac_sha384_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.setkey   = starfive_hash_setkey,
	.halg = {
		.digestsize = SHA384_DIGEST_SIZE,
		.statesize  = sizeof(struct sha512_state),
		.base = {
			.cra_name		= "hmac(sha384)",
			.cra_driver_name	= "sha384-hmac-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* sha512 */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_sha512_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.halg = {
		.digestsize = SHA512_DIGEST_SIZE,
		.statesize  = sizeof(struct sha512_state),
		.base = {
			.cra_name		= "sha512",
			.cra_driver_name	= "sha512-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* hmac(sha512) */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_hmac_sha512_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.setkey   = starfive_hash_setkey,
	.halg = {
		.digestsize = SHA512_DIGEST_SIZE,
		.statesize  = sizeof(struct sha512_state),
		.base = {
			.cra_name		= "hmac(sha512)",
			.cra_driver_name	= "sha512-hmac-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* sm3 */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_sm3_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.halg = {
		.digestsize = SM3_DIGEST_SIZE,
		.statesize  = sizeof(struct sm3_state),
		.base = {
			.cra_name		= "sm3",
			.cra_driver_name	= "sm3-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SM3_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
}, {	/* hmac(sm3) */
	.init     = starfive_hash_init,
	.update   = starfive_hash_update,
	.final    = starfive_hash_final,
	.finup    = starfive_hash_finup,
	.digest   = starfive_hash_digest,
	.export   = starfive_hash_export,
	.import   = starfive_hash_import,
	.init_tfm = starfive_hmac_sm3_init_tfm,
	.exit_tfm = starfive_hash_exit_tfm,
	.setkey   = starfive_hash_setkey,
	.halg = {
		.digestsize = SM3_DIGEST_SIZE,
		.statesize  = sizeof(struct sm3_state),
		.base = {
			.cra_name		= "hmac(sm3)",
			.cra_driver_name	= "sm3-hmac-starfive",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SM3_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct starfive_cryp_ctx),
			.cra_alignmask		= 3,
			.cra_module		= THIS_MODULE,
		}
	}
},
};

/* Register every algorithm in algs_sha2_sm3 with the crypto API. */
int starfive_hash_register_algs(void)
{
	return crypto_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
}

/* Unregister all algorithms registered above. */
void starfive_hash_unregister_algs(void)
{
	crypto_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
}
+617
drivers/crypto/starfive/jh7110-rsa.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * StarFive Public Key Algo acceleration driver 4 + * 5 + * Copyright (c) 2022 StarFive Technology 6 + */ 7 + 8 + #include <linux/crypto.h> 9 + #include <linux/delay.h> 10 + #include <linux/device.h> 11 + #include <linux/dma-direct.h> 12 + #include <linux/interrupt.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/io.h> 15 + #include <linux/mod_devicetable.h> 16 + #include <crypto/akcipher.h> 17 + #include <crypto/algapi.h> 18 + #include <crypto/internal/akcipher.h> 19 + #include <crypto/internal/rsa.h> 20 + #include <crypto/scatterwalk.h> 21 + 22 + #include "jh7110-cryp.h" 23 + 24 + #define STARFIVE_PKA_REGS_OFFSET 0x400 25 + #define STARFIVE_PKA_CACR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x0) 26 + #define STARFIVE_PKA_CASR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x4) 27 + #define STARFIVE_PKA_CAAR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x8) 28 + #define STARFIVE_PKA_CAER_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x108) 29 + #define STARFIVE_PKA_CANR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x208) 30 + 31 + // R^2 mod N and N0' 32 + #define CRYPTO_CMD_PRE 0x0 33 + // A * R mod N ==> A 34 + #define CRYPTO_CMD_ARN 0x5 35 + // A * E * R mod N ==> A 36 + #define CRYPTO_CMD_AERN 0x6 37 + // A * A * R mod N ==> A 38 + #define CRYPTO_CMD_AARN 0x7 39 + 40 + #define STARFIVE_RSA_MAX_KEYSZ 256 41 + #define STARFIVE_RSA_RESET 0x2 42 + 43 + static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx) 44 + { 45 + struct starfive_cryp_dev *cryp = ctx->cryp; 46 + 47 + return wait_for_completion_timeout(&cryp->pka_done, 48 + usecs_to_jiffies(100000)); 49 + } 50 + 51 + static inline void starfive_pka_irq_mask_clear(struct starfive_cryp_ctx *ctx) 52 + { 53 + struct starfive_cryp_dev *cryp = ctx->cryp; 54 + u32 stat; 55 + 56 + stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); 57 + stat &= ~STARFIVE_IE_MASK_PKA_DONE; 58 + writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); 59 + 60 + reinit_completion(&cryp->pka_done); 61 + } 62 + 63 + 
static void starfive_rsa_free_key(struct starfive_rsa_key *key) 64 + { 65 + if (key->d) 66 + kfree_sensitive(key->d); 67 + if (key->e) 68 + kfree_sensitive(key->e); 69 + if (key->n) 70 + kfree_sensitive(key->n); 71 + memset(key, 0, sizeof(*key)); 72 + } 73 + 74 + static unsigned int starfive_rsa_get_nbit(u8 *pa, u32 snum, int key_sz) 75 + { 76 + u32 i; 77 + u8 value; 78 + 79 + i = snum >> 3; 80 + 81 + value = pa[key_sz - i - 1]; 82 + value >>= snum & 0x7; 83 + value &= 0x1; 84 + 85 + return value; 86 + } 87 + 88 + static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx, 89 + u32 *out, u32 *in, u8 mont, 90 + u32 *mod, int bit_len) 91 + { 92 + struct starfive_cryp_dev *cryp = ctx->cryp; 93 + struct starfive_cryp_request_ctx *rctx = ctx->rctx; 94 + int count = rctx->total / sizeof(u32) - 1; 95 + int loop; 96 + u32 temp; 97 + u8 opsize; 98 + 99 + opsize = (bit_len - 1) >> 5; 100 + rctx->csr.pka.v = 0; 101 + 102 + writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); 103 + 104 + for (loop = 0; loop <= opsize; loop++) 105 + writel(mod[opsize - loop], cryp->base + STARFIVE_PKA_CANR_OFFSET + loop * 4); 106 + 107 + if (mont) { 108 + rctx->csr.pka.v = 0; 109 + rctx->csr.pka.cln_done = 1; 110 + rctx->csr.pka.opsize = opsize; 111 + rctx->csr.pka.exposize = opsize; 112 + rctx->csr.pka.cmd = CRYPTO_CMD_PRE; 113 + rctx->csr.pka.start = 1; 114 + rctx->csr.pka.not_r2 = 1; 115 + rctx->csr.pka.ie = 1; 116 + 117 + starfive_pka_irq_mask_clear(ctx); 118 + writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); 119 + 120 + if (!starfive_pka_wait_done(ctx)) 121 + return -ETIMEDOUT; 122 + 123 + for (loop = 0; loop <= opsize; loop++) 124 + writel(in[opsize - loop], cryp->base + STARFIVE_PKA_CAAR_OFFSET + loop * 4); 125 + 126 + writel(0x1000000, cryp->base + STARFIVE_PKA_CAER_OFFSET); 127 + 128 + for (loop = 1; loop <= opsize; loop++) 129 + writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); 130 + 131 + rctx->csr.pka.v = 0; 132 + 
rctx->csr.pka.cln_done = 1; 133 + rctx->csr.pka.opsize = opsize; 134 + rctx->csr.pka.exposize = opsize; 135 + rctx->csr.pka.cmd = CRYPTO_CMD_AERN; 136 + rctx->csr.pka.start = 1; 137 + rctx->csr.pka.ie = 1; 138 + 139 + starfive_pka_irq_mask_clear(ctx); 140 + writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); 141 + 142 + if (!starfive_pka_wait_done(ctx)) 143 + return -ETIMEDOUT; 144 + } else { 145 + rctx->csr.pka.v = 0; 146 + rctx->csr.pka.cln_done = 1; 147 + rctx->csr.pka.opsize = opsize; 148 + rctx->csr.pka.exposize = opsize; 149 + rctx->csr.pka.cmd = CRYPTO_CMD_PRE; 150 + rctx->csr.pka.start = 1; 151 + rctx->csr.pka.pre_expf = 1; 152 + rctx->csr.pka.ie = 1; 153 + 154 + starfive_pka_irq_mask_clear(ctx); 155 + writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); 156 + 157 + if (!starfive_pka_wait_done(ctx)) 158 + return -ETIMEDOUT; 159 + 160 + for (loop = 0; loop <= count; loop++) 161 + writel(in[count - loop], cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); 162 + 163 + /*pad with 0 up to opsize*/ 164 + for (loop = count + 1; loop <= opsize; loop++) 165 + writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); 166 + 167 + rctx->csr.pka.v = 0; 168 + rctx->csr.pka.cln_done = 1; 169 + rctx->csr.pka.opsize = opsize; 170 + rctx->csr.pka.exposize = opsize; 171 + rctx->csr.pka.cmd = CRYPTO_CMD_ARN; 172 + rctx->csr.pka.start = 1; 173 + rctx->csr.pka.ie = 1; 174 + 175 + starfive_pka_irq_mask_clear(ctx); 176 + writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); 177 + 178 + if (!starfive_pka_wait_done(ctx)) 179 + return -ETIMEDOUT; 180 + } 181 + 182 + for (loop = 0; loop <= opsize; loop++) { 183 + temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop); 184 + out[opsize - loop] = temp; 185 + } 186 + 187 + return 0; 188 + } 189 + 190 + static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result, 191 + u8 *de, u32 *n, int key_sz) 192 + { 193 + struct starfive_cryp_dev *cryp = ctx->cryp; 194 + struct 
starfive_cryp_request_ctx *rctx = ctx->rctx; 195 + struct starfive_rsa_key *key = &ctx->rsa_key; 196 + u32 temp; 197 + int ret = 0; 198 + int opsize, mlen, loop; 199 + unsigned int *mta; 200 + 201 + opsize = (key_sz - 1) >> 2; 202 + 203 + mta = kmalloc(key_sz, GFP_KERNEL); 204 + if (!mta) 205 + return -ENOMEM; 206 + 207 + ret = starfive_rsa_montgomery_form(ctx, mta, (u32 *)rctx->rsa_data, 208 + 0, n, key_sz << 3); 209 + if (ret) { 210 + dev_err_probe(cryp->dev, ret, "Conversion to Montgomery failed"); 211 + goto rsa_err; 212 + } 213 + 214 + for (loop = 0; loop <= opsize; loop++) 215 + writel(mta[opsize - loop], 216 + cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); 217 + 218 + for (loop = key->bitlen - 1; loop > 0; loop--) { 219 + mlen = starfive_rsa_get_nbit(de, loop - 1, key_sz); 220 + 221 + rctx->csr.pka.v = 0; 222 + rctx->csr.pka.cln_done = 1; 223 + rctx->csr.pka.opsize = opsize; 224 + rctx->csr.pka.exposize = opsize; 225 + rctx->csr.pka.cmd = CRYPTO_CMD_AARN; 226 + rctx->csr.pka.start = 1; 227 + rctx->csr.pka.ie = 1; 228 + 229 + starfive_pka_irq_mask_clear(ctx); 230 + writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); 231 + 232 + ret = -ETIMEDOUT; 233 + if (!starfive_pka_wait_done(ctx)) 234 + goto rsa_err; 235 + 236 + if (mlen) { 237 + rctx->csr.pka.v = 0; 238 + rctx->csr.pka.cln_done = 1; 239 + rctx->csr.pka.opsize = opsize; 240 + rctx->csr.pka.exposize = opsize; 241 + rctx->csr.pka.cmd = CRYPTO_CMD_AERN; 242 + rctx->csr.pka.start = 1; 243 + rctx->csr.pka.ie = 1; 244 + 245 + starfive_pka_irq_mask_clear(ctx); 246 + writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); 247 + 248 + if (!starfive_pka_wait_done(ctx)) 249 + goto rsa_err; 250 + } 251 + } 252 + 253 + for (loop = 0; loop <= opsize; loop++) { 254 + temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop); 255 + result[opsize - loop] = temp; 256 + } 257 + 258 + ret = starfive_rsa_montgomery_form(ctx, result, result, 1, n, key_sz << 3); 259 + if (ret) 260 + 
dev_err_probe(cryp->dev, ret, "Conversion from Montgomery failed"); 261 + rsa_err: 262 + kfree(mta); 263 + return ret; 264 + } 265 + 266 + static int starfive_rsa_start(struct starfive_cryp_ctx *ctx, u8 *result, 267 + u8 *de, u8 *n, int key_sz) 268 + { 269 + return starfive_rsa_cpu_start(ctx, (u32 *)result, de, (u32 *)n, key_sz); 270 + } 271 + 272 + static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc) 273 + { 274 + struct starfive_cryp_dev *cryp = ctx->cryp; 275 + struct starfive_cryp_request_ctx *rctx = ctx->rctx; 276 + struct starfive_rsa_key *key = &ctx->rsa_key; 277 + int ret = 0; 278 + 279 + writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); 280 + 281 + rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents, 282 + rctx->rsa_data, rctx->total); 283 + 284 + if (enc) { 285 + key->bitlen = key->e_bitlen; 286 + ret = starfive_rsa_start(ctx, rctx->rsa_data, key->e, 287 + key->n, key->key_sz); 288 + } else { 289 + key->bitlen = key->d_bitlen; 290 + ret = starfive_rsa_start(ctx, rctx->rsa_data, key->d, 291 + key->n, key->key_sz); 292 + } 293 + 294 + if (ret) 295 + goto err_rsa_crypt; 296 + 297 + sg_copy_buffer(rctx->out_sg, sg_nents(rctx->out_sg), 298 + rctx->rsa_data, key->key_sz, 0, 0); 299 + 300 + err_rsa_crypt: 301 + writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); 302 + kfree(rctx->rsa_data); 303 + return ret; 304 + } 305 + 306 + static int starfive_rsa_enc(struct akcipher_request *req) 307 + { 308 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 309 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 310 + struct starfive_cryp_dev *cryp = ctx->cryp; 311 + struct starfive_rsa_key *key = &ctx->rsa_key; 312 + struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req); 313 + int ret; 314 + 315 + if (!key->key_sz) { 316 + akcipher_request_set_tfm(req, ctx->akcipher_fbk); 317 + ret = crypto_akcipher_encrypt(req); 318 + akcipher_request_set_tfm(req, tfm); 319 + return ret; 320 + } 321 + 
322 + if (unlikely(!key->n || !key->e)) 323 + return -EINVAL; 324 + 325 + if (req->dst_len < key->key_sz) 326 + return dev_err_probe(cryp->dev, -EOVERFLOW, 327 + "Output buffer length less than parameter n\n"); 328 + 329 + rctx->in_sg = req->src; 330 + rctx->out_sg = req->dst; 331 + rctx->total = req->src_len; 332 + rctx->nents = sg_nents(rctx->in_sg); 333 + ctx->rctx = rctx; 334 + 335 + return starfive_rsa_enc_core(ctx, 1); 336 + } 337 + 338 + static int starfive_rsa_dec(struct akcipher_request *req) 339 + { 340 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 341 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 342 + struct starfive_cryp_dev *cryp = ctx->cryp; 343 + struct starfive_rsa_key *key = &ctx->rsa_key; 344 + struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req); 345 + int ret; 346 + 347 + if (!key->key_sz) { 348 + akcipher_request_set_tfm(req, ctx->akcipher_fbk); 349 + ret = crypto_akcipher_decrypt(req); 350 + akcipher_request_set_tfm(req, tfm); 351 + return ret; 352 + } 353 + 354 + if (unlikely(!key->n || !key->d)) 355 + return -EINVAL; 356 + 357 + if (req->dst_len < key->key_sz) 358 + return dev_err_probe(cryp->dev, -EOVERFLOW, 359 + "Output buffer length less than parameter n\n"); 360 + 361 + rctx->in_sg = req->src; 362 + rctx->out_sg = req->dst; 363 + ctx->rctx = rctx; 364 + rctx->total = req->src_len; 365 + 366 + return starfive_rsa_enc_core(ctx, 0); 367 + } 368 + 369 + static int starfive_rsa_set_n(struct starfive_rsa_key *rsa_key, 370 + const char *value, size_t vlen) 371 + { 372 + const char *ptr = value; 373 + unsigned int bitslen; 374 + int ret; 375 + 376 + while (!*ptr && vlen) { 377 + ptr++; 378 + vlen--; 379 + } 380 + rsa_key->key_sz = vlen; 381 + bitslen = rsa_key->key_sz << 3; 382 + 383 + /* check valid key size */ 384 + if (bitslen & 0x1f) 385 + return -EINVAL; 386 + 387 + ret = -ENOMEM; 388 + rsa_key->n = kmemdup(ptr, rsa_key->key_sz, GFP_KERNEL); 389 + if (!rsa_key->n) 390 + goto err; 391 + 392 + return 
0; 393 + err: 394 + rsa_key->key_sz = 0; 395 + rsa_key->n = NULL; 396 + starfive_rsa_free_key(rsa_key); 397 + return ret; 398 + } 399 + 400 + static int starfive_rsa_set_e(struct starfive_rsa_key *rsa_key, 401 + const char *value, size_t vlen) 402 + { 403 + const char *ptr = value; 404 + unsigned char pt; 405 + int loop; 406 + 407 + while (!*ptr && vlen) { 408 + ptr++; 409 + vlen--; 410 + } 411 + pt = *ptr; 412 + 413 + if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) { 414 + rsa_key->e = NULL; 415 + return -EINVAL; 416 + } 417 + 418 + rsa_key->e = kzalloc(rsa_key->key_sz, GFP_KERNEL); 419 + if (!rsa_key->e) 420 + return -ENOMEM; 421 + 422 + for (loop = 8; loop > 0; loop--) { 423 + if (pt >> (loop - 1)) 424 + break; 425 + } 426 + 427 + rsa_key->e_bitlen = (vlen - 1) * 8 + loop; 428 + 429 + memcpy(rsa_key->e + (rsa_key->key_sz - vlen), ptr, vlen); 430 + 431 + return 0; 432 + } 433 + 434 + static int starfive_rsa_set_d(struct starfive_rsa_key *rsa_key, 435 + const char *value, size_t vlen) 436 + { 437 + const char *ptr = value; 438 + unsigned char pt; 439 + int loop; 440 + int ret; 441 + 442 + while (!*ptr && vlen) { 443 + ptr++; 444 + vlen--; 445 + } 446 + pt = *ptr; 447 + 448 + ret = -EINVAL; 449 + if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) 450 + goto err; 451 + 452 + ret = -ENOMEM; 453 + rsa_key->d = kzalloc(rsa_key->key_sz, GFP_KERNEL); 454 + if (!rsa_key->d) 455 + goto err; 456 + 457 + for (loop = 8; loop > 0; loop--) { 458 + if (pt >> (loop - 1)) 459 + break; 460 + } 461 + 462 + rsa_key->d_bitlen = (vlen - 1) * 8 + loop; 463 + 464 + memcpy(rsa_key->d + (rsa_key->key_sz - vlen), ptr, vlen); 465 + 466 + return 0; 467 + err: 468 + rsa_key->d = NULL; 469 + return ret; 470 + } 471 + 472 + static int starfive_rsa_setkey(struct crypto_akcipher *tfm, const void *key, 473 + unsigned int keylen, bool private) 474 + { 475 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 476 + struct rsa_key raw_key = {NULL}; 477 + struct starfive_rsa_key 
*rsa_key = &ctx->rsa_key; 478 + int ret; 479 + 480 + if (private) 481 + ret = rsa_parse_priv_key(&raw_key, key, keylen); 482 + else 483 + ret = rsa_parse_pub_key(&raw_key, key, keylen); 484 + if (ret < 0) 485 + goto err; 486 + 487 + starfive_rsa_free_key(rsa_key); 488 + 489 + /* Use fallback for mod > 256 + 1 byte prefix */ 490 + if (raw_key.n_sz > STARFIVE_RSA_MAX_KEYSZ + 1) 491 + return 0; 492 + 493 + ret = starfive_rsa_set_n(rsa_key, raw_key.n, raw_key.n_sz); 494 + if (ret) 495 + return ret; 496 + 497 + ret = starfive_rsa_set_e(rsa_key, raw_key.e, raw_key.e_sz); 498 + if (ret) 499 + goto err; 500 + 501 + if (private) { 502 + ret = starfive_rsa_set_d(rsa_key, raw_key.d, raw_key.d_sz); 503 + if (ret) 504 + goto err; 505 + } 506 + 507 + if (!rsa_key->n || !rsa_key->e) { 508 + ret = -EINVAL; 509 + goto err; 510 + } 511 + 512 + if (private && !rsa_key->d) { 513 + ret = -EINVAL; 514 + goto err; 515 + } 516 + 517 + return 0; 518 + err: 519 + starfive_rsa_free_key(rsa_key); 520 + return ret; 521 + } 522 + 523 + static int starfive_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, 524 + unsigned int keylen) 525 + { 526 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 527 + int ret; 528 + 529 + ret = crypto_akcipher_set_pub_key(ctx->akcipher_fbk, key, keylen); 530 + if (ret) 531 + return ret; 532 + 533 + return starfive_rsa_setkey(tfm, key, keylen, false); 534 + } 535 + 536 + static int starfive_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, 537 + unsigned int keylen) 538 + { 539 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 540 + int ret; 541 + 542 + ret = crypto_akcipher_set_priv_key(ctx->akcipher_fbk, key, keylen); 543 + if (ret) 544 + return ret; 545 + 546 + return starfive_rsa_setkey(tfm, key, keylen, true); 547 + } 548 + 549 + static unsigned int starfive_rsa_max_size(struct crypto_akcipher *tfm) 550 + { 551 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 552 + 553 + if (ctx->rsa_key.key_sz) 554 + return 
ctx->rsa_key.key_sz; 555 + 556 + return crypto_akcipher_maxsize(ctx->akcipher_fbk); 557 + } 558 + 559 + static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm) 560 + { 561 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 562 + 563 + ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0); 564 + if (IS_ERR(ctx->akcipher_fbk)) 565 + return PTR_ERR(ctx->akcipher_fbk); 566 + 567 + ctx->cryp = starfive_cryp_find_dev(ctx); 568 + if (!ctx->cryp) { 569 + crypto_free_akcipher(ctx->akcipher_fbk); 570 + return -ENODEV; 571 + } 572 + 573 + akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + 574 + sizeof(struct crypto_akcipher) + 32); 575 + 576 + return 0; 577 + } 578 + 579 + static void starfive_rsa_exit_tfm(struct crypto_akcipher *tfm) 580 + { 581 + struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); 582 + struct starfive_rsa_key *key = (struct starfive_rsa_key *)&ctx->rsa_key; 583 + 584 + crypto_free_akcipher(ctx->akcipher_fbk); 585 + starfive_rsa_free_key(key); 586 + } 587 + 588 + static struct akcipher_alg starfive_rsa = { 589 + .encrypt = starfive_rsa_enc, 590 + .decrypt = starfive_rsa_dec, 591 + .sign = starfive_rsa_dec, 592 + .verify = starfive_rsa_enc, 593 + .set_pub_key = starfive_rsa_set_pub_key, 594 + .set_priv_key = starfive_rsa_set_priv_key, 595 + .max_size = starfive_rsa_max_size, 596 + .init = starfive_rsa_init_tfm, 597 + .exit = starfive_rsa_exit_tfm, 598 + .base = { 599 + .cra_name = "rsa", 600 + .cra_driver_name = "starfive-rsa", 601 + .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER | 602 + CRYPTO_ALG_NEED_FALLBACK, 603 + .cra_priority = 3000, 604 + .cra_module = THIS_MODULE, 605 + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), 606 + }, 607 + }; 608 + 609 + int starfive_rsa_register_algs(void) 610 + { 611 + return crypto_register_akcipher(&starfive_rsa); 612 + } 613 + 614 + void starfive_rsa_unregister_algs(void) 615 + { 616 + crypto_unregister_akcipher(&starfive_rsa); 617 + }
+11 -4
drivers/md/dm-crypt.c
··· 31 31 #include <asm/unaligned.h> 32 32 #include <crypto/hash.h> 33 33 #include <crypto/md5.h> 34 - #include <crypto/algapi.h> 35 34 #include <crypto/skcipher.h> 36 35 #include <crypto/aead.h> 37 36 #include <crypto/authenc.h> 37 + #include <crypto/utils.h> 38 38 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ 39 39 #include <linux/key-type.h> 40 40 #include <keys/user-type.h> ··· 745 745 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, 746 746 struct dm_crypt_request *dmreq) 747 747 { 748 - u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); 748 + struct crypto_skcipher *tfm = any_tfm(cc); 749 749 struct skcipher_request *req; 750 750 struct scatterlist src, dst; 751 751 DECLARE_CRYPTO_WAIT(wait); 752 + unsigned int reqsize; 752 753 int err; 754 + u8 *buf; 753 755 754 - req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); 756 + reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64)); 757 + 758 + req = kmalloc(reqsize + cc->iv_size, GFP_NOIO); 755 759 if (!req) 756 760 return -ENOMEM; 757 761 762 + skcipher_request_set_tfm(req, tfm); 763 + 764 + buf = (u8 *)req + reqsize; 758 765 memset(buf, 0, cc->iv_size); 759 766 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size); 760 767 ··· 770 763 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf); 771 764 skcipher_request_set_callback(req, 0, crypto_req_done, &wait); 772 765 err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 773 - skcipher_request_free(req); 766 + kfree_sensitive(req); 774 767 775 768 return err; 776 769 }
+36
include/crypto/akcipher.h
··· 374 374 } 375 375 376 376 /** 377 + * crypto_akcipher_sync_encrypt() - Invoke public key encrypt operation 378 + * 379 + * Function invokes the specific public key encrypt operation for a given 380 + * public key algorithm 381 + * 382 + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() 383 + * @src: source buffer 384 + * @slen: source length 385 + * @dst: destination buffer 386 + * @dlen: destination length 387 + * 388 + * Return: zero on success; error code in case of error 389 + */ 390 + int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, 391 + const void *src, unsigned int slen, 392 + void *dst, unsigned int dlen); 393 + 394 + /** 395 + * crypto_akcipher_sync_decrypt() - Invoke public key decrypt operation 396 + * 397 + * Function invokes the specific public key decrypt operation for a given 398 + * public key algorithm 399 + * 400 + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() 401 + * @src: source buffer 402 + * @slen: source length 403 + * @dst: destination buffer 404 + * @dlen: destination length 405 + * 406 + * Return: Output length on success; error code in case of error 407 + */ 408 + int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm, 409 + const void *src, unsigned int slen, 410 + void *dst, unsigned int dlen); 411 + 412 + /** 377 413 * crypto_akcipher_sign() - Invoke public key sign operation 378 414 * 379 415 * Function invokes the specific public key sign operation for a given
-1
include/crypto/algapi.h
··· 56 56 struct crypto_type { 57 57 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); 58 58 unsigned int (*extsize)(struct crypto_alg *alg); 59 - int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); 60 59 int (*init_tfm)(struct crypto_tfm *tfm); 61 60 void (*show)(struct seq_file *m, struct crypto_alg *alg); 62 61 int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
+1 -1
include/crypto/engine.h
··· 78 78 79 79 /* 80 80 * struct crypto_engine_op - crypto hardware engine operations 81 - * @prepare__request: do some prepare if need before handle the current request 81 + * @prepare_request: do some preparation if needed before handling the current request 82 82 * @unprepare_request: undo any work done by prepare_request() 83 83 * @do_one_request: do encryption for current request 84 84 */
+2 -1
include/crypto/hash.h
··· 260 260 int (*setkey)(struct crypto_ahash *tfm, const u8 *key, 261 261 unsigned int keylen); 262 262 263 + unsigned int statesize; 263 264 unsigned int reqsize; 264 265 struct crypto_tfm base; 265 266 }; ··· 401 400 */ 402 401 static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) 403 402 { 404 - return crypto_hash_alg_common(tfm)->statesize; 403 + return tfm->statesize; 405 404 } 406 405 407 406 static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
+2
include/crypto/internal/cipher.h
··· 176 176 void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, 177 177 u8 *dst, const u8 *src); 178 178 179 + struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher); 180 + 179 181 struct crypto_cipher_spawn { 180 182 struct crypto_spawn base; 181 183 };
+12
include/crypto/internal/hash.h
··· 149 149 halg); 150 150 } 151 151 152 + static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) 153 + { 154 + return container_of(crypto_hash_alg_common(hash), struct ahash_alg, 155 + halg); 156 + } 157 + 158 + static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm, 159 + unsigned int size) 160 + { 161 + tfm->statesize = size; 162 + } 163 + 152 164 static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, 153 165 unsigned int reqsize) 154 166 {
+17
include/crypto/internal/sig.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Public Key Signature Algorithm 4 + * 5 + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> 6 + */ 7 + #ifndef _CRYPTO_INTERNAL_SIG_H 8 + #define _CRYPTO_INTERNAL_SIG_H 9 + 10 + #include <crypto/algapi.h> 11 + #include <crypto/sig.h> 12 + 13 + static inline void *crypto_sig_ctx(struct crypto_sig *tfm) 14 + { 15 + return crypto_tfm_ctx(&tfm->base); 16 + } 17 + #endif
-2
include/crypto/public_key.h
··· 48 48 const char *pkey_algo; 49 49 const char *hash_algo; 50 50 const char *encoding; 51 - const void *data; 52 - unsigned int data_size; 53 51 }; 54 52 55 53 extern void public_key_signature_free(struct public_key_signature *sig);
+1 -1
include/crypto/sha2.h
··· 128 128 sctx->state[7] = SHA224_H7; 129 129 sctx->count = 0; 130 130 } 131 - void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len); 131 + /* Simply use sha256_update as it is equivalent to sha224_update. */ 132 132 void sha224_final(struct sha256_state *sctx, u8 *out); 133 133 134 134 #endif /* _CRYPTO_SHA2_H */
+36 -14
include/crypto/sha256_base.h
··· 8 8 #ifndef _CRYPTO_SHA256_BASE_H 9 9 #define _CRYPTO_SHA256_BASE_H 10 10 11 + #include <asm/byteorder.h> 12 + #include <asm/unaligned.h> 11 13 #include <crypto/internal/hash.h> 12 14 #include <crypto/sha2.h> 13 - #include <linux/crypto.h> 14 - #include <linux/module.h> 15 15 #include <linux/string.h> 16 - 17 - #include <asm/unaligned.h> 16 + #include <linux/types.h> 18 17 19 18 typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src, 20 19 int blocks); ··· 34 35 return 0; 35 36 } 36 37 37 - static inline int sha256_base_do_update(struct shash_desc *desc, 38 - const u8 *data, 39 - unsigned int len, 40 - sha256_block_fn *block_fn) 38 + static inline int lib_sha256_base_do_update(struct sha256_state *sctx, 39 + const u8 *data, 40 + unsigned int len, 41 + sha256_block_fn *block_fn) 41 42 { 42 - struct sha256_state *sctx = shash_desc_ctx(desc); 43 43 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; 44 44 45 45 sctx->count += len; ··· 71 73 return 0; 72 74 } 73 75 74 - static inline int sha256_base_do_finalize(struct shash_desc *desc, 75 - sha256_block_fn *block_fn) 76 + static inline int sha256_base_do_update(struct shash_desc *desc, 77 + const u8 *data, 78 + unsigned int len, 79 + sha256_block_fn *block_fn) 80 + { 81 + struct sha256_state *sctx = shash_desc_ctx(desc); 82 + 83 + return lib_sha256_base_do_update(sctx, data, len, block_fn); 84 + } 85 + 86 + static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx, 87 + sha256_block_fn *block_fn) 76 88 { 77 89 const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64); 78 - struct sha256_state *sctx = shash_desc_ctx(desc); 79 90 __be64 *bits = (__be64 *)(sctx->buf + bit_offset); 80 91 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; 81 92 ··· 103 96 return 0; 104 97 } 105 98 106 - static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) 99 + static inline int sha256_base_do_finalize(struct shash_desc *desc, 100 + sha256_block_fn *block_fn) 107 101 { 108 - 
unsigned int digest_size = crypto_shash_digestsize(desc->tfm); 109 102 struct sha256_state *sctx = shash_desc_ctx(desc); 103 + 104 + return lib_sha256_base_do_finalize(sctx, block_fn); 105 + } 106 + 107 + static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out, 108 + unsigned int digest_size) 109 + { 110 110 __be32 *digest = (__be32 *)out; 111 111 int i; 112 112 ··· 122 108 123 109 memzero_explicit(sctx, sizeof(*sctx)); 124 110 return 0; 111 + } 112 + 113 + static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) 114 + { 115 + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); 116 + struct sha256_state *sctx = shash_desc_ctx(desc); 117 + 118 + return lib_sha256_base_finish(sctx, out, digest_size); 125 119 } 126 120 127 121 #endif /* _CRYPTO_SHA256_BASE_H */
+140
include/crypto/sig.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Public Key Signature Algorithm 4 + * 5 + * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> 6 + */ 7 + #ifndef _CRYPTO_SIG_H 8 + #define _CRYPTO_SIG_H 9 + 10 + #include <linux/crypto.h> 11 + 12 + /** 13 + * struct crypto_sig - user-instantiated objects which encapsulate 14 + * algorithms and core processing logic 15 + * 16 + * @base: Common crypto API algorithm data structure 17 + */ 18 + struct crypto_sig { 19 + struct crypto_tfm base; 20 + }; 21 + 22 + /** 23 + * DOC: Generic Public Key Signature API 24 + * 25 + * The Public Key Signature API is used with the algorithms of type 26 + * CRYPTO_ALG_TYPE_SIG (listed as type "sig" in /proc/crypto) 27 + */ 28 + 29 + /** 30 + * crypto_alloc_sig() - allocate signature tfm handle 31 + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 32 + * signing algorithm e.g. "ecdsa" 33 + * @type: specifies the type of the algorithm 34 + * @mask: specifies the mask for the algorithm 35 + * 36 + * Allocate a handle for public key signature algorithm. The returned struct 37 + * crypto_sig is the handle that is required for any subsequent 38 + * API invocation for signature operations. 39 + * 40 + * Return: allocated handle in case of success; IS_ERR() is true in case 41 + * of an error, PTR_ERR() returns the error code. 42 + */ 43 + struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask); 44 + 45 + static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm) 46 + { 47 + return &tfm->base; 48 + } 49 + 50 + /** 51 + * crypto_free_sig() - free signature tfm handle 52 + * 53 + * @tfm: signature tfm handle allocated with crypto_alloc_sig() 54 + * 55 + * If @tfm is a NULL or error pointer, this function does nothing. 
56 + */ 57 + static inline void crypto_free_sig(struct crypto_sig *tfm) 58 + { 59 + crypto_destroy_tfm(tfm, crypto_sig_tfm(tfm)); 60 + } 61 + 62 + /** 63 + * crypto_sig_maxsize() - Get len for output buffer 64 + * 65 + * Function returns the dest buffer size required for a given key. 66 + * Function assumes that the key is already set in the transformation. If this 67 + * function is called without a setkey or with a failed setkey, you will end up 68 + * in a NULL dereference. 69 + * 70 + * @tfm: signature tfm handle allocated with crypto_alloc_sig() 71 + */ 72 + int crypto_sig_maxsize(struct crypto_sig *tfm); 73 + 74 + /** 75 + * crypto_sig_sign() - Invoke signing operation 76 + * 77 + * Function invokes the specific signing operation for a given algorithm 78 + * 79 + * @tfm: signature tfm handle allocated with crypto_alloc_sig() 80 + * @src: source buffer 81 + * @slen: source length 82 + * @dst: destinatino obuffer 83 + * @dlen: destination length 84 + * 85 + * Return: zero on success; error code in case of error 86 + */ 87 + int crypto_sig_sign(struct crypto_sig *tfm, 88 + const void *src, unsigned int slen, 89 + void *dst, unsigned int dlen); 90 + 91 + /** 92 + * crypto_sig_verify() - Invoke signature verification 93 + * 94 + * Function invokes the specific signature verification operation 95 + * for a given algorithm. 96 + * 97 + * @tfm: signature tfm handle allocated with crypto_alloc_sig() 98 + * @src: source buffer 99 + * @slen: source length 100 + * @digest: digest 101 + * @dlen: digest length 102 + * 103 + * Return: zero on verification success; error code in case of error. 
104 + */ 105 + int crypto_sig_verify(struct crypto_sig *tfm, 106 + const void *src, unsigned int slen, 107 + const void *digest, unsigned int dlen); 108 + 109 + /** 110 + * crypto_sig_set_pubkey() - Invoke set public key operation 111 + * 112 + * Function invokes the algorithm specific set key function, which knows 113 + * how to decode and interpret the encoded key and parameters 114 + * 115 + * @tfm: tfm handle 116 + * @key: BER encoded public key, algo OID, paramlen, BER encoded 117 + * parameters 118 + * @keylen: length of the key (not including other data) 119 + * 120 + * Return: zero on success; error code in case of error 121 + */ 122 + int crypto_sig_set_pubkey(struct crypto_sig *tfm, 123 + const void *key, unsigned int keylen); 124 + 125 + /** 126 + * crypto_sig_set_privkey() - Invoke set private key operation 127 + * 128 + * Function invokes the algorithm specific set key function, which knows 129 + * how to decode and interpret the encoded key and parameters 130 + * 131 + * @tfm: tfm handle 132 + * @key: BER encoded private key, algo OID, paramlen, BER encoded 133 + * parameters 134 + * @keylen: length of the key (not including other data) 135 + * 136 + * Return: zero on success; error code in case of error 137 + */ 138 + int crypto_sig_set_privkey(struct crypto_sig *tfm, 139 + const void *key, unsigned int keylen); 140 + #endif
+12 -9
include/crypto/sm2.h
··· 11 11 #ifndef _CRYPTO_SM2_H 12 12 #define _CRYPTO_SM2_H 13 13 14 - #include <crypto/sm3.h> 15 - #include <crypto/akcipher.h> 14 + struct shash_desc; 16 15 17 - /* The default user id as specified in GM/T 0009-2012 */ 18 - #define SM2_DEFAULT_USERID "1234567812345678" 19 - #define SM2_DEFAULT_USERID_LEN 16 20 - 21 - extern int sm2_compute_z_digest(struct crypto_akcipher *tfm, 22 - const unsigned char *id, size_t id_len, 23 - unsigned char dgst[SM3_DIGEST_SIZE]); 16 + #if IS_REACHABLE(CONFIG_CRYPTO_SM2) 17 + int sm2_compute_z_digest(struct shash_desc *desc, 18 + const void *key, unsigned int keylen, void *dgst); 19 + #else 20 + static inline int sm2_compute_z_digest(struct shash_desc *desc, 21 + const void *key, unsigned int keylen, 22 + void *dgst) 23 + { 24 + return -ENOTSUPP; 25 + } 26 + #endif 24 27 25 28 #endif /* _CRYPTO_SM2_H */
+2
include/keys/asymmetric-parser.h
··· 10 10 #ifndef _KEYS_ASYMMETRIC_PARSER_H 11 11 #define _KEYS_ASYMMETRIC_PARSER_H 12 12 13 + struct key_preparsed_payload; 14 + 13 15 /* 14 16 * Key data parser. Called during key instantiation. 15 17 */
+2 -1
include/linux/crypto.h
··· 25 25 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 26 26 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 27 27 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 28 + #define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006 29 + #define CRYPTO_ALG_TYPE_SIG 0x00000007 28 30 #define CRYPTO_ALG_TYPE_KPP 0x00000008 29 31 #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a 30 32 #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b 31 33 #define CRYPTO_ALG_TYPE_RNG 0x0000000c 32 - #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d 33 34 #define CRYPTO_ALG_TYPE_HASH 0x0000000e 34 35 #define CRYPTO_ALG_TYPE_SHASH 0x0000000e 35 36 #define CRYPTO_ALG_TYPE_AHASH 0x0000000f
+19 -60
lib/crypto/sha256.c
··· 11 11 * Copyright (c) 2014 Red Hat Inc. 12 12 */ 13 13 14 - #include <linux/bitops.h> 15 - #include <linux/export.h> 14 + #include <asm/unaligned.h> 15 + #include <crypto/sha256_base.h> 16 + #include <linux/kernel.h> 16 17 #include <linux/module.h> 17 18 #include <linux/string.h> 18 - #include <crypto/sha2.h> 19 - #include <asm/unaligned.h> 20 19 21 20 static const u32 SHA256_K[] = { 22 21 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, ··· 118 119 state[4] += e; state[5] += f; state[6] += g; state[7] += h; 119 120 } 120 121 121 - void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) 122 + static void sha256_transform_blocks(struct sha256_state *sctx, 123 + const u8 *input, int blocks) 122 124 { 123 - unsigned int partial, done; 124 - const u8 *src; 125 125 u32 W[64]; 126 126 127 - partial = sctx->count & 0x3f; 128 - sctx->count += len; 129 - done = 0; 130 - src = data; 127 + do { 128 + sha256_transform(sctx->state, input, W); 129 + input += SHA256_BLOCK_SIZE; 130 + } while (--blocks); 131 131 132 - if ((partial + len) > 63) { 133 - if (partial) { 134 - done = -partial; 135 - memcpy(sctx->buf + partial, data, done + 64); 136 - src = sctx->buf; 137 - } 132 + memzero_explicit(W, sizeof(W)); 133 + } 138 134 139 - do { 140 - sha256_transform(sctx->state, src, W); 141 - done += 64; 142 - src = data + done; 143 - } while (done + 63 < len); 144 - 145 - memzero_explicit(W, sizeof(W)); 146 - 147 - partial = 0; 148 - } 149 - memcpy(sctx->buf + partial, src, len - done); 135 + void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) 136 + { 137 + lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks); 150 138 } 151 139 EXPORT_SYMBOL(sha256_update); 152 140 153 - void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len) 141 + static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size) 154 142 { 155 - sha256_update(sctx, data, len); 156 - } 157 - 
EXPORT_SYMBOL(sha224_update); 158 - 159 - static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) 160 - { 161 - __be32 *dst = (__be32 *)out; 162 - __be64 bits; 163 - unsigned int index, pad_len; 164 - int i; 165 - static const u8 padding[64] = { 0x80, }; 166 - 167 - /* Save number of bits */ 168 - bits = cpu_to_be64(sctx->count << 3); 169 - 170 - /* Pad out to 56 mod 64. */ 171 - index = sctx->count & 0x3f; 172 - pad_len = (index < 56) ? (56 - index) : ((64+56) - index); 173 - sha256_update(sctx, padding, pad_len); 174 - 175 - /* Append length (before padding) */ 176 - sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); 177 - 178 - /* Store state in digest */ 179 - for (i = 0; i < digest_words; i++) 180 - put_unaligned_be32(sctx->state[i], &dst[i]); 181 - 182 - /* Zeroize sensitive information. */ 183 - memzero_explicit(sctx, sizeof(*sctx)); 143 + lib_sha256_base_do_finalize(sctx, sha256_transform_blocks); 144 + lib_sha256_base_finish(sctx, out, digest_size); 184 145 } 185 146 186 147 void sha256_final(struct sha256_state *sctx, u8 *out) 187 148 { 188 - __sha256_final(sctx, out, 8); 149 + __sha256_final(sctx, out, 32); 189 150 } 190 151 EXPORT_SYMBOL(sha256_final); 191 152 192 153 void sha224_final(struct sha256_state *sctx, u8 *out) 193 154 { 194 - __sha256_final(sctx, out, 7); 155 + __sha256_final(sctx, out, 28); 195 156 } 196 157 EXPORT_SYMBOL(sha224_final); 197 158