Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
"Here is the crypto update for 5.3:

API:
- Test shash interface directly in testmgr
- cra_driver_name is now mandatory

Algorithms:
- Replace arc4 crypto_cipher with library helper
- Implement 5 way interleave for ECB, CBC and CTR on arm64
- Add xxhash
- Add continuous self-test on noise source to drbg
- Update jitter RNG

Drivers:
- Add support for SHA204A random number generator
- Add support for 7211 in iproc-rng200
- Fix fuzz test failures in inside-secure
- Fix fuzz test failures in talitos
- Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
crypto: stm32/hash - remove interruptible condition for dma
crypto: stm32/hash - Fix hmac issue more than 256 bytes
crypto: stm32/crc32 - rename driver file
crypto: amcc - remove memset after dma_alloc_coherent
crypto: ccp - Switch to SPDX license identifiers
crypto: ccp - Validate the error value used to index error messages
crypto: doc - Fix formatting of new crypto engine content
crypto: doc - Add parameter documentation
crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
crypto: arm64/aes-ce - add 5 way interleave routines
crypto: talitos - drop icv_ool
crypto: talitos - fix hash on SEC1.
crypto: talitos - move struct talitos_edesc into talitos.h
lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
crypto: asymmetric_keys - select CRYPTO_HASH where needed
crypto: serpent - mark __serpent_setkey_sbox noinline
crypto: testmgr - dynamically allocate crypto_shash
crypto: testmgr - dynamically allocate testvec_config
crypto: talitos - eliminate unneeded 'done' functions at build time
...

+4513 -3784
+69 -91
Documentation/crypto/api-samples.rst
··· 4 4 Code Example For Symmetric Key Cipher Operation 5 5 ----------------------------------------------- 6 6 7 + This code encrypts some data with AES-256-XTS. For sake of example, 8 + all inputs are random bytes, the encryption is done in-place, and it's 9 + assumed the code is running in a context where it can sleep. 10 + 7 11 :: 8 12 9 - 10 - /* tie all data structures together */ 11 - struct skcipher_def { 12 - struct scatterlist sg; 13 - struct crypto_skcipher *tfm; 14 - struct skcipher_request *req; 15 - struct crypto_wait wait; 16 - }; 17 - 18 - /* Perform cipher operation */ 19 - static unsigned int test_skcipher_encdec(struct skcipher_def *sk, 20 - int enc) 21 - { 22 - int rc; 23 - 24 - if (enc) 25 - rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait); 26 - else 27 - rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait); 28 - 29 - if (rc) 30 - pr_info("skcipher encrypt returned with result %d\n", rc); 31 - 32 - return rc; 33 - } 34 - 35 - /* Initialize and trigger cipher operation */ 36 13 static int test_skcipher(void) 37 14 { 38 - struct skcipher_def sk; 39 - struct crypto_skcipher *skcipher = NULL; 40 - struct skcipher_request *req = NULL; 41 - char *scratchpad = NULL; 42 - char *ivdata = NULL; 43 - unsigned char key[32]; 44 - int ret = -EFAULT; 15 + struct crypto_skcipher *tfm = NULL; 16 + struct skcipher_request *req = NULL; 17 + u8 *data = NULL; 18 + const size_t datasize = 512; /* data size in bytes */ 19 + struct scatterlist sg; 20 + DECLARE_CRYPTO_WAIT(wait); 21 + u8 iv[16]; /* AES-256-XTS takes a 16-byte IV */ 22 + u8 key[64]; /* AES-256-XTS takes a 64-byte key */ 23 + int err; 45 24 46 - skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0); 47 - if (IS_ERR(skcipher)) { 48 - pr_info("could not allocate skcipher handle\n"); 49 - return PTR_ERR(skcipher); 50 - } 25 + /* 26 + * Allocate a tfm (a transformation object) and set the key. 
27 + * 28 + * In real-world use, a tfm and key are typically used for many 29 + * encryption/decryption operations. But in this example, we'll just do a 30 + * single encryption operation with it (which is not very efficient). 31 + */ 51 32 52 - req = skcipher_request_alloc(skcipher, GFP_KERNEL); 53 - if (!req) { 54 - pr_info("could not allocate skcipher request\n"); 55 - ret = -ENOMEM; 56 - goto out; 57 - } 33 + tfm = crypto_alloc_skcipher("xts(aes)", 0, 0); 34 + if (IS_ERR(tfm)) { 35 + pr_err("Error allocating xts(aes) handle: %ld\n", PTR_ERR(tfm)); 36 + return PTR_ERR(tfm); 37 + } 58 38 59 - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 60 - crypto_req_done, 61 - &sk.wait); 39 + get_random_bytes(key, sizeof(key)); 40 + err = crypto_skcipher_setkey(tfm, key, sizeof(key)); 41 + if (err) { 42 + pr_err("Error setting key: %d\n", err); 43 + goto out; 44 + } 62 45 63 - /* AES 256 with random key */ 64 - get_random_bytes(&key, 32); 65 - if (crypto_skcipher_setkey(skcipher, key, 32)) { 66 - pr_info("key could not be set\n"); 67 - ret = -EAGAIN; 68 - goto out; 69 - } 46 + /* Allocate a request object */ 47 + req = skcipher_request_alloc(tfm, GFP_KERNEL); 48 + if (!req) { 49 + err = -ENOMEM; 50 + goto out; 51 + } 70 52 71 - /* IV will be random */ 72 - ivdata = kmalloc(16, GFP_KERNEL); 73 - if (!ivdata) { 74 - pr_info("could not allocate ivdata\n"); 75 - goto out; 76 - } 77 - get_random_bytes(ivdata, 16); 53 + /* Prepare the input data */ 54 + data = kmalloc(datasize, GFP_KERNEL); 55 + if (!data) { 56 + err = -ENOMEM; 57 + goto out; 58 + } 59 + get_random_bytes(data, datasize); 78 60 79 - /* Input data will be random */ 80 - scratchpad = kmalloc(16, GFP_KERNEL); 81 - if (!scratchpad) { 82 - pr_info("could not allocate scratchpad\n"); 83 - goto out; 84 - } 85 - get_random_bytes(scratchpad, 16); 61 + /* Initialize the IV */ 62 + get_random_bytes(iv, sizeof(iv)); 86 63 87 - sk.tfm = skcipher; 88 - sk.req = req; 64 + /* 65 + * Encrypt the data in-place. 
66 + * 67 + * For simplicity, in this example we wait for the request to complete 68 + * before proceeding, even if the underlying implementation is asynchronous. 69 + * 70 + * To decrypt instead of encrypt, just change crypto_skcipher_encrypt() to 71 + * crypto_skcipher_decrypt(). 72 + */ 73 + sg_init_one(&sg, data, datasize); 74 + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 75 + CRYPTO_TFM_REQ_MAY_SLEEP, 76 + crypto_req_done, &wait); 77 + skcipher_request_set_crypt(req, &sg, &sg, datasize, iv); 78 + err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 79 + if (err) { 80 + pr_err("Error encrypting data: %d\n", err); 81 + goto out; 82 + } 89 83 90 - /* We encrypt one block */ 91 - sg_init_one(&sk.sg, scratchpad, 16); 92 - skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata); 93 - crypto_init_wait(&sk.wait); 94 - 95 - /* encrypt data */ 96 - ret = test_skcipher_encdec(&sk, 1); 97 - if (ret) 98 - goto out; 99 - 100 - pr_info("Encryption triggered successfully\n"); 101 - 84 + pr_debug("Encryption was successful\n"); 102 85 out: 103 - if (skcipher) 104 - crypto_free_skcipher(skcipher); 105 - if (req) 86 + crypto_free_skcipher(tfm); 106 87 skcipher_request_free(req); 107 - if (ivdata) 108 - kfree(ivdata); 109 - if (scratchpad) 110 - kfree(scratchpad); 111 - return ret; 88 + kfree(data); 89 + return err; 112 90 } 113 91 114 92
+1 -1
Documentation/crypto/api-skcipher.rst
··· 5 5 :doc: Block Cipher Algorithm Definitions 6 6 7 7 .. kernel-doc:: include/linux/crypto.h 8 - :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg 8 + :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg compress_alg 9 9 10 10 Symmetric Key Cipher API 11 11 ------------------------
+1 -3
Documentation/crypto/architecture.rst
··· 208 208 - CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as 209 209 an ECDH or DH implementation 210 210 211 - - CRYPTO_ALG_TYPE_DIGEST Raw message digest 212 - 213 - - CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST 211 + - CRYPTO_ALG_TYPE_HASH Raw message digest 214 212 215 213 - CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash 216 214
+69 -34
Documentation/crypto/crypto_engine.rst
··· 1 - ============= 2 - CRYPTO ENGINE 1 + .. SPDX-License-Identifier: GPL-2.0 2 + Crypto Engine 3 3 ============= 4 4 5 5 Overview 6 6 -------- 7 - The crypto engine API (CE), is a crypto queue manager. 7 + The crypto engine (CE) API is a crypto queue manager. 8 8 9 9 Requirement 10 10 ----------- 11 - You have to put at start of your tfm_ctx the struct crypto_engine_ctx:: 11 + You must put, at the start of your transform context your_tfm_ctx, the structure 12 + crypto_engine: 12 13 13 - struct your_tfm_ctx { 14 - struct crypto_engine_ctx enginectx; 15 - ... 16 - }; 14 + :: 17 15 18 - Why: Since CE manage only crypto_async_request, it cannot know the underlying 19 - request_type and so have access only on the TFM. 20 - So using container_of for accessing __ctx is impossible. 21 - Furthermore, the crypto engine cannot know the "struct your_tfm_ctx", 22 - so it must assume that crypto_engine_ctx is at start of it. 16 + struct your_tfm_ctx { 17 + struct crypto_engine engine; 18 + ... 19 + }; 20 + 21 + The crypto engine only manages asynchronous requests in the form of 22 + crypto_async_request. It cannot know the underlying request type and thus only 23 + has access to the transform structure. It is not possible to access the context 24 + using container_of. In addition, the engine knows nothing about your 25 + structure "``struct your_tfm_ctx``". The engine assumes (requires) the placement 26 + of the known member ``struct crypto_engine`` at the beginning. 23 27 24 28 Order of operations 25 29 ------------------- 26 - You have to obtain a struct crypto_engine via crypto_engine_alloc_init(). 27 - And start it via crypto_engine_start(). 30 + You are required to obtain a struct crypto_engine via ``crypto_engine_alloc_init()``. 31 + Start it via ``crypto_engine_start()``. When finished with your work, shut down the 32 + engine using ``crypto_engine_stop()`` and destroy the engine with 33 + ``crypto_engine_exit()``. 
28 34 29 - Before transferring any request, you have to fill the enginectx. 30 - - prepare_request: (taking a function pointer) If you need to do some processing before doing the request 31 - - unprepare_request: (taking a function pointer) Undoing what's done in prepare_request 32 - - do_one_request: (taking a function pointer) Do encryption for current request 35 + Before transferring any request, you have to fill the context enginectx by 36 + providing functions for the following: 33 37 34 - Note: that those three functions get the crypto_async_request associated with the received request. 35 - So your need to get the original request via container_of(areq, struct yourrequesttype_request, base); 38 + * ``prepare_crypt_hardware``: Called once before any prepare functions are 39 + called. 36 40 37 - When your driver receive a crypto_request, you have to transfer it to 38 - the cryptoengine via one of: 39 - - crypto_transfer_ablkcipher_request_to_engine() 40 - - crypto_transfer_aead_request_to_engine() 41 - - crypto_transfer_akcipher_request_to_engine() 42 - - crypto_transfer_hash_request_to_engine() 43 - - crypto_transfer_skcipher_request_to_engine() 41 + * ``unprepare_crypt_hardware``: Called once after all unprepare functions have 42 + been called. 44 43 45 - At the end of the request process, a call to one of the following function is needed: 46 - - crypto_finalize_ablkcipher_request 47 - - crypto_finalize_aead_request 48 - - crypto_finalize_akcipher_request 49 - - crypto_finalize_hash_request 50 - - crypto_finalize_skcipher_request 44 + * ``prepare_cipher_request``/``prepare_hash_request``: Called before each 45 + corresponding request is performed. If some processing or other preparatory 46 + work is required, do it here. 47 + 48 + * ``unprepare_cipher_request``/``unprepare_hash_request``: Called after each 49 + request is handled. Clean up / undo what was done in the prepare function. 
50 + 51 + * ``cipher_one_request``/``hash_one_request``: Handle the current request by 52 + performing the operation. 53 + 54 + Note that these functions access the crypto_async_request structure 55 + associated with the received request. You are able to retrieve the original 56 + request by using: 57 + 58 + :: 59 + 60 + container_of(areq, struct yourrequesttype_request, base); 61 + 62 + When your driver receives a crypto_request, you must transfer it to 63 + the crypto engine via one of: 64 + 65 + * crypto_transfer_ablkcipher_request_to_engine() 66 + 67 + * crypto_transfer_aead_request_to_engine() 68 + 69 + * crypto_transfer_akcipher_request_to_engine() 70 + 71 + * crypto_transfer_hash_request_to_engine() 72 + 73 + * crypto_transfer_skcipher_request_to_engine() 74 + 75 + At the end of the request process, a call to one of the following functions is needed: 76 + 77 + * crypto_finalize_ablkcipher_request() 78 + 79 + * crypto_finalize_aead_request() 80 + 81 + * crypto_finalize_akcipher_request() 82 + 83 + * crypto_finalize_hash_request() 84 + 85 + * crypto_finalize_skcipher_request()
-13
Documentation/devicetree/bindings/crypto/atmel-crypto.txt
··· 66 66 dmas = <&dma1 2 17>; 67 67 dma-names = "tx"; 68 68 }; 69 - 70 - * Eliptic Curve Cryptography (I2C) 71 - 72 - Required properties: 73 - - compatible : must be "atmel,atecc508a". 74 - - reg: I2C bus address of the device. 75 - - clock-frequency: must be present in the i2c controller node. 76 - 77 - Example: 78 - atecc508a@c0 { 79 - compatible = "atmel,atecc508a"; 80 - reg = <0xC0>; 81 - };
+1
Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
··· 2 2 3 3 Required properties: 4 4 - compatible : Must be one of: 5 + "brcm,bcm7211-rng200" 5 6 "brcm,bcm7278-rng200" 6 7 "brcm,iproc-rng200" 7 8 - reg : base address and size of control register block
+4
Documentation/devicetree/bindings/trivial-devices.yaml
··· 52 52 - at,24c08 53 53 # i2c trusted platform module (TPM) 54 54 - atmel,at97sc3204t 55 + # i2c h/w symmetric crypto module 56 + - atmel,atsha204a 57 + # i2c h/w elliptic curve crypto module 58 + - atmel,atecc508a 55 59 # CM32181: Ambient Light Sensor 56 60 - capella,cm32181 57 61 # CM3232: Ambient Light Sensor
+1
MAINTAINERS
··· 4257 4257 F: drivers/crypto/ 4258 4258 F: include/crypto/ 4259 4259 F: include/linux/crypto* 4260 + F: lib/crypto/ 4260 4261 4261 4262 CRYPTOGRAPHIC RANDOM NUMBER GENERATOR 4262 4263 M: Neil Horman <nhorman@tuxdriver.com>
+23
arch/arm/boot/dts/imx7ulp.dtsi
··· 100 100 reg = <0x40000000 0x800000>; 101 101 ranges; 102 102 103 + crypto: crypto@40240000 { 104 + compatible = "fsl,sec-v4.0"; 105 + #address-cells = <1>; 106 + #size-cells = <1>; 107 + reg = <0x40240000 0x10000>; 108 + ranges = <0 0x40240000 0x10000>; 109 + clocks = <&pcc2 IMX7ULP_CLK_CAAM>, 110 + <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>; 111 + clock-names = "aclk", "ipg"; 112 + 113 + sec_jr0: jr0@1000 { 114 + compatible = "fsl,sec-v4.0-job-ring"; 115 + reg = <0x1000 0x1000>; 116 + interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; 117 + }; 118 + 119 + sec_jr1: jr1@2000 { 120 + compatible = "fsl,sec-v4.0-job-ring"; 121 + reg = <0x2000 0x1000>; 122 + interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; 123 + }; 124 + }; 125 + 103 126 lpuart4: serial@402d0000 { 104 127 compatible = "fsl,imx7ulp-lpuart"; 105 128 reg = <0x402d0000 0x1000>;
+1 -1
arch/arm/crypto/chacha-neon-glue.c
··· 63 63 } 64 64 65 65 static int chacha_neon_stream_xor(struct skcipher_request *req, 66 - struct chacha_ctx *ctx, u8 *iv) 66 + const struct chacha_ctx *ctx, const u8 *iv) 67 67 { 68 68 struct skcipher_walk walk; 69 69 u32 state[16];
+1 -1
arch/arm/crypto/sha512-glue.c
··· 34 34 (sha512_block_fn *)sha512_block_data_order); 35 35 } 36 36 37 - int sha512_arm_final(struct shash_desc *desc, u8 *out) 37 + static int sha512_arm_final(struct shash_desc *desc, u8 *out) 38 38 { 39 39 sha512_base_do_finalize(desc, 40 40 (sha512_block_fn *)sha512_block_data_order);
+37 -23
arch/arm64/crypto/aes-ce.S
··· 15 15 .arch armv8-a+crypto 16 16 17 17 xtsmask .req v16 18 + cbciv .req v16 19 + vctr .req v16 18 20 19 21 .macro xts_reload_mask, tmp 20 22 .endm ··· 51 49 load_round_keys \rounds, \temp 52 50 .endm 53 51 54 - .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3 52 + .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3, i4 55 53 aes\de \i0\().16b, \k\().16b 56 54 aes\mc \i0\().16b, \i0\().16b 57 55 .ifnb \i1 ··· 62 60 aes\mc \i2\().16b, \i2\().16b 63 61 aes\de \i3\().16b, \k\().16b 64 62 aes\mc \i3\().16b, \i3\().16b 63 + .ifnb \i4 64 + aes\de \i4\().16b, \k\().16b 65 + aes\mc \i4\().16b, \i4\().16b 66 + .endif 65 67 .endif 66 68 .endif 67 69 .endm 68 70 69 - /* up to 4 interleaved encryption rounds with the same round key */ 70 - .macro round_Nx, enc, k, i0, i1, i2, i3 71 + /* up to 5 interleaved encryption rounds with the same round key */ 72 + .macro round_Nx, enc, k, i0, i1, i2, i3, i4 71 73 .ifc \enc, e 72 - do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3 74 + do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3, \i4 73 75 .else 74 - do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3 76 + do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3, \i4 75 77 .endif 76 78 .endm 77 79 78 - /* up to 4 interleaved final rounds */ 79 - .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3 80 + /* up to 5 interleaved final rounds */ 81 + .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3, i4 80 82 aes\de \i0\().16b, \k\().16b 81 83 .ifnb \i1 82 84 aes\de \i1\().16b, \k\().16b 83 85 .ifnb \i3 84 86 aes\de \i2\().16b, \k\().16b 85 87 aes\de \i3\().16b, \k\().16b 88 + .ifnb \i4 89 + aes\de \i4\().16b, \k\().16b 90 + .endif 86 91 .endif 87 92 .endif 88 93 eor \i0\().16b, \i0\().16b, \k2\().16b ··· 98 89 .ifnb \i3 99 90 eor \i2\().16b, \i2\().16b, \k2\().16b 100 91 eor \i3\().16b, \i3\().16b, \k2\().16b 92 + .ifnb \i4 93 + eor \i4\().16b, \i4\().16b, \k2\().16b 94 + .endif 101 95 .endif 102 96 .endif 103 97 .endm 104 98 105 - /* up to 4 interleaved blocks */ 106 - .macro do_block_Nx, enc, rounds, i0, i1, i2, i3 99 + /* up to 5 interleaved 
blocks */ 100 + .macro do_block_Nx, enc, rounds, i0, i1, i2, i3, i4 107 101 cmp \rounds, #12 108 102 blo 2222f /* 128 bits */ 109 103 beq 1111f /* 192 bits */ 110 - round_Nx \enc, v17, \i0, \i1, \i2, \i3 111 - round_Nx \enc, v18, \i0, \i1, \i2, \i3 112 - 1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3 113 - round_Nx \enc, v20, \i0, \i1, \i2, \i3 104 + round_Nx \enc, v17, \i0, \i1, \i2, \i3, \i4 105 + round_Nx \enc, v18, \i0, \i1, \i2, \i3, \i4 106 + 1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3, \i4 107 + round_Nx \enc, v20, \i0, \i1, \i2, \i3, \i4 114 108 2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29 115 - round_Nx \enc, \key, \i0, \i1, \i2, \i3 109 + round_Nx \enc, \key, \i0, \i1, \i2, \i3, \i4 116 110 .endr 117 - fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3 111 + fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3, \i4 118 112 .endm 119 113 120 114 .macro encrypt_block, in, rounds, t0, t1, t2 121 115 do_block_Nx e, \rounds, \in 122 116 .endm 123 117 124 - .macro encrypt_block2x, i0, i1, rounds, t0, t1, t2 125 - do_block_Nx e, \rounds, \i0, \i1 126 - .endm 127 - 128 118 .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 129 119 do_block_Nx e, \rounds, \i0, \i1, \i2, \i3 120 + .endm 121 + 122 + .macro encrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2 123 + do_block_Nx e, \rounds, \i0, \i1, \i2, \i3, \i4 130 124 .endm 131 125 132 126 .macro decrypt_block, in, rounds, t0, t1, t2 133 127 do_block_Nx d, \rounds, \in 134 128 .endm 135 129 136 - .macro decrypt_block2x, i0, i1, rounds, t0, t1, t2 137 - do_block_Nx d, \rounds, \i0, \i1 138 - .endm 139 - 140 130 .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 141 131 do_block_Nx d, \rounds, \i0, \i1, \i2, \i3 142 132 .endm 133 + 134 + .macro decrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2 135 + do_block_Nx d, \rounds, \i0, \i1, \i2, \i3, \i4 136 + .endm 137 + 138 + #define MAX_STRIDE 5 143 139 144 140 #include "aes-modes.S"
+87 -31
arch/arm64/crypto/aes-modes.S
··· 10 10 .text 11 11 .align 4 12 12 13 + #ifndef MAX_STRIDE 14 + #define MAX_STRIDE 4 15 + #endif 16 + 17 + #if MAX_STRIDE == 4 18 + #define ST4(x...) x 19 + #define ST5(x...) 20 + #else 21 + #define ST4(x...) 22 + #define ST5(x...) x 23 + #endif 24 + 13 25 aes_encrypt_block4x: 14 26 encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 15 27 ret ··· 31 19 decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 32 20 ret 33 21 ENDPROC(aes_decrypt_block4x) 22 + 23 + #if MAX_STRIDE == 5 24 + aes_encrypt_block5x: 25 + encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 26 + ret 27 + ENDPROC(aes_encrypt_block5x) 28 + 29 + aes_decrypt_block5x: 30 + decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 31 + ret 32 + ENDPROC(aes_decrypt_block5x) 33 + #endif 34 34 35 35 /* 36 36 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, ··· 58 34 enc_prepare w3, x2, x5 59 35 60 36 .LecbencloopNx: 61 - subs w4, w4, #4 37 + subs w4, w4, #MAX_STRIDE 62 38 bmi .Lecbenc1x 63 39 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ 64 - bl aes_encrypt_block4x 40 + ST4( bl aes_encrypt_block4x ) 41 + ST5( ld1 {v4.16b}, [x1], #16 ) 42 + ST5( bl aes_encrypt_block5x ) 65 43 st1 {v0.16b-v3.16b}, [x0], #64 44 + ST5( st1 {v4.16b}, [x0], #16 ) 66 45 b .LecbencloopNx 67 46 .Lecbenc1x: 68 - adds w4, w4, #4 47 + adds w4, w4, #MAX_STRIDE 69 48 beq .Lecbencout 70 49 .Lecbencloop: 71 50 ld1 {v0.16b}, [x1], #16 /* get next pt block */ ··· 89 62 dec_prepare w3, x2, x5 90 63 91 64 .LecbdecloopNx: 92 - subs w4, w4, #4 65 + subs w4, w4, #MAX_STRIDE 93 66 bmi .Lecbdec1x 94 67 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ 95 - bl aes_decrypt_block4x 68 + ST4( bl aes_decrypt_block4x ) 69 + ST5( ld1 {v4.16b}, [x1], #16 ) 70 + ST5( bl aes_decrypt_block5x ) 96 71 st1 {v0.16b-v3.16b}, [x0], #64 72 + ST5( st1 {v4.16b}, [x0], #16 ) 97 73 b .LecbdecloopNx 98 74 .Lecbdec1x: 99 - adds w4, w4, #4 75 + adds w4, w4, #MAX_STRIDE 100 76 beq .Lecbdecout 101 77 .Lecbdecloop: 102 78 ld1 {v0.16b}, [x1], #16 /* get next 
ct block */ ··· 159 129 stp x29, x30, [sp, #-16]! 160 130 mov x29, sp 161 131 162 - ld1 {v7.16b}, [x5] /* get iv */ 132 + ld1 {cbciv.16b}, [x5] /* get iv */ 163 133 dec_prepare w3, x2, x6 164 134 165 135 .LcbcdecloopNx: 166 - subs w4, w4, #4 136 + subs w4, w4, #MAX_STRIDE 167 137 bmi .Lcbcdec1x 168 138 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ 139 + #if MAX_STRIDE == 5 140 + ld1 {v4.16b}, [x1], #16 /* get 1 ct block */ 141 + mov v5.16b, v0.16b 142 + mov v6.16b, v1.16b 143 + mov v7.16b, v2.16b 144 + bl aes_decrypt_block5x 145 + sub x1, x1, #32 146 + eor v0.16b, v0.16b, cbciv.16b 147 + eor v1.16b, v1.16b, v5.16b 148 + ld1 {v5.16b}, [x1], #16 /* reload 1 ct block */ 149 + ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */ 150 + eor v2.16b, v2.16b, v6.16b 151 + eor v3.16b, v3.16b, v7.16b 152 + eor v4.16b, v4.16b, v5.16b 153 + #else 169 154 mov v4.16b, v0.16b 170 155 mov v5.16b, v1.16b 171 156 mov v6.16b, v2.16b 172 157 bl aes_decrypt_block4x 173 158 sub x1, x1, #16 174 - eor v0.16b, v0.16b, v7.16b 159 + eor v0.16b, v0.16b, cbciv.16b 175 160 eor v1.16b, v1.16b, v4.16b 176 - ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */ 161 + ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */ 177 162 eor v2.16b, v2.16b, v5.16b 178 163 eor v3.16b, v3.16b, v6.16b 164 + #endif 179 165 st1 {v0.16b-v3.16b}, [x0], #64 166 + ST5( st1 {v4.16b}, [x0], #16 ) 180 167 b .LcbcdecloopNx 181 168 .Lcbcdec1x: 182 - adds w4, w4, #4 169 + adds w4, w4, #MAX_STRIDE 183 170 beq .Lcbcdecout 184 171 .Lcbcdecloop: 185 172 ld1 {v1.16b}, [x1], #16 /* get next ct block */ 186 173 mov v0.16b, v1.16b /* ...and copy to v0 */ 187 174 decrypt_block v0, w3, x2, x6, w7 188 - eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ 189 - mov v7.16b, v1.16b /* ct is next iv */ 175 + eor v0.16b, v0.16b, cbciv.16b /* xor with iv => pt */ 176 + mov cbciv.16b, v1.16b /* ct is next iv */ 190 177 st1 {v0.16b}, [x0], #16 191 178 subs w4, w4, #1 192 179 bne .Lcbcdecloop 193 180 .Lcbcdecout: 194 - st1 {v7.16b}, [x5] /* return 
iv */ 181 + st1 {cbciv.16b}, [x5] /* return iv */ 195 182 ldp x29, x30, [sp], #16 196 183 ret 197 184 AES_ENDPROC(aes_cbc_decrypt) ··· 302 255 mov x29, sp 303 256 304 257 enc_prepare w3, x2, x6 305 - ld1 {v4.16b}, [x5] 258 + ld1 {vctr.16b}, [x5] 306 259 307 - umov x6, v4.d[1] /* keep swabbed ctr in reg */ 260 + umov x6, vctr.d[1] /* keep swabbed ctr in reg */ 308 261 rev x6, x6 309 262 cmn w6, w4 /* 32 bit overflow? */ 310 263 bcs .Lctrloop 311 264 .LctrloopNx: 312 - subs w4, w4, #4 265 + subs w4, w4, #MAX_STRIDE 313 266 bmi .Lctr1x 314 267 add w7, w6, #1 315 - mov v0.16b, v4.16b 268 + mov v0.16b, vctr.16b 316 269 add w8, w6, #2 317 - mov v1.16b, v4.16b 270 + mov v1.16b, vctr.16b 318 271 add w9, w6, #3 319 - mov v2.16b, v4.16b 272 + mov v2.16b, vctr.16b 273 + add w9, w6, #3 320 274 rev w7, w7 321 - mov v3.16b, v4.16b 275 + mov v3.16b, vctr.16b 322 276 rev w8, w8 277 + ST5( mov v4.16b, vctr.16b ) 323 278 mov v1.s[3], w7 324 279 rev w9, w9 280 + ST5( add w10, w6, #4 ) 325 281 mov v2.s[3], w8 282 + ST5( rev w10, w10 ) 326 283 mov v3.s[3], w9 284 + ST5( mov v4.s[3], w10 ) 327 285 ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ 328 - bl aes_encrypt_block4x 286 + ST4( bl aes_encrypt_block4x ) 287 + ST5( bl aes_encrypt_block5x ) 329 288 eor v0.16b, v5.16b, v0.16b 330 - ld1 {v5.16b}, [x1], #16 /* get 1 input block */ 289 + ST4( ld1 {v5.16b}, [x1], #16 ) 331 290 eor v1.16b, v6.16b, v1.16b 291 + ST5( ld1 {v5.16b-v6.16b}, [x1], #32 ) 332 292 eor v2.16b, v7.16b, v2.16b 333 293 eor v3.16b, v5.16b, v3.16b 294 + ST5( eor v4.16b, v6.16b, v4.16b ) 334 295 st1 {v0.16b-v3.16b}, [x0], #64 335 - add x6, x6, #4 296 + ST5( st1 {v4.16b}, [x0], #16 ) 297 + add x6, x6, #MAX_STRIDE 336 298 rev x7, x6 337 - ins v4.d[1], x7 299 + ins vctr.d[1], x7 338 300 cbz w4, .Lctrout 339 301 b .LctrloopNx 340 302 .Lctr1x: 341 - adds w4, w4, #4 303 + adds w4, w4, #MAX_STRIDE 342 304 beq .Lctrout 343 305 .Lctrloop: 344 - mov v0.16b, v4.16b 306 + mov v0.16b, vctr.16b 345 307 encrypt_block v0, w3, x2, 
x8, w7 346 308 347 309 adds x6, x6, #1 /* increment BE ctr */ 348 310 rev x7, x6 349 - ins v4.d[1], x7 311 + ins vctr.d[1], x7 350 312 bcs .Lctrcarry /* overflow? */ 351 313 352 314 .Lctrcarrydone: ··· 367 311 bne .Lctrloop 368 312 369 313 .Lctrout: 370 - st1 {v4.16b}, [x5] /* return next CTR value */ 314 + st1 {vctr.16b}, [x5] /* return next CTR value */ 371 315 ldp x29, x30, [sp], #16 372 316 ret 373 317 ··· 376 320 b .Lctrout 377 321 378 322 .Lctrcarry: 379 - umov x7, v4.d[0] /* load upper word of ctr */ 323 + umov x7, vctr.d[0] /* load upper word of ctr */ 380 324 rev x7, x7 /* ... to handle the carry */ 381 325 add x7, x7, #1 382 326 rev x7, x7 383 - ins v4.d[0], x7 327 + ins vctr.d[0], x7 384 328 b .Lctrcarrydone 385 329 AES_ENDPROC(aes_ctr_encrypt) 386 330
+3 -45
arch/arm64/crypto/aes-neon.S
··· 12 12 #define AES_ENDPROC(func) ENDPROC(neon_ ## func) 13 13 14 14 xtsmask .req v7 15 + cbciv .req v7 16 + vctr .req v4 15 17 16 18 .macro xts_reload_mask, tmp 17 19 xts_load_mask \tmp ··· 116 114 117 115 /* 118 116 * Interleaved versions: functionally equivalent to the 119 - * ones above, but applied to 2 or 4 AES states in parallel. 117 + * ones above, but applied to AES states in parallel. 120 118 */ 121 - 122 - .macro sub_bytes_2x, in0, in1 123 - sub v8.16b, \in0\().16b, v15.16b 124 - tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b 125 - sub v9.16b, \in1\().16b, v15.16b 126 - tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b 127 - sub v10.16b, v8.16b, v15.16b 128 - tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b 129 - sub v11.16b, v9.16b, v15.16b 130 - tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b 131 - sub v8.16b, v10.16b, v15.16b 132 - tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b 133 - sub v9.16b, v11.16b, v15.16b 134 - tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b 135 - tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b 136 - tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b 137 - .endm 138 119 139 120 .macro sub_bytes_4x, in0, in1, in2, in3 140 121 sub v8.16b, \in0\().16b, v15.16b ··· 197 212 eor \in1\().16b, \in1\().16b, v11.16b 198 213 .endm 199 214 200 - .macro do_block_2x, enc, in0, in1, rounds, rk, rkp, i 201 - ld1 {v15.4s}, [\rk] 202 - add \rkp, \rk, #16 203 - mov \i, \rounds 204 - 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ 205 - eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ 206 - movi v15.16b, #0x40 207 - tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ 208 - tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ 209 - sub_bytes_2x \in0, \in1 210 - subs \i, \i, #1 211 - ld1 {v15.4s}, [\rkp], #16 212 - beq 2222f 213 - mix_columns_2x \in0, \in1, \enc 214 - b 1111b 215 - 2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ 216 - eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ 217 - .endm 218 - 219 215 .macro do_block_4x, enc, 
in0, in1, in2, in3, rounds, rk, rkp, i 220 216 ld1 {v15.4s}, [\rk] 221 217 add \rkp, \rk, #16 ··· 221 255 eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ 222 256 eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */ 223 257 eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */ 224 - .endm 225 - 226 - .macro encrypt_block2x, in0, in1, rounds, rk, rkp, i 227 - do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i 228 - .endm 229 - 230 - .macro decrypt_block2x, in0, in1, rounds, rk, rkp, i 231 - do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i 232 258 .endm 233 259 234 260 .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
+1 -1
arch/arm64/crypto/chacha-neon-glue.c
··· 60 60 } 61 61 62 62 static int chacha_neon_stream_xor(struct skcipher_request *req, 63 - struct chacha_ctx *ctx, u8 *iv) 63 + const struct chacha_ctx *ctx, const u8 *iv) 64 64 { 65 65 struct skcipher_walk walk; 66 66 u32 state[16];
+1 -1
arch/arm64/crypto/sha1-ce-glue.c
··· 52 52 unsigned int len, u8 *out) 53 53 { 54 54 struct sha1_ce_state *sctx = shash_desc_ctx(desc); 55 - bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); 55 + bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len; 56 56 57 57 if (!crypto_simd_usable()) 58 58 return crypto_sha1_finup(desc, data, len, out);
+1 -1
arch/arm64/crypto/sha2-ce-glue.c
··· 57 57 unsigned int len, u8 *out) 58 58 { 59 59 struct sha256_ce_state *sctx = shash_desc_ctx(desc); 60 - bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); 60 + bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len; 61 61 62 62 if (!crypto_simd_usable()) { 63 63 if (len)
+7 -38
arch/x86/crypto/aesni-intel_glue.c
··· 371 371 } 372 372 } 373 373 374 - static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 375 - { 376 - struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 377 - 378 - aesni_enc(ctx, dst, src); 379 - } 380 - 381 - static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 382 - { 383 - struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 384 - 385 - aesni_dec(ctx, dst, src); 386 - } 387 - 388 374 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, 389 375 unsigned int len) 390 376 { ··· 906 920 } 907 921 #endif 908 922 909 - static struct crypto_alg aesni_algs[] = { { 923 + static struct crypto_alg aesni_cipher_alg = { 910 924 .cra_name = "aes", 911 925 .cra_driver_name = "aes-aesni", 912 926 .cra_priority = 300, ··· 923 937 .cia_decrypt = aes_decrypt 924 938 } 925 939 } 926 - }, { 927 - .cra_name = "__aes", 928 - .cra_driver_name = "__aes-aesni", 929 - .cra_priority = 300, 930 - .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL, 931 - .cra_blocksize = AES_BLOCK_SIZE, 932 - .cra_ctxsize = CRYPTO_AES_CTX_SIZE, 933 - .cra_module = THIS_MODULE, 934 - .cra_u = { 935 - .cipher = { 936 - .cia_min_keysize = AES_MIN_KEY_SIZE, 937 - .cia_max_keysize = AES_MAX_KEY_SIZE, 938 - .cia_setkey = aes_set_key, 939 - .cia_encrypt = __aes_encrypt, 940 - .cia_decrypt = __aes_decrypt 941 - } 942 - } 943 - } }; 940 + }; 944 941 945 942 static struct skcipher_alg aesni_skciphers[] = { 946 943 { ··· 1119 1150 #endif 1120 1151 #endif 1121 1152 1122 - err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); 1153 + err = crypto_register_alg(&aesni_cipher_alg); 1123 1154 if (err) 1124 1155 return err; 1125 1156 ··· 1127 1158 ARRAY_SIZE(aesni_skciphers), 1128 1159 aesni_simd_skciphers); 1129 1160 if (err) 1130 - goto unregister_algs; 1161 + goto unregister_cipher; 1131 1162 1132 1163 err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads), 1133 1164 aesni_simd_aeads); ··· 1139 1170 
unregister_skciphers: 1140 1171 simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), 1141 1172 aesni_simd_skciphers); 1142 - unregister_algs: 1143 - crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); 1173 + unregister_cipher: 1174 + crypto_unregister_alg(&aesni_cipher_alg); 1144 1175 return err; 1145 1176 } 1146 1177 ··· 1150 1181 aesni_simd_aeads); 1151 1182 simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), 1152 1183 aesni_simd_skciphers); 1153 - crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); 1184 + crypto_unregister_alg(&aesni_cipher_alg); 1154 1185 } 1155 1186 1156 1187 late_initcall(aesni_init);
+1 -1
arch/x86/crypto/chacha_glue.c
··· 124 124 } 125 125 126 126 static int chacha_simd_stream_xor(struct skcipher_walk *walk, 127 - struct chacha_ctx *ctx, u8 *iv) 127 + const struct chacha_ctx *ctx, const u8 *iv) 128 128 { 129 129 u32 *state, state_buf[16 + 2] __aligned(8); 130 130 int next_yield = 4096; /* bytes until next FPU yield */
+25 -14
crypto/Kconfig
··· 61 61 tristate 62 62 select CRYPTO_ALGAPI2 63 63 select CRYPTO_RNG2 64 - select CRYPTO_WORKQUEUE 65 64 66 65 config CRYPTO_HASH 67 66 tristate ··· 136 137 Userspace configuration for cryptographic instantiations such as 137 138 cbc(aes). 138 139 140 + if CRYPTO_MANAGER2 141 + 139 142 config CRYPTO_MANAGER_DISABLE_TESTS 140 143 bool "Disable run-time self tests" 141 144 default y 142 - depends on CRYPTO_MANAGER2 143 145 help 144 146 Disable run-time self tests that normally take place at 145 147 algorithm registration. ··· 155 155 This is intended for developer use only, as these tests take much 156 156 longer to run than the normal self tests. 157 157 158 + endif # if CRYPTO_MANAGER2 159 + 158 160 config CRYPTO_GF128MUL 159 - tristate "GF(2^128) multiplication functions" 160 - help 161 - Efficient table driven implementation of multiplications in the 162 - field GF(2^128). This is needed by some cypher modes. This 163 - option will be selected automatically if you select such a 164 - cipher mode. Only select this option by hand if you expect to load 165 - an external module that requires these functions. 161 + tristate 166 162 167 163 config CRYPTO_NULL 168 164 tristate "Null algorithms" ··· 182 186 This converts an arbitrary crypto algorithm into a parallel 183 187 algorithm that executes in kernel threads. 184 188 185 - config CRYPTO_WORKQUEUE 186 - tristate 187 - 188 189 config CRYPTO_CRYPTD 189 190 tristate "Software async crypto daemon" 190 191 select CRYPTO_BLKCIPHER 191 192 select CRYPTO_HASH 192 193 select CRYPTO_MANAGER 193 - select CRYPTO_WORKQUEUE 194 194 help 195 195 This is a generic software asynchronous crypto daemon that 196 196 converts an arbitrary synchronous software crypto algorithm ··· 271 279 select CRYPTO_CTR 272 280 select CRYPTO_HASH 273 281 select CRYPTO_AEAD 282 + select CRYPTO_MANAGER 274 283 help 275 284 Support for Counter with CBC MAC. Required for IPsec. 
276 285 ··· 281 288 select CRYPTO_AEAD 282 289 select CRYPTO_GHASH 283 290 select CRYPTO_NULL 291 + select CRYPTO_MANAGER 284 292 help 285 293 Support for Galois/Counter Mode (GCM) and Galois Message 286 294 Authentication Code (GMAC). Required for IPSec. ··· 291 297 select CRYPTO_CHACHA20 292 298 select CRYPTO_POLY1305 293 299 select CRYPTO_AEAD 300 + select CRYPTO_MANAGER 294 301 help 295 302 ChaCha20-Poly1305 AEAD support, RFC7539. 296 303 ··· 406 411 select CRYPTO_BLKCIPHER 407 412 select CRYPTO_NULL 408 413 select CRYPTO_RNG_DEFAULT 414 + select CRYPTO_MANAGER 409 415 help 410 416 This IV generator generates an IV based on a sequence number by 411 417 xoring it with a salt. This algorithm is mainly useful for CTR ··· 416 420 select CRYPTO_AEAD 417 421 select CRYPTO_NULL 418 422 select CRYPTO_RNG_DEFAULT 419 - default m 423 + select CRYPTO_MANAGER 420 424 help 421 425 This IV generator generates an IV based on the encryption of 422 426 a sequence number xored with a salt. This is the default ··· 452 456 config CRYPTO_CTS 453 457 tristate "CTS support" 454 458 select CRYPTO_BLKCIPHER 459 + select CRYPTO_MANAGER 455 460 help 456 461 CTS: Cipher Text Stealing 457 462 This is the Cipher Text Stealing mode as described by ··· 518 521 config CRYPTO_KEYWRAP 519 522 tristate "Key wrapping support" 520 523 select CRYPTO_BLKCIPHER 524 + select CRYPTO_MANAGER 521 525 help 522 526 Support for key wrapping (NIST SP800-38F / RFC3394) without 523 527 padding. ··· 549 551 select CRYPTO_CHACHA20 550 552 select CRYPTO_POLY1305 551 553 select CRYPTO_NHPOLY1305 554 + select CRYPTO_MANAGER 552 555 help 553 556 Adiantum is a tweakable, length-preserving encryption mode 554 557 designed for fast and secure disk encryption, especially on ··· 682 683 CRC32c and CRC32 CRC algorithms implemented using mips crypto 683 684 instructions, when available. 
684 685 686 + 687 + config CRYPTO_XXHASH 688 + tristate "xxHash hash algorithm" 689 + select CRYPTO_HASH 690 + select XXHASH 691 + help 692 + xxHash non-cryptographic hash algorithm. Extremely fast, working at 693 + speeds close to RAM limits. 685 694 686 695 config CRYPTO_CRCT10DIF 687 696 tristate "CRCT10DIF algorithm" ··· 1237 1230 <https://www.cosic.esat.kuleuven.be/nessie/reports/> 1238 1231 <http://www.larc.usp.br/~pbarreto/AnubisPage.html> 1239 1232 1233 + config CRYPTO_LIB_ARC4 1234 + tristate 1235 + 1240 1236 config CRYPTO_ARC4 1241 1237 tristate "ARC4 cipher algorithm" 1242 1238 select CRYPTO_BLKCIPHER 1239 + select CRYPTO_LIB_ARC4 1243 1240 help 1244 1241 ARC4 cipher algorithm. 1245 1242
+1 -2
crypto/Makefile
··· 6 6 obj-$(CONFIG_CRYPTO) += crypto.o 7 7 crypto-y := api.o cipher.o compress.o memneq.o 8 8 9 - obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o 10 - 11 9 obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o 12 10 obj-$(CONFIG_CRYPTO_FIPS) += fips.o 13 11 ··· 129 131 obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o 130 132 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o 131 133 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o 134 + obj-$(CONFIG_CRYPTO_XXHASH) += xxhash_generic.o 132 135 obj-$(CONFIG_CRYPTO_842) += 842.o 133 136 obj-$(CONFIG_CRYPTO_RNG2) += rng.o 134 137 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
+36
crypto/aead.c
··· 84 84 } 85 85 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); 86 86 87 + int crypto_aead_encrypt(struct aead_request *req) 88 + { 89 + struct crypto_aead *aead = crypto_aead_reqtfm(req); 90 + struct crypto_alg *alg = aead->base.__crt_alg; 91 + unsigned int cryptlen = req->cryptlen; 92 + int ret; 93 + 94 + crypto_stats_get(alg); 95 + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 96 + ret = -ENOKEY; 97 + else 98 + ret = crypto_aead_alg(aead)->encrypt(req); 99 + crypto_stats_aead_encrypt(cryptlen, alg, ret); 100 + return ret; 101 + } 102 + EXPORT_SYMBOL_GPL(crypto_aead_encrypt); 103 + 104 + int crypto_aead_decrypt(struct aead_request *req) 105 + { 106 + struct crypto_aead *aead = crypto_aead_reqtfm(req); 107 + struct crypto_alg *alg = aead->base.__crt_alg; 108 + unsigned int cryptlen = req->cryptlen; 109 + int ret; 110 + 111 + crypto_stats_get(alg); 112 + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 113 + ret = -ENOKEY; 114 + else if (req->cryptlen < crypto_aead_authsize(aead)) 115 + ret = -EINVAL; 116 + else 117 + ret = crypto_aead_alg(aead)->decrypt(req); 118 + crypto_stats_aead_decrypt(cryptlen, alg, ret); 119 + return ret; 120 + } 121 + EXPORT_SYMBOL_GPL(crypto_aead_decrypt); 122 + 87 123 static void crypto_aead_exit_tfm(struct crypto_tfm *tfm) 88 124 { 89 125 struct crypto_aead *aead = __crypto_aead_cast(tfm);
+4 -31
crypto/algapi.c
··· 21 21 22 22 static LIST_HEAD(crypto_template_list); 23 23 24 - static inline int crypto_set_driver_name(struct crypto_alg *alg) 25 - { 26 - static const char suffix[] = "-generic"; 27 - char *driver_name = alg->cra_driver_name; 28 - int len; 29 - 30 - if (*driver_name) 31 - return 0; 32 - 33 - len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); 34 - if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) 35 - return -ENAMETOOLONG; 36 - 37 - memcpy(driver_name + len, suffix, sizeof(suffix)); 38 - return 0; 39 - } 40 - 41 24 static inline void crypto_check_module_sig(struct module *mod) 42 25 { 43 26 if (fips_enabled && mod && !module_sig_ok(mod)) ··· 31 48 static int crypto_check_alg(struct crypto_alg *alg) 32 49 { 33 50 crypto_check_module_sig(alg->cra_module); 51 + 52 + if (!alg->cra_name[0] || !alg->cra_driver_name[0]) 53 + return -EINVAL; 34 54 35 55 if (alg->cra_alignmask & (alg->cra_alignmask + 1)) 36 56 return -EINVAL; ··· 60 74 61 75 refcount_set(&alg->cra_refcnt, 1); 62 76 63 - return crypto_set_driver_name(alg); 77 + return 0; 64 78 } 65 79 66 80 static void crypto_free_instance(struct crypto_instance *inst) ··· 932 946 return list_entry(request, struct crypto_async_request, list); 933 947 } 934 948 EXPORT_SYMBOL_GPL(crypto_dequeue_request); 935 - 936 - int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) 937 - { 938 - struct crypto_async_request *req; 939 - 940 - list_for_each_entry(req, &queue->list, list) { 941 - if (req->tfm == tfm) 942 - return 1; 943 - } 944 - 945 - return 0; 946 - } 947 - EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); 948 949 949 950 static inline void crypto_inc_byte(u8 *a, unsigned int size) 950 951 {
+1
crypto/anubis.c
··· 673 673 674 674 static struct crypto_alg anubis_alg = { 675 675 .cra_name = "anubis", 676 + .cra_driver_name = "anubis-generic", 676 677 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 677 678 .cra_blocksize = ANUBIS_BLOCK_SIZE, 678 679 .cra_ctxsize = sizeof (struct anubis_ctx),
+17 -108
crypto/arc4.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/module.h> 15 15 16 - struct arc4_ctx { 17 - u32 S[256]; 18 - u32 x, y; 19 - }; 20 - 21 - static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, 22 - unsigned int key_len) 16 + static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 17 + unsigned int key_len) 23 18 { 24 - struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); 25 - int i, j = 0, k = 0; 19 + struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); 26 20 27 - ctx->x = 1; 28 - ctx->y = 0; 29 - 30 - for (i = 0; i < 256; i++) 31 - ctx->S[i] = i; 32 - 33 - for (i = 0; i < 256; i++) { 34 - u32 a = ctx->S[i]; 35 - j = (j + in_key[k] + a) & 0xff; 36 - ctx->S[i] = ctx->S[j]; 37 - ctx->S[j] = a; 38 - if (++k >= key_len) 39 - k = 0; 40 - } 41 - 42 - return 0; 21 + return arc4_setkey(ctx, in_key, key_len); 43 22 } 44 23 45 - static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, 46 - unsigned int key_len) 47 - { 48 - return arc4_set_key(&tfm->base, in_key, key_len); 49 - } 50 - 51 - static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, 52 - unsigned int len) 53 - { 54 - u32 *const S = ctx->S; 55 - u32 x, y, a, b; 56 - u32 ty, ta, tb; 57 - 58 - if (len == 0) 59 - return; 60 - 61 - x = ctx->x; 62 - y = ctx->y; 63 - 64 - a = S[x]; 65 - y = (y + a) & 0xff; 66 - b = S[y]; 67 - 68 - do { 69 - S[y] = a; 70 - a = (a + b) & 0xff; 71 - S[x] = b; 72 - x = (x + 1) & 0xff; 73 - ta = S[x]; 74 - ty = (y + ta) & 0xff; 75 - tb = S[ty]; 76 - *out++ = *in++ ^ S[a]; 77 - if (--len == 0) 78 - break; 79 - y = ty; 80 - a = ta; 81 - b = tb; 82 - } while (true); 83 - 84 - ctx->x = x; 85 - ctx->y = y; 86 - } 87 - 88 - static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in) 89 - { 90 - arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1); 91 - } 92 - 93 - static int ecb_arc4_crypt(struct skcipher_request *req) 24 + static int crypto_arc4_crypt(struct skcipher_request *req) 94 25 { 95 26 struct crypto_skcipher *tfm = 
crypto_skcipher_reqtfm(req); 96 27 struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); ··· 39 108 return err; 40 109 } 41 110 42 - static struct crypto_alg arc4_cipher = { 43 - .cra_name = "arc4", 44 - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 45 - .cra_blocksize = ARC4_BLOCK_SIZE, 46 - .cra_ctxsize = sizeof(struct arc4_ctx), 47 - .cra_module = THIS_MODULE, 48 - .cra_u = { 49 - .cipher = { 50 - .cia_min_keysize = ARC4_MIN_KEY_SIZE, 51 - .cia_max_keysize = ARC4_MAX_KEY_SIZE, 52 - .cia_setkey = arc4_set_key, 53 - .cia_encrypt = arc4_crypt_one, 54 - .cia_decrypt = arc4_crypt_one, 55 - }, 56 - }, 57 - }; 58 - 59 - static struct skcipher_alg arc4_skcipher = { 111 + static struct skcipher_alg arc4_alg = { 112 + /* 113 + * For legacy reasons, this is named "ecb(arc4)", not "arc4". 114 + * Nevertheless it's actually a stream cipher, not a block cipher. 115 + */ 60 116 .base.cra_name = "ecb(arc4)", 117 + .base.cra_driver_name = "ecb(arc4)-generic", 61 118 .base.cra_priority = 100, 62 119 .base.cra_blocksize = ARC4_BLOCK_SIZE, 63 120 .base.cra_ctxsize = sizeof(struct arc4_ctx), 64 121 .base.cra_module = THIS_MODULE, 65 122 .min_keysize = ARC4_MIN_KEY_SIZE, 66 123 .max_keysize = ARC4_MAX_KEY_SIZE, 67 - .setkey = arc4_set_key_skcipher, 68 - .encrypt = ecb_arc4_crypt, 69 - .decrypt = ecb_arc4_crypt, 124 + .setkey = crypto_arc4_setkey, 125 + .encrypt = crypto_arc4_crypt, 126 + .decrypt = crypto_arc4_crypt, 70 127 }; 71 128 72 129 static int __init arc4_init(void) 73 130 { 74 - int err; 75 - 76 - err = crypto_register_alg(&arc4_cipher); 77 - if (err) 78 - return err; 79 - 80 - err = crypto_register_skcipher(&arc4_skcipher); 81 - if (err) 82 - crypto_unregister_alg(&arc4_cipher); 83 - return err; 131 + return crypto_register_skcipher(&arc4_alg); 84 132 } 85 133 86 134 static void __exit arc4_exit(void) 87 135 { 88 - crypto_unregister_alg(&arc4_cipher); 89 - crypto_unregister_skcipher(&arc4_skcipher); 136 + crypto_unregister_skcipher(&arc4_alg); 90 137 } 91 138 92 139 
subsys_initcall(arc4_init); ··· 73 164 MODULE_LICENSE("GPL"); 74 165 MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); 75 166 MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>"); 76 - MODULE_ALIAS_CRYPTO("arc4"); 167 + MODULE_ALIAS_CRYPTO("ecb(arc4)");
+3
crypto/asymmetric_keys/Kconfig
··· 15 15 select MPILIB 16 16 select CRYPTO_HASH_INFO 17 17 select CRYPTO_AKCIPHER 18 + select CRYPTO_HASH 18 19 help 19 20 This option provides support for asymmetric public key type handling. 20 21 If signature generation and/or verification are to be used, ··· 66 65 config PKCS7_MESSAGE_PARSER 67 66 tristate "PKCS#7 message parser" 68 67 depends on X509_CERTIFICATE_PARSER 68 + select CRYPTO_HASH 69 69 select ASN1 70 70 select OID_REGISTRY 71 71 help ··· 89 87 bool "Support for PE file signature verification" 90 88 depends on PKCS7_MESSAGE_PARSER=y 91 89 depends on SYSTEM_DATA_VERIFICATION 90 + select CRYPTO_HASH 92 91 select ASN1 93 92 select OID_REGISTRY 94 93 help
+32 -41
crypto/chacha20poly1305.c
··· 61 61 unsigned int cryptlen; 62 62 /* Actual AD, excluding IV */ 63 63 unsigned int assoclen; 64 + /* request flags, with MAY_SLEEP cleared if needed */ 65 + u32 flags; 64 66 union { 65 67 struct poly_req poly; 66 68 struct chacha_req chacha; ··· 72 70 static inline void async_done_continue(struct aead_request *req, int err, 73 71 int (*cont)(struct aead_request *)) 74 72 { 75 - if (!err) 73 + if (!err) { 74 + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); 75 + 76 + rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 76 77 err = cont(req); 78 + } 77 79 78 80 if (err != -EINPROGRESS && err != -EBUSY) 79 81 aead_request_complete(req, err); ··· 135 129 136 130 chacha_iv(creq->iv, req, 1); 137 131 138 - sg_init_table(rctx->src, 2); 139 132 src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); 140 133 dst = src; 141 - 142 - if (req->src != req->dst) { 143 - sg_init_table(rctx->dst, 2); 134 + if (req->src != req->dst) 144 135 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); 145 - } 146 136 147 - skcipher_request_set_callback(&creq->req, aead_request_flags(req), 137 + skcipher_request_set_callback(&creq->req, rctx->flags, 148 138 chacha_decrypt_done, req); 149 139 skcipher_request_set_tfm(&creq->req, ctx->chacha); 150 140 skcipher_request_set_crypt(&creq->req, src, dst, ··· 174 172 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); 175 173 struct chachapoly_req_ctx *rctx = aead_request_ctx(req); 176 174 struct poly_req *preq = &rctx->u.poly; 177 - __le64 len; 178 175 int err; 179 176 180 - sg_init_table(preq->src, 1); 181 - len = cpu_to_le64(rctx->assoclen); 182 - memcpy(&preq->tail.assoclen, &len, sizeof(len)); 183 - len = cpu_to_le64(rctx->cryptlen); 184 - memcpy(&preq->tail.cryptlen, &len, sizeof(len)); 185 - sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail)); 177 + preq->tail.assoclen = cpu_to_le64(rctx->assoclen); 178 + preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen); 179 + sg_init_one(preq->src, &preq->tail, sizeof(preq->tail)); 186 180 
187 - ahash_request_set_callback(&preq->req, aead_request_flags(req), 181 + ahash_request_set_callback(&preq->req, rctx->flags, 188 182 poly_tail_done, req); 189 183 ahash_request_set_tfm(&preq->req, ctx->poly); 190 184 ahash_request_set_crypt(&preq->req, preq->src, ··· 203 205 struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); 204 206 struct chachapoly_req_ctx *rctx = aead_request_ctx(req); 205 207 struct poly_req *preq = &rctx->u.poly; 206 - unsigned int padlen, bs = POLY1305_BLOCK_SIZE; 208 + unsigned int padlen; 207 209 int err; 208 210 209 - padlen = (bs - (rctx->cryptlen % bs)) % bs; 211 + padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; 210 212 memset(preq->pad, 0, sizeof(preq->pad)); 211 - sg_init_table(preq->src, 1); 212 - sg_set_buf(preq->src, &preq->pad, padlen); 213 + sg_init_one(preq->src, preq->pad, padlen); 213 214 214 - ahash_request_set_callback(&preq->req, aead_request_flags(req), 215 + ahash_request_set_callback(&preq->req, rctx->flags, 215 216 poly_cipherpad_done, req); 216 217 ahash_request_set_tfm(&preq->req, ctx->poly); 217 218 ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); ··· 238 241 if (rctx->cryptlen == req->cryptlen) /* encrypting */ 239 242 crypt = req->dst; 240 243 241 - sg_init_table(rctx->src, 2); 242 244 crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); 243 245 244 - ahash_request_set_callback(&preq->req, aead_request_flags(req), 246 + ahash_request_set_callback(&preq->req, rctx->flags, 245 247 poly_cipher_done, req); 246 248 ahash_request_set_tfm(&preq->req, ctx->poly); 247 249 ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); ··· 262 266 struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); 263 267 struct chachapoly_req_ctx *rctx = aead_request_ctx(req); 264 268 struct poly_req *preq = &rctx->u.poly; 265 - unsigned int padlen, bs = POLY1305_BLOCK_SIZE; 269 + unsigned int padlen; 266 270 int err; 267 271 268 - padlen = (bs - (rctx->assoclen % bs)) % bs; 272 
+ padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE; 269 273 memset(preq->pad, 0, sizeof(preq->pad)); 270 - sg_init_table(preq->src, 1); 271 - sg_set_buf(preq->src, preq->pad, padlen); 274 + sg_init_one(preq->src, preq->pad, padlen); 272 275 273 - ahash_request_set_callback(&preq->req, aead_request_flags(req), 276 + ahash_request_set_callback(&preq->req, rctx->flags, 274 277 poly_adpad_done, req); 275 278 ahash_request_set_tfm(&preq->req, ctx->poly); 276 279 ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); ··· 293 298 struct poly_req *preq = &rctx->u.poly; 294 299 int err; 295 300 296 - ahash_request_set_callback(&preq->req, aead_request_flags(req), 301 + ahash_request_set_callback(&preq->req, rctx->flags, 297 302 poly_ad_done, req); 298 303 ahash_request_set_tfm(&preq->req, ctx->poly); 299 304 ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); ··· 317 322 struct poly_req *preq = &rctx->u.poly; 318 323 int err; 319 324 320 - sg_init_table(preq->src, 1); 321 - sg_set_buf(preq->src, rctx->key, sizeof(rctx->key)); 325 + sg_init_one(preq->src, rctx->key, sizeof(rctx->key)); 322 326 323 - ahash_request_set_callback(&preq->req, aead_request_flags(req), 327 + ahash_request_set_callback(&preq->req, rctx->flags, 324 328 poly_setkey_done, req); 325 329 ahash_request_set_tfm(&preq->req, ctx->poly); 326 330 ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key)); ··· 343 349 struct poly_req *preq = &rctx->u.poly; 344 350 int err; 345 351 346 - ahash_request_set_callback(&preq->req, aead_request_flags(req), 352 + ahash_request_set_callback(&preq->req, rctx->flags, 347 353 poly_init_done, req); 348 354 ahash_request_set_tfm(&preq->req, ctx->poly); 349 355 ··· 375 381 rctx->assoclen -= 8; 376 382 } 377 383 378 - sg_init_table(creq->src, 1); 379 384 memset(rctx->key, 0, sizeof(rctx->key)); 380 - sg_set_buf(creq->src, rctx->key, sizeof(rctx->key)); 385 + sg_init_one(creq->src, rctx->key, sizeof(rctx->key)); 381 386 382 387 
chacha_iv(creq->iv, req, 0); 383 388 384 - skcipher_request_set_callback(&creq->req, aead_request_flags(req), 389 + skcipher_request_set_callback(&creq->req, rctx->flags, 385 390 poly_genkey_done, req); 386 391 skcipher_request_set_tfm(&creq->req, ctx->chacha); 387 392 skcipher_request_set_crypt(&creq->req, creq->src, creq->src, ··· 411 418 412 419 chacha_iv(creq->iv, req, 1); 413 420 414 - sg_init_table(rctx->src, 2); 415 421 src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); 416 422 dst = src; 417 - 418 - if (req->src != req->dst) { 419 - sg_init_table(rctx->dst, 2); 423 + if (req->src != req->dst) 420 424 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); 421 - } 422 425 423 - skcipher_request_set_callback(&creq->req, aead_request_flags(req), 426 + skcipher_request_set_callback(&creq->req, rctx->flags, 424 427 chacha_encrypt_done, req); 425 428 skcipher_request_set_tfm(&creq->req, ctx->chacha); 426 429 skcipher_request_set_crypt(&creq->req, src, dst, ··· 434 445 struct chachapoly_req_ctx *rctx = aead_request_ctx(req); 435 446 436 447 rctx->cryptlen = req->cryptlen; 448 + rctx->flags = aead_request_flags(req); 437 449 438 450 /* encrypt call chain: 439 451 * - chacha_encrypt/done() ··· 456 466 struct chachapoly_req_ctx *rctx = aead_request_ctx(req); 457 467 458 468 rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; 469 + rctx->flags = aead_request_flags(req); 459 470 460 471 /* decrypt call chain: 461 472 * - poly_genkey/done()
+2 -2
crypto/chacha_generic.c
··· 32 32 } 33 33 34 34 static int chacha_stream_xor(struct skcipher_request *req, 35 - struct chacha_ctx *ctx, u8 *iv) 35 + const struct chacha_ctx *ctx, const u8 *iv) 36 36 { 37 37 struct skcipher_walk walk; 38 38 u32 state[16]; ··· 56 56 return err; 57 57 } 58 58 59 - void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv) 59 + void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv) 60 60 { 61 61 state[0] = 0x61707865; /* "expa" */ 62 62 state[1] = 0x3320646e; /* "nd 3" */
+20 -6
crypto/cryptd.c
··· 16 16 #include <crypto/internal/aead.h> 17 17 #include <crypto/internal/skcipher.h> 18 18 #include <crypto/cryptd.h> 19 - #include <crypto/crypto_wq.h> 20 19 #include <linux/atomic.h> 21 20 #include <linux/err.h> 22 21 #include <linux/init.h> ··· 25 26 #include <linux/scatterlist.h> 26 27 #include <linux/sched.h> 27 28 #include <linux/slab.h> 29 + #include <linux/workqueue.h> 28 30 29 31 static unsigned int cryptd_max_cpu_qlen = 1000; 30 32 module_param(cryptd_max_cpu_qlen, uint, 0); 31 33 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); 34 + 35 + static struct workqueue_struct *cryptd_wq; 32 36 33 37 struct cryptd_cpu_queue { 34 38 struct crypto_queue queue; ··· 138 136 if (err == -ENOSPC) 139 137 goto out_put_cpu; 140 138 141 - queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); 139 + queue_work_on(cpu, cryptd_wq, &cpu_queue->work); 142 140 143 141 if (!atomic_read(refcnt)) 144 142 goto out_put_cpu; ··· 181 179 req->complete(req, 0); 182 180 183 181 if (cpu_queue->queue.qlen) 184 - queue_work(kcrypto_wq, &cpu_queue->work); 182 + queue_work(cryptd_wq, &cpu_queue->work); 185 183 } 186 184 187 185 static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) ··· 921 919 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { 922 920 case CRYPTO_ALG_TYPE_BLKCIPHER: 923 921 return cryptd_create_skcipher(tmpl, tb, &queue); 924 - case CRYPTO_ALG_TYPE_DIGEST: 922 + case CRYPTO_ALG_TYPE_HASH: 925 923 return cryptd_create_hash(tmpl, tb, &queue); 926 924 case CRYPTO_ALG_TYPE_AEAD: 927 925 return cryptd_create_aead(tmpl, tb, &queue); ··· 1121 1119 { 1122 1120 int err; 1123 1121 1122 + cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1123 + 1); 1124 + if (!cryptd_wq) 1125 + return -ENOMEM; 1126 + 1124 1127 err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); 1125 1128 if (err) 1126 - return err; 1129 + goto err_destroy_wq; 1127 1130 1128 1131 err = crypto_register_template(&cryptd_tmpl); 1129 1132 if (err) 1130 - 
cryptd_fini_queue(&queue); 1133 + goto err_fini_queue; 1131 1134 1135 + return 0; 1136 + 1137 + err_fini_queue: 1138 + cryptd_fini_queue(&queue); 1139 + err_destroy_wq: 1140 + destroy_workqueue(cryptd_wq); 1132 1141 return err; 1133 1142 } 1134 1143 1135 1144 static void __exit cryptd_exit(void) 1136 1145 { 1146 + destroy_workqueue(cryptd_wq); 1137 1147 cryptd_fini_queue(&queue); 1138 1148 crypto_unregister_template(&cryptd_tmpl); 1139 1149 }
+3
crypto/crypto_null.c
··· 100 100 .final = null_final, 101 101 .base = { 102 102 .cra_name = "digest_null", 103 + .cra_driver_name = "digest_null-generic", 103 104 .cra_blocksize = NULL_BLOCK_SIZE, 104 105 .cra_module = THIS_MODULE, 105 106 } ··· 123 122 124 123 static struct crypto_alg null_algs[] = { { 125 124 .cra_name = "cipher_null", 125 + .cra_driver_name = "cipher_null-generic", 126 126 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 127 127 .cra_blocksize = NULL_BLOCK_SIZE, 128 128 .cra_ctxsize = 0, ··· 136 134 .cia_decrypt = null_crypt } } 137 135 }, { 138 136 .cra_name = "compress_null", 137 + .cra_driver_name = "compress_null-generic", 139 138 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 140 139 .cra_blocksize = NULL_BLOCK_SIZE, 141 140 .cra_ctxsize = 0,
-35
crypto/crypto_wq.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * Workqueue for crypto subsystem 4 - * 5 - * Copyright (c) 2009 Intel Corp. 6 - * Author: Huang Ying <ying.huang@intel.com> 7 - */ 8 - 9 - #include <linux/workqueue.h> 10 - #include <linux/module.h> 11 - #include <crypto/algapi.h> 12 - #include <crypto/crypto_wq.h> 13 - 14 - struct workqueue_struct *kcrypto_wq; 15 - EXPORT_SYMBOL_GPL(kcrypto_wq); 16 - 17 - static int __init crypto_wq_init(void) 18 - { 19 - kcrypto_wq = alloc_workqueue("crypto", 20 - WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); 21 - if (unlikely(!kcrypto_wq)) 22 - return -ENOMEM; 23 - return 0; 24 - } 25 - 26 - static void __exit crypto_wq_exit(void) 27 - { 28 - destroy_workqueue(kcrypto_wq); 29 - } 30 - 31 - subsys_initcall(crypto_wq_init); 32 - module_exit(crypto_wq_exit); 33 - 34 - MODULE_LICENSE("GPL"); 35 - MODULE_DESCRIPTION("Workqueue for crypto subsystem");
+1
crypto/deflate.c
··· 275 275 276 276 static struct crypto_alg alg = { 277 277 .cra_name = "deflate", 278 + .cra_driver_name = "deflate-generic", 278 279 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 279 280 .cra_ctxsize = sizeof(struct deflate_ctx), 280 281 .cra_module = THIS_MODULE,
+91 -3
crypto/drbg.c
··· 220 220 } 221 221 222 222 /* 223 + * FIPS 140-2 continuous self test for the noise source 224 + * The test is performed on the noise source input data. Thus, the function 225 + * implicitly knows the size of the buffer to be equal to the security 226 + * strength. 227 + * 228 + * Note, this function disregards the nonce trailing the entropy data during 229 + * initial seeding. 230 + * 231 + * drbg->drbg_mutex must have been taken. 232 + * 233 + * @drbg DRBG handle 234 + * @entropy buffer of seed data to be checked 235 + * 236 + * return: 237 + * 0 on success 238 + * -EAGAIN on when the CTRNG is not yet primed 239 + * < 0 on error 240 + */ 241 + static int drbg_fips_continuous_test(struct drbg_state *drbg, 242 + const unsigned char *entropy) 243 + { 244 + unsigned short entropylen = drbg_sec_strength(drbg->core->flags); 245 + int ret = 0; 246 + 247 + if (!IS_ENABLED(CONFIG_CRYPTO_FIPS)) 248 + return 0; 249 + 250 + /* skip test if we test the overall system */ 251 + if (list_empty(&drbg->test_data.list)) 252 + return 0; 253 + /* only perform test in FIPS mode */ 254 + if (!fips_enabled) 255 + return 0; 256 + 257 + if (!drbg->fips_primed) { 258 + /* Priming of FIPS test */ 259 + memcpy(drbg->prev, entropy, entropylen); 260 + drbg->fips_primed = true; 261 + /* priming: another round is needed */ 262 + return -EAGAIN; 263 + } 264 + ret = memcmp(drbg->prev, entropy, entropylen); 265 + if (!ret) 266 + panic("DRBG continuous self test failed\n"); 267 + memcpy(drbg->prev, entropy, entropylen); 268 + 269 + /* the test shall pass when the two values are not equal */ 270 + return 0; 271 + } 272 + 273 + /* 223 274 * Convert an integer into a byte representation of this integer. 
224 275 * The byte representation is big-endian 225 276 * ··· 1049 998 return ret; 1050 999 } 1051 1000 1001 + static inline int drbg_get_random_bytes(struct drbg_state *drbg, 1002 + unsigned char *entropy, 1003 + unsigned int entropylen) 1004 + { 1005 + int ret; 1006 + 1007 + do { 1008 + get_random_bytes(entropy, entropylen); 1009 + ret = drbg_fips_continuous_test(drbg, entropy); 1010 + if (ret && ret != -EAGAIN) 1011 + return ret; 1012 + } while (ret); 1013 + 1014 + return 0; 1015 + } 1016 + 1052 1017 static void drbg_async_seed(struct work_struct *work) 1053 1018 { 1054 1019 struct drbg_string data; ··· 1073 1006 seed_work); 1074 1007 unsigned int entropylen = drbg_sec_strength(drbg->core->flags); 1075 1008 unsigned char entropy[32]; 1009 + int ret; 1076 1010 1077 1011 BUG_ON(!entropylen); 1078 1012 BUG_ON(entropylen > sizeof(entropy)); 1079 - get_random_bytes(entropy, entropylen); 1080 1013 1081 1014 drbg_string_fill(&data, entropy, entropylen); 1082 1015 list_add_tail(&data.list, &seedlist); 1083 1016 1084 1017 mutex_lock(&drbg->drbg_mutex); 1018 + 1019 + ret = drbg_get_random_bytes(drbg, entropy, entropylen); 1020 + if (ret) 1021 + goto unlock; 1085 1022 1086 1023 /* If nonblocking pool is initialized, deactivate Jitter RNG */ 1087 1024 crypto_free_rng(drbg->jent); ··· 1101 1030 if (drbg->seeded) 1102 1031 drbg->reseed_threshold = drbg_max_requests(drbg); 1103 1032 1033 + unlock: 1104 1034 mutex_unlock(&drbg->drbg_mutex); 1105 1035 1106 1036 memzero_explicit(entropy, entropylen); ··· 1153 1081 BUG_ON((entropylen * 2) > sizeof(entropy)); 1154 1082 1155 1083 /* Get seed from in-kernel /dev/urandom */ 1156 - get_random_bytes(entropy, entropylen); 1084 + ret = drbg_get_random_bytes(drbg, entropy, entropylen); 1085 + if (ret) 1086 + goto out; 1157 1087 1158 1088 if (!drbg->jent) { 1159 1089 drbg_string_fill(&data1, entropy, entropylen); ··· 1168 1094 entropylen); 1169 1095 if (ret) { 1170 1096 pr_devel("DRBG: jent failed with %d\n", ret); 1171 - return ret; 1097 + 
goto out; 1172 1098 } 1173 1099 1174 1100 drbg_string_fill(&data1, entropy, entropylen * 2); ··· 1195 1121 1196 1122 ret = __drbg_seed(drbg, &seedlist, reseed); 1197 1123 1124 + out: 1198 1125 memzero_explicit(entropy, entropylen * 2); 1199 1126 1200 1127 return ret; ··· 1217 1142 drbg->reseed_ctr = 0; 1218 1143 drbg->d_ops = NULL; 1219 1144 drbg->core = NULL; 1145 + if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { 1146 + kzfree(drbg->prev); 1147 + drbg->prev = NULL; 1148 + drbg->fips_primed = false; 1149 + } 1220 1150 } 1221 1151 1222 1152 /* ··· 1289 1209 goto fini; 1290 1210 } 1291 1211 drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1); 1212 + } 1213 + 1214 + if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { 1215 + drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), 1216 + GFP_KERNEL); 1217 + if (!drbg->prev) 1218 + goto fini; 1219 + drbg->fips_primed = false; 1292 1220 } 1293 1221 1294 1222 return 0;
+1
crypto/fcrypt.c
··· 391 391 392 392 static struct crypto_alg fcrypt_alg = { 393 393 .cra_name = "fcrypt", 394 + .cra_driver_name = "fcrypt-generic", 394 395 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 395 396 .cra_blocksize = 8, 396 397 .cra_ctxsize = sizeof(struct fcrypt_ctx),
+7 -1
crypto/ghash-generic.c
··· 31 31 const u8 *key, unsigned int keylen) 32 32 { 33 33 struct ghash_ctx *ctx = crypto_shash_ctx(tfm); 34 + be128 k; 34 35 35 36 if (keylen != GHASH_BLOCK_SIZE) { 36 37 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); ··· 40 39 41 40 if (ctx->gf128) 42 41 gf128mul_free_4k(ctx->gf128); 43 - ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); 42 + 43 + BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE); 44 + memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */ 45 + ctx->gf128 = gf128mul_init_4k_lle(&k); 46 + memzero_explicit(&k, GHASH_BLOCK_SIZE); 47 + 44 48 if (!ctx->gf128) 45 49 return -ENOMEM; 46 50
-5
crypto/jitterentropy-kcapi.c
··· 56 56 * Helper function 57 57 ***************************************************************************/ 58 58 59 - __u64 jent_rol64(__u64 word, unsigned int shift) 60 - { 61 - return rol64(word, shift); 62 - } 63 - 64 59 void *jent_zalloc(unsigned int len) 65 60 { 66 61 return kzalloc(len, GFP_KERNEL);
+82 -223
crypto/jitterentropy.c
··· 2 2 * Non-physical true random number generator based on timing jitter -- 3 3 * Jitter RNG standalone code. 4 4 * 5 - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 5 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2019 6 6 * 7 7 * Design 8 8 * ====== ··· 47 47 48 48 /* 49 49 * This Jitterentropy RNG is based on the jitterentropy library 50 - * version 1.1.0 provided at http://www.chronox.de/jent.html 50 + * version 2.1.2 provided at http://www.chronox.de/jent.html 51 51 */ 52 52 53 53 #ifdef __OPTIMIZE__ ··· 71 71 #define DATA_SIZE_BITS ((sizeof(__u64)) * 8) 72 72 __u64 last_delta; /* SENSITIVE stuck test */ 73 73 __s64 last_delta2; /* SENSITIVE stuck test */ 74 - unsigned int stuck:1; /* Time measurement stuck */ 75 74 unsigned int osr; /* Oversample rate */ 76 - unsigned int stir:1; /* Post-processing stirring */ 77 - unsigned int disable_unbias:1; /* Deactivate Von-Neuman unbias */ 78 75 #define JENT_MEMORY_BLOCKS 64 79 76 #define JENT_MEMORY_BLOCKSIZE 32 80 77 #define JENT_MEMORY_ACCESSLOOPS 128 ··· 86 89 }; 87 90 88 91 /* Flags that can be used to initialize the RNG */ 89 - #define JENT_DISABLE_STIR (1<<0) /* Disable stirring the entropy pool */ 90 - #define JENT_DISABLE_UNBIAS (1<<1) /* Disable the Von-Neuman Unbiaser */ 91 92 #define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more 92 93 * entropy, saves MEMORY_SIZE RAM for 93 94 * entropy collector */ ··· 94 99 #define JENT_ENOTIME 1 /* Timer service not available */ 95 100 #define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */ 96 101 #define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */ 97 - #define JENT_EMINVARIATION 4 /* Timer variations too small for RNG */ 98 102 #define JENT_EVARVAR 5 /* Timer does not produce variations of 99 103 * variations (2nd derivation of time is 100 104 * zero). */ 101 - #define JENT_EMINVARVAR 6 /* Timer variations of variations is tooi 102 - * small. 
*/ 105 + #define JENT_ESTUCK 8 /* Too many stuck results during init. */ 103 106 104 107 /*************************************************************************** 105 108 * Helper functions 106 109 ***************************************************************************/ 107 110 108 111 void jent_get_nstime(__u64 *out); 109 - __u64 jent_rol64(__u64 word, unsigned int shift); 110 112 void *jent_zalloc(unsigned int len); 111 113 void jent_zfree(void *ptr); 112 114 int jent_fips_enabled(void); ··· 132 140 133 141 jent_get_nstime(&time); 134 142 /* 135 - * mix the current state of the random number into the shuffle 136 - * calculation to balance that shuffle a bit more 143 + * Mix the current state of the random number into the shuffle 144 + * calculation to balance that shuffle a bit more. 137 145 */ 138 146 if (ec) 139 147 time ^= ec->data; 140 148 /* 141 - * we fold the time value as much as possible to ensure that as many 142 - * bits of the time stamp are included as possible 149 + * We fold the time value as much as possible to ensure that as many 150 + * bits of the time stamp are included as possible. 
143 151 */ 144 - for (i = 0; (DATA_SIZE_BITS / bits) > i; i++) { 152 + for (i = 0; ((DATA_SIZE_BITS + bits - 1) / bits) > i; i++) { 145 153 shuffle ^= time & mask; 146 154 time = time >> bits; 147 155 } ··· 161 169 * CPU Jitter noise source -- this is the noise source based on the CPU 162 170 * execution time jitter 163 171 * 164 - * This function folds the time into one bit units by iterating 165 - * through the DATA_SIZE_BITS bit time value as follows: assume our time value 166 - * is 0xabcd 167 - * 1st loop, 1st shift generates 0xd000 168 - * 1st loop, 2nd shift generates 0x000d 169 - * 2nd loop, 1st shift generates 0xcd00 170 - * 2nd loop, 2nd shift generates 0x000c 171 - * 3rd loop, 1st shift generates 0xbcd0 172 - * 3rd loop, 2nd shift generates 0x000b 173 - * 4th loop, 1st shift generates 0xabcd 174 - * 4th loop, 2nd shift generates 0x000a 175 - * Now, the values at the end of the 2nd shifts are XORed together. 172 + * This function injects the individual bits of the time value into the 173 + * entropy pool using an LFSR. 176 174 * 177 - * The code is deliberately inefficient and shall stay that way. This function 178 - * is the root cause why the code shall be compiled without optimization. This 179 - * function not only acts as folding operation, but this function's execution 180 - * is used to measure the CPU execution time jitter. Any change to the loop in 181 - * this function implies that careful retesting must be done. 175 + * The code is deliberately inefficient with respect to the bit shifting 176 + * and shall stay that way. This function is the root cause why the code 177 + * shall be compiled without optimization. This function not only acts as 178 + * folding operation, but this function's execution is used to measure 179 + * the CPU execution time jitter. Any change to the loop in this function 180 + * implies that careful retesting must be done. 
182 181 * 183 182 * Input: 184 183 * @ec entropy collector struct -- may be NULL 185 - * @time time stamp to be folded 184 + * @time time stamp to be injected 186 185 * @loop_cnt if a value not equal to 0 is set, use the given value as number of 187 186 * loops to perform the folding 188 187 * 189 188 * Output: 190 - * @folded result of folding operation 189 + * updated ec->data 191 190 * 192 191 * @return Number of loops the folding operation is performed 193 192 */ 194 - static __u64 jent_fold_time(struct rand_data *ec, __u64 time, 195 - __u64 *folded, __u64 loop_cnt) 193 + static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) 196 194 { 197 195 unsigned int i; 198 196 __u64 j = 0; ··· 199 217 if (loop_cnt) 200 218 fold_loop_cnt = loop_cnt; 201 219 for (j = 0; j < fold_loop_cnt; j++) { 202 - new = 0; 220 + new = ec->data; 203 221 for (i = 1; (DATA_SIZE_BITS) >= i; i++) { 204 222 __u64 tmp = time << (DATA_SIZE_BITS - i); 205 223 206 224 tmp = tmp >> (DATA_SIZE_BITS - 1); 225 + 226 + /* 227 + * Fibonacci LSFR with polynomial of 228 + * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is 229 + * primitive according to 230 + * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf 231 + * (the shift values are the polynomial values minus one 232 + * due to counting bits from 0 to 63). As the current 233 + * position is always the LSB, the polynomial only needs 234 + * to shift data in from the left without wrap. 
235 + */ 236 + tmp ^= ((new >> 63) & 1); 237 + tmp ^= ((new >> 60) & 1); 238 + tmp ^= ((new >> 55) & 1); 239 + tmp ^= ((new >> 30) & 1); 240 + tmp ^= ((new >> 27) & 1); 241 + tmp ^= ((new >> 22) & 1); 242 + new <<= 1; 207 243 new ^= tmp; 208 244 } 209 245 } 210 - *folded = new; 246 + ec->data = new; 247 + 211 248 return fold_loop_cnt; 212 249 } 213 250 ··· 259 258 */ 260 259 static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) 261 260 { 262 - unsigned char *tmpval = NULL; 263 261 unsigned int wrap = 0; 264 262 __u64 i = 0; 265 263 #define MAX_ACC_LOOP_BIT 7 ··· 278 278 acc_loop_cnt = loop_cnt; 279 279 280 280 for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) { 281 - tmpval = ec->mem + ec->memlocation; 281 + unsigned char *tmpval = ec->mem + ec->memlocation; 282 282 /* 283 283 * memory access: just add 1 to one byte, 284 284 * wrap at 255 -- memory access implies read ··· 316 316 * 0 jitter measurement not stuck (good bit) 317 317 * 1 jitter measurement stuck (reject bit) 318 318 */ 319 - static void jent_stuck(struct rand_data *ec, __u64 current_delta) 319 + static int jent_stuck(struct rand_data *ec, __u64 current_delta) 320 320 { 321 321 __s64 delta2 = ec->last_delta - current_delta; 322 322 __s64 delta3 = delta2 - ec->last_delta2; ··· 325 325 ec->last_delta2 = delta2; 326 326 327 327 if (!current_delta || !delta2 || !delta3) 328 - ec->stuck = 1; 328 + return 1; 329 + 330 + return 0; 329 331 } 330 332 331 333 /** 332 334 * This is the heart of the entropy generation: calculate time deltas and 333 - * use the CPU jitter in the time deltas. The jitter is folded into one 334 - * bit. You can call this function the "random bit generator" as it 335 - * produces one random bit per invocation. 335 + * use the CPU jitter in the time deltas. The jitter is injected into the 336 + * entropy pool. 336 337 * 337 338 * WARNING: ensure that ->prev_time is primed before using the output 338 339 * of this function! 
This can be done by calling this function ··· 342 341 * Input: 343 342 * @entropy_collector Reference to entropy collector 344 343 * 345 - * @return One random bit 344 + * @return result of stuck test 346 345 */ 347 - static __u64 jent_measure_jitter(struct rand_data *ec) 346 + static int jent_measure_jitter(struct rand_data *ec) 348 347 { 349 348 __u64 time = 0; 350 - __u64 data = 0; 351 349 __u64 current_delta = 0; 352 350 353 351 /* Invoke one noise source before time measurement to add variations */ ··· 360 360 current_delta = time - ec->prev_time; 361 361 ec->prev_time = time; 362 362 363 - /* Now call the next noise sources which also folds the data */ 364 - jent_fold_time(ec, current_delta, &data, 0); 363 + /* Now call the next noise sources which also injects the data */ 364 + jent_lfsr_time(ec, current_delta, 0); 365 365 366 - /* 367 - * Check whether we have a stuck measurement. The enforcement 368 - * is performed after the stuck value has been mixed into the 369 - * entropy pool. 370 - */ 371 - jent_stuck(ec, current_delta); 372 - 373 - return data; 374 - } 375 - 376 - /** 377 - * Von Neuman unbias as explained in RFC 4086 section 4.2. As shown in the 378 - * documentation of that RNG, the bits from jent_measure_jitter are considered 379 - * independent which implies that the Von Neuman unbias operation is applicable. 380 - * A proof of the Von-Neumann unbias operation to remove skews is given in the 381 - * document "A proposal for: Functionality classes for random number 382 - * generators", version 2.0 by Werner Schindler, section 5.4.1. 
383 - * 384 - * Input: 385 - * @entropy_collector Reference to entropy collector 386 - * 387 - * @return One random bit 388 - */ 389 - static __u64 jent_unbiased_bit(struct rand_data *entropy_collector) 390 - { 391 - do { 392 - __u64 a = jent_measure_jitter(entropy_collector); 393 - __u64 b = jent_measure_jitter(entropy_collector); 394 - 395 - if (a == b) 396 - continue; 397 - if (1 == a) 398 - return 1; 399 - else 400 - return 0; 401 - } while (1); 402 - } 403 - 404 - /** 405 - * Shuffle the pool a bit by mixing some value with a bijective function (XOR) 406 - * into the pool. 407 - * 408 - * The function generates a mixer value that depends on the bits set and the 409 - * location of the set bits in the random number generated by the entropy 410 - * source. Therefore, based on the generated random number, this mixer value 411 - * can have 2**64 different values. That mixer value is initialized with the 412 - * first two SHA-1 constants. After obtaining the mixer value, it is XORed into 413 - * the random number. 414 - * 415 - * The mixer value is not assumed to contain any entropy. But due to the XOR 416 - * operation, it can also not destroy any entropy present in the entropy pool. 
417 - * 418 - * Input: 419 - * @entropy_collector Reference to entropy collector 420 - */ 421 - static void jent_stir_pool(struct rand_data *entropy_collector) 422 - { 423 - /* 424 - * to shut up GCC on 32 bit, we have to initialize the 64 variable 425 - * with two 32 bit variables 426 - */ 427 - union c { 428 - __u64 u64; 429 - __u32 u32[2]; 430 - }; 431 - /* 432 - * This constant is derived from the first two 32 bit initialization 433 - * vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 434 - */ 435 - union c constant; 436 - /* 437 - * The start value of the mixer variable is derived from the third 438 - * and fourth 32 bit initialization vector of SHA-1 as defined in 439 - * FIPS 180-4 section 5.3.1 440 - */ 441 - union c mixer; 442 - unsigned int i = 0; 443 - 444 - /* 445 - * Store the SHA-1 constants in reverse order to make up the 64 bit 446 - * value -- this applies to a little endian system, on a big endian 447 - * system, it reverses as expected. But this really does not matter 448 - * as we do not rely on the specific numbers. We just pick the SHA-1 449 - * constants as they have a good mix of bit set and unset. 450 - */ 451 - constant.u32[1] = 0x67452301; 452 - constant.u32[0] = 0xefcdab89; 453 - mixer.u32[1] = 0x98badcfe; 454 - mixer.u32[0] = 0x10325476; 455 - 456 - for (i = 0; i < DATA_SIZE_BITS; i++) { 457 - /* 458 - * get the i-th bit of the input random number and only XOR 459 - * the constant into the mixer value when that bit is set 460 - */ 461 - if ((entropy_collector->data >> i) & 1) 462 - mixer.u64 ^= constant.u64; 463 - mixer.u64 = jent_rol64(mixer.u64, 1); 464 - } 465 - entropy_collector->data ^= mixer.u64; 366 + /* Check whether we have a stuck measurement. 
*/ 367 + return jent_stuck(ec, current_delta); 466 368 } 467 369 468 370 /** ··· 382 480 jent_measure_jitter(ec); 383 481 384 482 while (1) { 385 - __u64 data = 0; 386 - 387 - if (ec->disable_unbias == 1) 388 - data = jent_measure_jitter(ec); 389 - else 390 - data = jent_unbiased_bit(ec); 391 - 392 - /* enforcement of the jent_stuck test */ 393 - if (ec->stuck) { 394 - /* 395 - * We only mix in the bit considered not appropriate 396 - * without the LSFR. The reason is that if we apply 397 - * the LSFR and we do not rotate, the 2nd bit with LSFR 398 - * will cancel out the first LSFR application on the 399 - * bad bit. 400 - * 401 - * And we do not rotate as we apply the next bit to the 402 - * current bit location again. 403 - */ 404 - ec->data ^= data; 405 - ec->stuck = 0; 483 + /* If a stuck measurement is received, repeat measurement */ 484 + if (jent_measure_jitter(ec)) 406 485 continue; 407 - } 408 - 409 - /* 410 - * Fibonacci LSFR with polynom of 411 - * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is 412 - * primitive according to 413 - * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf 414 - * (the shift values are the polynom values minus one 415 - * due to counting bits from 0 to 63). As the current 416 - * position is always the LSB, the polynom only needs 417 - * to shift data in from the left without wrap. 
418 - */ 419 - ec->data ^= data; 420 - ec->data ^= ((ec->data >> 63) & 1); 421 - ec->data ^= ((ec->data >> 60) & 1); 422 - ec->data ^= ((ec->data >> 55) & 1); 423 - ec->data ^= ((ec->data >> 30) & 1); 424 - ec->data ^= ((ec->data >> 27) & 1); 425 - ec->data ^= ((ec->data >> 22) & 1); 426 - ec->data = jent_rol64(ec->data, 1); 427 486 428 487 /* 429 488 * We multiply the loop value with ->osr to obtain the ··· 393 530 if (++k >= (DATA_SIZE_BITS * ec->osr)) 394 531 break; 395 532 } 396 - if (ec->stir) 397 - jent_stir_pool(ec); 398 533 } 399 534 400 535 /** ··· 500 639 osr = 1; /* minimum sampling rate is 1 */ 501 640 entropy_collector->osr = osr; 502 641 503 - entropy_collector->stir = 1; 504 - if (flags & JENT_DISABLE_STIR) 505 - entropy_collector->stir = 0; 506 - if (flags & JENT_DISABLE_UNBIAS) 507 - entropy_collector->disable_unbias = 1; 508 - 509 642 /* fill the data pad with non-zero values */ 510 643 jent_gen_entropy(entropy_collector); 511 644 ··· 511 656 jent_zfree(entropy_collector->mem); 512 657 entropy_collector->mem = NULL; 513 658 jent_zfree(entropy_collector); 514 - entropy_collector = NULL; 515 659 } 516 660 517 661 int jent_entropy_init(void) ··· 519 665 __u64 delta_sum = 0; 520 666 __u64 old_delta = 0; 521 667 int time_backwards = 0; 522 - int count_var = 0; 523 668 int count_mod = 0; 669 + int count_stuck = 0; 670 + struct rand_data ec = { 0 }; 524 671 525 672 /* We could perform statistical tests here, but the problem is 526 673 * that we only have a few loop counts to do testing. 
These ··· 550 695 for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { 551 696 __u64 time = 0; 552 697 __u64 time2 = 0; 553 - __u64 folded = 0; 554 698 __u64 delta = 0; 555 699 unsigned int lowdelta = 0; 700 + int stuck; 556 701 702 + /* Invoke core entropy collection logic */ 557 703 jent_get_nstime(&time); 558 - jent_fold_time(NULL, time, &folded, 1<<MIN_FOLD_LOOP_BIT); 704 + ec.prev_time = time; 705 + jent_lfsr_time(&ec, time, 0); 559 706 jent_get_nstime(&time2); 560 707 561 708 /* test whether timer works */ ··· 572 715 if (!delta) 573 716 return JENT_ECOARSETIME; 574 717 718 + stuck = jent_stuck(&ec, delta); 719 + 575 720 /* 576 721 * up to here we did not modify any variable that will be 577 722 * evaluated later, but we already performed some work. Thus we ··· 584 725 if (CLEARCACHE > i) 585 726 continue; 586 727 728 + if (stuck) 729 + count_stuck++; 730 + 587 731 /* test whether we have an increasing timer */ 588 732 if (!(time2 > time)) 589 733 time_backwards++; 590 734 591 - /* 592 - * Avoid modulo of 64 bit integer to allow code to compile 593 - * on 32 bit architectures. 
594 - */ 735 + /* use 32 bit value to ensure compilation on 32 bit arches */ 595 736 lowdelta = time2 - time; 596 737 if (!(lowdelta % 100)) 597 738 count_mod++; ··· 602 743 * only after the first loop is executed as we need to prime 603 744 * the old_data value 604 745 */ 605 - if (i) { 606 - if (delta != old_delta) 607 - count_var++; 608 - if (delta > old_delta) 609 - delta_sum += (delta - old_delta); 610 - else 611 - delta_sum += (old_delta - delta); 612 - } 746 + if (delta > old_delta) 747 + delta_sum += (delta - old_delta); 748 + else 749 + delta_sum += (old_delta - delta); 613 750 old_delta = delta; 614 751 } 615 752 ··· 618 763 */ 619 764 if (3 < time_backwards) 620 765 return JENT_ENOMONOTONIC; 621 - /* Error if the time variances are always identical */ 622 - if (!delta_sum) 623 - return JENT_EVARVAR; 624 766 625 767 /* 626 768 * Variations of deltas of time must on average be larger 627 769 * than 1 to ensure the entropy estimation 628 770 * implied with 1 is preserved 629 771 */ 630 - if (delta_sum <= 1) 631 - return JENT_EMINVARVAR; 772 + if ((delta_sum) <= 1) 773 + return JENT_EVARVAR; 632 774 633 775 /* 634 776 * Ensure that we have variations in the time stamp below 10 for at 635 - * least 10% of all checks -- on some platforms, the counter 636 - * increments in multiples of 100, but not always 777 + * least 10% of all checks -- on some platforms, the counter increments 778 + * in multiples of 100, but not always 637 779 */ 638 780 if ((TESTLOOPCOUNT/10 * 9) < count_mod) 639 781 return JENT_ECOARSETIME; 782 + 783 + /* 784 + * If we have more than 90% stuck results, then this Jitter RNG is 785 + * likely to not work well. 786 + */ 787 + if ((TESTLOOPCOUNT/10 * 9) < count_stuck) 788 + return JENT_ESTUCK; 640 789 641 790 return 0; 642 791 }
+1
crypto/khazad.c
··· 848 848 849 849 static struct crypto_alg khazad_alg = { 850 850 .cra_name = "khazad", 851 + .cra_driver_name = "khazad-generic", 851 852 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 852 853 .cra_blocksize = KHAZAD_BLOCK_SIZE, 853 854 .cra_ctxsize = sizeof (struct khazad_ctx),
+1 -1
crypto/lrw.c
··· 384 384 inst->alg.base.cra_priority = alg->base.cra_priority; 385 385 inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; 386 386 inst->alg.base.cra_alignmask = alg->base.cra_alignmask | 387 - (__alignof__(__be32) - 1); 387 + (__alignof__(be128) - 1); 388 388 389 389 inst->alg.ivsize = LRW_BLOCK_SIZE; 390 390 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
+1
crypto/lz4.c
··· 106 106 107 107 static struct crypto_alg alg_lz4 = { 108 108 .cra_name = "lz4", 109 + .cra_driver_name = "lz4-generic", 109 110 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 110 111 .cra_ctxsize = sizeof(struct lz4_ctx), 111 112 .cra_module = THIS_MODULE,
+1
crypto/lz4hc.c
··· 107 107 108 108 static struct crypto_alg alg_lz4hc = { 109 109 .cra_name = "lz4hc", 110 + .cra_driver_name = "lz4hc-generic", 110 111 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 111 112 .cra_ctxsize = sizeof(struct lz4hc_ctx), 112 113 .cra_module = THIS_MODULE,
+1
crypto/lzo-rle.c
··· 109 109 110 110 static struct crypto_alg alg = { 111 111 .cra_name = "lzo-rle", 112 + .cra_driver_name = "lzo-rle-generic", 112 113 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 113 114 .cra_ctxsize = sizeof(struct lzorle_ctx), 114 115 .cra_module = THIS_MODULE,
+1
crypto/lzo.c
··· 109 109 110 110 static struct crypto_alg alg = { 111 111 .cra_name = "lzo", 112 + .cra_driver_name = "lzo-generic", 112 113 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 113 114 .cra_ctxsize = sizeof(struct lzo_ctx), 114 115 .cra_module = THIS_MODULE,
+4 -3
crypto/md4.c
··· 216 216 .final = md4_final, 217 217 .descsize = sizeof(struct md4_ctx), 218 218 .base = { 219 - .cra_name = "md4", 220 - .cra_blocksize = MD4_HMAC_BLOCK_SIZE, 221 - .cra_module = THIS_MODULE, 219 + .cra_name = "md4", 220 + .cra_driver_name = "md4-generic", 221 + .cra_blocksize = MD4_HMAC_BLOCK_SIZE, 222 + .cra_module = THIS_MODULE, 222 223 } 223 224 }; 224 225
+4 -3
crypto/md5.c
··· 228 228 .descsize = sizeof(struct md5_state), 229 229 .statesize = sizeof(struct md5_state), 230 230 .base = { 231 - .cra_name = "md5", 232 - .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 233 - .cra_module = THIS_MODULE, 231 + .cra_name = "md5", 232 + .cra_driver_name = "md5-generic", 233 + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 234 + .cra_module = THIS_MODULE, 234 235 } 235 236 }; 236 237
+1
crypto/michael_mic.c
··· 156 156 .descsize = sizeof(struct michael_mic_desc_ctx), 157 157 .base = { 158 158 .cra_name = "michael_mic", 159 + .cra_driver_name = "michael_mic-generic", 159 160 .cra_blocksize = 8, 160 161 .cra_alignmask = 3, 161 162 .cra_ctxsize = sizeof(struct michael_mic_ctx),
+1
crypto/rmd128.c
··· 298 298 .descsize = sizeof(struct rmd128_ctx), 299 299 .base = { 300 300 .cra_name = "rmd128", 301 + .cra_driver_name = "rmd128-generic", 301 302 .cra_blocksize = RMD128_BLOCK_SIZE, 302 303 .cra_module = THIS_MODULE, 303 304 }
+1
crypto/rmd160.c
··· 342 342 .descsize = sizeof(struct rmd160_ctx), 343 343 .base = { 344 344 .cra_name = "rmd160", 345 + .cra_driver_name = "rmd160-generic", 345 346 .cra_blocksize = RMD160_BLOCK_SIZE, 346 347 .cra_module = THIS_MODULE, 347 348 }
+1
crypto/rmd256.c
··· 317 317 .descsize = sizeof(struct rmd256_ctx), 318 318 .base = { 319 319 .cra_name = "rmd256", 320 + .cra_driver_name = "rmd256-generic", 320 321 .cra_blocksize = RMD256_BLOCK_SIZE, 321 322 .cra_module = THIS_MODULE, 322 323 }
+1
crypto/rmd320.c
··· 366 366 .descsize = sizeof(struct rmd320_ctx), 367 367 .base = { 368 368 .cra_name = "rmd320", 369 + .cra_driver_name = "rmd320-generic", 369 370 .cra_blocksize = RMD320_BLOCK_SIZE, 370 371 .cra_module = THIS_MODULE, 371 372 }
+8 -1
crypto/serpent_generic.c
··· 225 225 x4 ^= x2; \ 226 226 }) 227 227 228 - static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k) 228 + /* 229 + * both gcc and clang have misoptimized this function in the past, 230 + * producing horrible object code from spilling temporary variables 231 + * on the stack. Forcing this part out of line avoids that. 232 + */ 233 + static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, 234 + u32 r3, u32 r4, u32 *k) 229 235 { 230 236 k += 100; 231 237 S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); ··· 643 637 .cia_decrypt = serpent_decrypt } } 644 638 }, { 645 639 .cra_name = "tnepres", 640 + .cra_driver_name = "tnepres-generic", 646 641 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 647 642 .cra_blocksize = SERPENT_BLOCK_SIZE, 648 643 .cra_ctxsize = sizeof(struct serpent_ctx),
+34
crypto/skcipher.c
··· 837 837 return 0; 838 838 } 839 839 840 + int crypto_skcipher_encrypt(struct skcipher_request *req) 841 + { 842 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 843 + struct crypto_alg *alg = tfm->base.__crt_alg; 844 + unsigned int cryptlen = req->cryptlen; 845 + int ret; 846 + 847 + crypto_stats_get(alg); 848 + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 849 + ret = -ENOKEY; 850 + else 851 + ret = tfm->encrypt(req); 852 + crypto_stats_skcipher_encrypt(cryptlen, ret, alg); 853 + return ret; 854 + } 855 + EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt); 856 + 857 + int crypto_skcipher_decrypt(struct skcipher_request *req) 858 + { 859 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 860 + struct crypto_alg *alg = tfm->base.__crt_alg; 861 + unsigned int cryptlen = req->cryptlen; 862 + int ret; 863 + 864 + crypto_stats_get(alg); 865 + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 866 + ret = -ENOKEY; 867 + else 868 + ret = tfm->decrypt(req); 869 + crypto_stats_skcipher_decrypt(cryptlen, ret, alg); 870 + return ret; 871 + } 872 + EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt); 873 + 840 874 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) 841 875 { 842 876 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+3
crypto/tea.c
··· 216 216 217 217 static struct crypto_alg tea_algs[3] = { { 218 218 .cra_name = "tea", 219 + .cra_driver_name = "tea-generic", 219 220 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 220 221 .cra_blocksize = TEA_BLOCK_SIZE, 221 222 .cra_ctxsize = sizeof (struct tea_ctx), ··· 230 229 .cia_decrypt = tea_decrypt } } 231 230 }, { 232 231 .cra_name = "xtea", 232 + .cra_driver_name = "xtea-generic", 233 233 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 234 234 .cra_blocksize = XTEA_BLOCK_SIZE, 235 235 .cra_ctxsize = sizeof (struct xtea_ctx), ··· 244 242 .cia_decrypt = xtea_decrypt } } 245 243 }, { 246 244 .cra_name = "xeta", 245 + .cra_driver_name = "xeta-generic", 247 246 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 248 247 .cra_blocksize = XTEA_BLOCK_SIZE, 249 248 .cra_ctxsize = sizeof (struct xtea_ctx),
+394 -84
crypto/testmgr.c
··· 1032 1032 } 1033 1033 #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ 1034 1034 1035 + static int build_hash_sglist(struct test_sglist *tsgl, 1036 + const struct hash_testvec *vec, 1037 + const struct testvec_config *cfg, 1038 + unsigned int alignmask, 1039 + const struct test_sg_division *divs[XBUFSIZE]) 1040 + { 1041 + struct kvec kv; 1042 + struct iov_iter input; 1043 + 1044 + kv.iov_base = (void *)vec->plaintext; 1045 + kv.iov_len = vec->psize; 1046 + iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize); 1047 + return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, 1048 + &input, divs); 1049 + } 1050 + 1051 + static int check_hash_result(const char *type, 1052 + const u8 *result, unsigned int digestsize, 1053 + const struct hash_testvec *vec, 1054 + const char *vec_name, 1055 + const char *driver, 1056 + const struct testvec_config *cfg) 1057 + { 1058 + if (memcmp(result, vec->digest, digestsize) != 0) { 1059 + pr_err("alg: %s: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n", 1060 + type, driver, vec_name, cfg->name); 1061 + return -EINVAL; 1062 + } 1063 + if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) { 1064 + pr_err("alg: %s: %s overran result buffer on test vector %s, cfg=\"%s\"\n", 1065 + type, driver, vec_name, cfg->name); 1066 + return -EOVERFLOW; 1067 + } 1068 + return 0; 1069 + } 1070 + 1071 + static inline int check_shash_op(const char *op, int err, 1072 + const char *driver, const char *vec_name, 1073 + const struct testvec_config *cfg) 1074 + { 1075 + if (err) 1076 + pr_err("alg: shash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n", 1077 + driver, op, err, vec_name, cfg->name); 1078 + return err; 1079 + } 1080 + 1081 + static inline const void *sg_data(struct scatterlist *sg) 1082 + { 1083 + return page_address(sg_page(sg)) + sg->offset; 1084 + } 1085 + 1086 + /* Test one hash test vector in one configuration, using the shash API */ 1087 + static int test_shash_vec_cfg(const char *driver, 
1088 + const struct hash_testvec *vec, 1089 + const char *vec_name, 1090 + const struct testvec_config *cfg, 1091 + struct shash_desc *desc, 1092 + struct test_sglist *tsgl, 1093 + u8 *hashstate) 1094 + { 1095 + struct crypto_shash *tfm = desc->tfm; 1096 + const unsigned int alignmask = crypto_shash_alignmask(tfm); 1097 + const unsigned int digestsize = crypto_shash_digestsize(tfm); 1098 + const unsigned int statesize = crypto_shash_statesize(tfm); 1099 + const struct test_sg_division *divs[XBUFSIZE]; 1100 + unsigned int i; 1101 + u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN]; 1102 + int err; 1103 + 1104 + /* Set the key, if specified */ 1105 + if (vec->ksize) { 1106 + err = crypto_shash_setkey(tfm, vec->key, vec->ksize); 1107 + if (err) { 1108 + if (err == vec->setkey_error) 1109 + return 0; 1110 + pr_err("alg: shash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n", 1111 + driver, vec_name, vec->setkey_error, err, 1112 + crypto_shash_get_flags(tfm)); 1113 + return err; 1114 + } 1115 + if (vec->setkey_error) { 1116 + pr_err("alg: shash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n", 1117 + driver, vec_name, vec->setkey_error); 1118 + return -EINVAL; 1119 + } 1120 + } 1121 + 1122 + /* Build the scatterlist for the source data */ 1123 + err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); 1124 + if (err) { 1125 + pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", 1126 + driver, vec_name, cfg->name); 1127 + return err; 1128 + } 1129 + 1130 + /* Do the actual hashing */ 1131 + 1132 + testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm)); 1133 + testmgr_poison(result, digestsize + TESTMGR_POISON_LEN); 1134 + 1135 + if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST || 1136 + vec->digest_error) { 1137 + /* Just using digest() */ 1138 + if (tsgl->nents != 1) 1139 + return 0; 1140 + if (cfg->nosimd) 1141 + crypto_disable_simd_for_test(); 1142 + err = 
crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]), 1143 + tsgl->sgl[0].length, result); 1144 + if (cfg->nosimd) 1145 + crypto_reenable_simd_for_test(); 1146 + if (err) { 1147 + if (err == vec->digest_error) 1148 + return 0; 1149 + pr_err("alg: shash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n", 1150 + driver, vec_name, vec->digest_error, err, 1151 + cfg->name); 1152 + return err; 1153 + } 1154 + if (vec->digest_error) { 1155 + pr_err("alg: shash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n", 1156 + driver, vec_name, vec->digest_error, cfg->name); 1157 + return -EINVAL; 1158 + } 1159 + goto result_ready; 1160 + } 1161 + 1162 + /* Using init(), zero or more update(), then final() or finup() */ 1163 + 1164 + if (cfg->nosimd) 1165 + crypto_disable_simd_for_test(); 1166 + err = crypto_shash_init(desc); 1167 + if (cfg->nosimd) 1168 + crypto_reenable_simd_for_test(); 1169 + err = check_shash_op("init", err, driver, vec_name, cfg); 1170 + if (err) 1171 + return err; 1172 + 1173 + for (i = 0; i < tsgl->nents; i++) { 1174 + if (i + 1 == tsgl->nents && 1175 + cfg->finalization_type == FINALIZATION_TYPE_FINUP) { 1176 + if (divs[i]->nosimd) 1177 + crypto_disable_simd_for_test(); 1178 + err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]), 1179 + tsgl->sgl[i].length, result); 1180 + if (divs[i]->nosimd) 1181 + crypto_reenable_simd_for_test(); 1182 + err = check_shash_op("finup", err, driver, vec_name, 1183 + cfg); 1184 + if (err) 1185 + return err; 1186 + goto result_ready; 1187 + } 1188 + if (divs[i]->nosimd) 1189 + crypto_disable_simd_for_test(); 1190 + err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]), 1191 + tsgl->sgl[i].length); 1192 + if (divs[i]->nosimd) 1193 + crypto_reenable_simd_for_test(); 1194 + err = check_shash_op("update", err, driver, vec_name, cfg); 1195 + if (err) 1196 + return err; 1197 + if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) { 1198 + /* Test ->export() and 
->import() */ 1199 + testmgr_poison(hashstate + statesize, 1200 + TESTMGR_POISON_LEN); 1201 + err = crypto_shash_export(desc, hashstate); 1202 + err = check_shash_op("export", err, driver, vec_name, 1203 + cfg); 1204 + if (err) 1205 + return err; 1206 + if (!testmgr_is_poison(hashstate + statesize, 1207 + TESTMGR_POISON_LEN)) { 1208 + pr_err("alg: shash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n", 1209 + driver, vec_name, cfg->name); 1210 + return -EOVERFLOW; 1211 + } 1212 + testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm)); 1213 + err = crypto_shash_import(desc, hashstate); 1214 + err = check_shash_op("import", err, driver, vec_name, 1215 + cfg); 1216 + if (err) 1217 + return err; 1218 + } 1219 + } 1220 + 1221 + if (cfg->nosimd) 1222 + crypto_disable_simd_for_test(); 1223 + err = crypto_shash_final(desc, result); 1224 + if (cfg->nosimd) 1225 + crypto_reenable_simd_for_test(); 1226 + err = check_shash_op("final", err, driver, vec_name, cfg); 1227 + if (err) 1228 + return err; 1229 + result_ready: 1230 + return check_hash_result("shash", result, digestsize, vec, vec_name, 1231 + driver, cfg); 1232 + } 1233 + 1035 1234 static int do_ahash_op(int (*op)(struct ahash_request *req), 1036 1235 struct ahash_request *req, 1037 1236 struct crypto_wait *wait, bool nosimd) ··· 1248 1049 return crypto_wait_req(err, wait); 1249 1050 } 1250 1051 1251 - static int check_nonfinal_hash_op(const char *op, int err, 1252 - u8 *result, unsigned int digestsize, 1253 - const char *driver, const char *vec_name, 1254 - const struct testvec_config *cfg) 1052 + static int check_nonfinal_ahash_op(const char *op, int err, 1053 + u8 *result, unsigned int digestsize, 1054 + const char *driver, const char *vec_name, 1055 + const struct testvec_config *cfg) 1255 1056 { 1256 1057 if (err) { 1257 - pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n", 1058 + pr_err("alg: ahash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n", 1258 
1059 driver, op, err, vec_name, cfg->name); 1259 1060 return err; 1260 1061 } 1261 1062 if (!testmgr_is_poison(result, digestsize)) { 1262 - pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n", 1063 + pr_err("alg: ahash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n", 1263 1064 driver, op, vec_name, cfg->name); 1264 1065 return -EINVAL; 1265 1066 } 1266 1067 return 0; 1267 1068 } 1268 1069 1269 - static int test_hash_vec_cfg(const char *driver, 1270 - const struct hash_testvec *vec, 1271 - const char *vec_name, 1272 - const struct testvec_config *cfg, 1273 - struct ahash_request *req, 1274 - struct test_sglist *tsgl, 1275 - u8 *hashstate) 1070 + /* Test one hash test vector in one configuration, using the ahash API */ 1071 + static int test_ahash_vec_cfg(const char *driver, 1072 + const struct hash_testvec *vec, 1073 + const char *vec_name, 1074 + const struct testvec_config *cfg, 1075 + struct ahash_request *req, 1076 + struct test_sglist *tsgl, 1077 + u8 *hashstate) 1276 1078 { 1277 1079 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1278 1080 const unsigned int alignmask = crypto_ahash_alignmask(tfm); ··· 1282 1082 const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; 1283 1083 const struct test_sg_division *divs[XBUFSIZE]; 1284 1084 DECLARE_CRYPTO_WAIT(wait); 1285 - struct kvec _input; 1286 - struct iov_iter input; 1287 1085 unsigned int i; 1288 1086 struct scatterlist *pending_sgl; 1289 1087 unsigned int pending_len; ··· 1294 1096 if (err) { 1295 1097 if (err == vec->setkey_error) 1296 1098 return 0; 1297 - pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n", 1099 + pr_err("alg: ahash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n", 1298 1100 driver, vec_name, vec->setkey_error, err, 1299 1101 crypto_ahash_get_flags(tfm)); 1300 1102 return err; 1301 1103 } 1302 1104 if (vec->setkey_error) { 1303 - pr_err("alg: 
hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n", 1105 + pr_err("alg: ahash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n", 1304 1106 driver, vec_name, vec->setkey_error); 1305 1107 return -EINVAL; 1306 1108 } 1307 1109 } 1308 1110 1309 1111 /* Build the scatterlist for the source data */ 1310 - _input.iov_base = (void *)vec->plaintext; 1311 - _input.iov_len = vec->psize; 1312 - iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize); 1313 - err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, 1314 - &input, divs); 1112 + err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); 1315 1113 if (err) { 1316 - pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", 1114 + pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", 1317 1115 driver, vec_name, cfg->name); 1318 1116 return err; 1319 1117 } ··· 1329 1135 if (err) { 1330 1136 if (err == vec->digest_error) 1331 1137 return 0; 1332 - pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n", 1138 + pr_err("alg: ahash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n", 1333 1139 driver, vec_name, vec->digest_error, err, 1334 1140 cfg->name); 1335 1141 return err; 1336 1142 } 1337 1143 if (vec->digest_error) { 1338 - pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n", 1144 + pr_err("alg: ahash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n", 1339 1145 driver, vec_name, vec->digest_error, cfg->name); 1340 1146 return -EINVAL; 1341 1147 } ··· 1347 1153 ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); 1348 1154 ahash_request_set_crypt(req, NULL, result, 0); 1349 1155 err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd); 1350 - err = check_nonfinal_hash_op("init", err, result, 
digestsize, 1351 - driver, vec_name, cfg); 1156 + err = check_nonfinal_ahash_op("init", err, result, digestsize, 1157 + driver, vec_name, cfg); 1352 1158 if (err) 1353 1159 return err; 1354 1160 ··· 1364 1170 pending_len); 1365 1171 err = do_ahash_op(crypto_ahash_update, req, &wait, 1366 1172 divs[i]->nosimd); 1367 - err = check_nonfinal_hash_op("update", err, 1368 - result, digestsize, 1369 - driver, vec_name, cfg); 1173 + err = check_nonfinal_ahash_op("update", err, 1174 + result, digestsize, 1175 + driver, vec_name, cfg); 1370 1176 if (err) 1371 1177 return err; 1372 1178 pending_sgl = NULL; ··· 1377 1183 testmgr_poison(hashstate + statesize, 1378 1184 TESTMGR_POISON_LEN); 1379 1185 err = crypto_ahash_export(req, hashstate); 1380 - err = check_nonfinal_hash_op("export", err, 1381 - result, digestsize, 1382 - driver, vec_name, cfg); 1186 + err = check_nonfinal_ahash_op("export", err, 1187 + result, digestsize, 1188 + driver, vec_name, cfg); 1383 1189 if (err) 1384 1190 return err; 1385 1191 if (!testmgr_is_poison(hashstate + statesize, 1386 1192 TESTMGR_POISON_LEN)) { 1387 - pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n", 1193 + pr_err("alg: ahash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n", 1388 1194 driver, vec_name, cfg->name); 1389 1195 return -EOVERFLOW; 1390 1196 } 1391 1197 1392 1198 testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); 1393 1199 err = crypto_ahash_import(req, hashstate); 1394 - err = check_nonfinal_hash_op("import", err, 1395 - result, digestsize, 1396 - driver, vec_name, cfg); 1200 + err = check_nonfinal_ahash_op("import", err, 1201 + result, digestsize, 1202 + driver, vec_name, cfg); 1397 1203 if (err) 1398 1204 return err; 1399 1205 } ··· 1407 1213 if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) { 1408 1214 /* finish with update() and final() */ 1409 1215 err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd); 1410 - err = check_nonfinal_hash_op("update", 
err, result, digestsize, 1411 - driver, vec_name, cfg); 1216 + err = check_nonfinal_ahash_op("update", err, result, digestsize, 1217 + driver, vec_name, cfg); 1412 1218 if (err) 1413 1219 return err; 1414 1220 err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd); 1415 1221 if (err) { 1416 - pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n", 1222 + pr_err("alg: ahash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n", 1417 1223 driver, err, vec_name, cfg->name); 1418 1224 return err; 1419 1225 } ··· 1421 1227 /* finish with finup() */ 1422 1228 err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd); 1423 1229 if (err) { 1424 - pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n", 1230 + pr_err("alg: ahash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n", 1425 1231 driver, err, vec_name, cfg->name); 1426 1232 return err; 1427 1233 } 1428 1234 } 1429 1235 1430 1236 result_ready: 1431 - /* Check that the algorithm produced the correct digest */ 1432 - if (memcmp(result, vec->digest, digestsize) != 0) { 1433 - pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n", 1434 - driver, vec_name, cfg->name); 1435 - return -EINVAL; 1436 - } 1437 - if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) { 1438 - pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n", 1439 - driver, vec_name, cfg->name); 1440 - return -EOVERFLOW; 1237 + return check_hash_result("ahash", result, digestsize, vec, vec_name, 1238 + driver, cfg); 1239 + } 1240 + 1241 + static int test_hash_vec_cfg(const char *driver, 1242 + const struct hash_testvec *vec, 1243 + const char *vec_name, 1244 + const struct testvec_config *cfg, 1245 + struct ahash_request *req, 1246 + struct shash_desc *desc, 1247 + struct test_sglist *tsgl, 1248 + u8 *hashstate) 1249 + { 1250 + int err; 1251 + 1252 + /* 1253 + * For algorithms implemented as "shash", most 
bugs will be detected by 1254 + * both the shash and ahash tests. Test the shash API first so that the 1255 + * failures involve less indirection, so are easier to debug. 1256 + */ 1257 + 1258 + if (desc) { 1259 + err = test_shash_vec_cfg(driver, vec, vec_name, cfg, desc, tsgl, 1260 + hashstate); 1261 + if (err) 1262 + return err; 1441 1263 } 1442 1264 1443 - return 0; 1265 + return test_ahash_vec_cfg(driver, vec, vec_name, cfg, req, tsgl, 1266 + hashstate); 1444 1267 } 1445 1268 1446 1269 static int test_hash_vec(const char *driver, const struct hash_testvec *vec, 1447 1270 unsigned int vec_num, struct ahash_request *req, 1448 - struct test_sglist *tsgl, u8 *hashstate) 1271 + struct shash_desc *desc, struct test_sglist *tsgl, 1272 + u8 *hashstate) 1449 1273 { 1450 1274 char vec_name[16]; 1451 1275 unsigned int i; ··· 1474 1262 for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) { 1475 1263 err = test_hash_vec_cfg(driver, vec, vec_name, 1476 1264 &default_hash_testvec_configs[i], 1477 - req, tsgl, hashstate); 1265 + req, desc, tsgl, hashstate); 1478 1266 if (err) 1479 1267 return err; 1480 1268 } ··· 1488 1276 generate_random_testvec_config(&cfg, cfgname, 1489 1277 sizeof(cfgname)); 1490 1278 err = test_hash_vec_cfg(driver, vec, vec_name, &cfg, 1491 - req, tsgl, hashstate); 1279 + req, desc, tsgl, hashstate); 1492 1280 if (err) 1493 1281 return err; 1282 + cond_resched(); 1494 1283 } 1495 1284 } 1496 1285 #endif ··· 1503 1290 * Generate a hash test vector from the given implementation. 1504 1291 * Assumes the buffers in 'vec' were already allocated. 
1505 1292 */ 1506 - static void generate_random_hash_testvec(struct crypto_shash *tfm, 1293 + static void generate_random_hash_testvec(struct shash_desc *desc, 1507 1294 struct hash_testvec *vec, 1508 1295 unsigned int maxkeysize, 1509 1296 unsigned int maxdatasize, 1510 1297 char *name, size_t max_namelen) 1511 1298 { 1512 - SHASH_DESC_ON_STACK(desc, tfm); 1513 - 1514 1299 /* Data */ 1515 1300 vec->psize = generate_random_length(maxdatasize); 1516 1301 generate_random_bytes((u8 *)vec->plaintext, vec->psize); ··· 1525 1314 vec->ksize = 1 + (prandom_u32() % maxkeysize); 1526 1315 generate_random_bytes((u8 *)vec->key, vec->ksize); 1527 1316 1528 - vec->setkey_error = crypto_shash_setkey(tfm, vec->key, 1317 + vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key, 1529 1318 vec->ksize); 1530 1319 /* If the key couldn't be set, no need to continue to digest. */ 1531 1320 if (vec->setkey_error) ··· 1533 1322 } 1534 1323 1535 1324 /* Digest */ 1536 - desc->tfm = tfm; 1537 1325 vec->digest_error = crypto_shash_digest(desc, vec->plaintext, 1538 1326 vec->psize, (u8 *)vec->digest); 1539 1327 done: ··· 1548 1338 const char *generic_driver, 1549 1339 unsigned int maxkeysize, 1550 1340 struct ahash_request *req, 1341 + struct shash_desc *desc, 1551 1342 struct test_sglist *tsgl, 1552 1343 u8 *hashstate) 1553 1344 { ··· 1559 1348 const char *algname = crypto_hash_alg_common(tfm)->base.cra_name; 1560 1349 char _generic_driver[CRYPTO_MAX_ALG_NAME]; 1561 1350 struct crypto_shash *generic_tfm = NULL; 1351 + struct shash_desc *generic_desc = NULL; 1562 1352 unsigned int i; 1563 1353 struct hash_testvec vec = { 0 }; 1564 1354 char vec_name[64]; 1565 - struct testvec_config cfg; 1355 + struct testvec_config *cfg; 1566 1356 char cfgname[TESTVEC_CONFIG_NAMELEN]; 1567 1357 int err; 1568 1358 ··· 1592 1380 generic_driver, algname, err); 1593 1381 return err; 1594 1382 } 1383 + 1384 + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 1385 + if (!cfg) { 1386 + err = -ENOMEM; 1387 + goto out; 
1388 + } 1389 + 1390 + generic_desc = kzalloc(sizeof(*desc) + 1391 + crypto_shash_descsize(generic_tfm), GFP_KERNEL); 1392 + if (!generic_desc) { 1393 + err = -ENOMEM; 1394 + goto out; 1395 + } 1396 + generic_desc->tfm = generic_tfm; 1595 1397 1596 1398 /* Check the algorithm properties for consistency. */ 1597 1399 ··· 1638 1412 } 1639 1413 1640 1414 for (i = 0; i < fuzz_iterations * 8; i++) { 1641 - generate_random_hash_testvec(generic_tfm, &vec, 1415 + generate_random_hash_testvec(generic_desc, &vec, 1642 1416 maxkeysize, maxdatasize, 1643 1417 vec_name, sizeof(vec_name)); 1644 - generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname)); 1418 + generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); 1645 1419 1646 - err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg, 1647 - req, tsgl, hashstate); 1420 + err = test_hash_vec_cfg(driver, &vec, vec_name, cfg, 1421 + req, desc, tsgl, hashstate); 1648 1422 if (err) 1649 1423 goto out; 1650 1424 cond_resched(); 1651 1425 } 1652 1426 err = 0; 1653 1427 out: 1428 + kfree(cfg); 1654 1429 kfree(vec.key); 1655 1430 kfree(vec.plaintext); 1656 1431 kfree(vec.digest); 1657 1432 crypto_free_shash(generic_tfm); 1433 + kzfree(generic_desc); 1658 1434 return err; 1659 1435 } 1660 1436 #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ ··· 1664 1436 const char *generic_driver, 1665 1437 unsigned int maxkeysize, 1666 1438 struct ahash_request *req, 1439 + struct shash_desc *desc, 1667 1440 struct test_sglist *tsgl, 1668 1441 u8 *hashstate) 1669 1442 { ··· 1672 1443 } 1673 1444 #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ 1674 1445 1446 + static int alloc_shash(const char *driver, u32 type, u32 mask, 1447 + struct crypto_shash **tfm_ret, 1448 + struct shash_desc **desc_ret) 1449 + { 1450 + struct crypto_shash *tfm; 1451 + struct shash_desc *desc; 1452 + 1453 + tfm = crypto_alloc_shash(driver, type, mask); 1454 + if (IS_ERR(tfm)) { 1455 + if (PTR_ERR(tfm) == -ENOENT) { 1456 + /* 1457 + * This algorithm is only available 
through the ahash 1458 + * API, not the shash API, so skip the shash tests. 1459 + */ 1460 + return 0; 1461 + } 1462 + pr_err("alg: hash: failed to allocate shash transform for %s: %ld\n", 1463 + driver, PTR_ERR(tfm)); 1464 + return PTR_ERR(tfm); 1465 + } 1466 + 1467 + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); 1468 + if (!desc) { 1469 + crypto_free_shash(tfm); 1470 + return -ENOMEM; 1471 + } 1472 + desc->tfm = tfm; 1473 + 1474 + *tfm_ret = tfm; 1475 + *desc_ret = desc; 1476 + return 0; 1477 + } 1478 + 1675 1479 static int __alg_test_hash(const struct hash_testvec *vecs, 1676 1480 unsigned int num_vecs, const char *driver, 1677 1481 u32 type, u32 mask, 1678 1482 const char *generic_driver, unsigned int maxkeysize) 1679 1483 { 1680 - struct crypto_ahash *tfm; 1484 + struct crypto_ahash *atfm = NULL; 1681 1485 struct ahash_request *req = NULL; 1486 + struct crypto_shash *stfm = NULL; 1487 + struct shash_desc *desc = NULL; 1682 1488 struct test_sglist *tsgl = NULL; 1683 1489 u8 *hashstate = NULL; 1490 + unsigned int statesize; 1684 1491 unsigned int i; 1685 1492 int err; 1686 1493 1687 - tfm = crypto_alloc_ahash(driver, type, mask); 1688 - if (IS_ERR(tfm)) { 1494 + /* 1495 + * Always test the ahash API. This works regardless of whether the 1496 + * algorithm is implemented as ahash or shash. 
1497 + */ 1498 + 1499 + atfm = crypto_alloc_ahash(driver, type, mask); 1500 + if (IS_ERR(atfm)) { 1689 1501 pr_err("alg: hash: failed to allocate transform for %s: %ld\n", 1690 - driver, PTR_ERR(tfm)); 1691 - return PTR_ERR(tfm); 1502 + driver, PTR_ERR(atfm)); 1503 + return PTR_ERR(atfm); 1692 1504 } 1693 1505 1694 - req = ahash_request_alloc(tfm, GFP_KERNEL); 1506 + req = ahash_request_alloc(atfm, GFP_KERNEL); 1695 1507 if (!req) { 1696 1508 pr_err("alg: hash: failed to allocate request for %s\n", 1697 1509 driver); 1698 1510 err = -ENOMEM; 1699 1511 goto out; 1700 1512 } 1513 + 1514 + /* 1515 + * If available also test the shash API, to cover corner cases that may 1516 + * be missed by testing the ahash API only. 1517 + */ 1518 + err = alloc_shash(driver, type, mask, &stfm, &desc); 1519 + if (err) 1520 + goto out; 1701 1521 1702 1522 tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL); 1703 1523 if (!tsgl || init_test_sglist(tsgl) != 0) { ··· 1758 1480 goto out; 1759 1481 } 1760 1482 1761 - hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN, 1762 - GFP_KERNEL); 1483 + statesize = crypto_ahash_statesize(atfm); 1484 + if (stfm) 1485 + statesize = max(statesize, crypto_shash_statesize(stfm)); 1486 + hashstate = kmalloc(statesize + TESTMGR_POISON_LEN, GFP_KERNEL); 1763 1487 if (!hashstate) { 1764 1488 pr_err("alg: hash: failed to allocate hash state buffer for %s\n", 1765 1489 driver); ··· 1770 1490 } 1771 1491 1772 1492 for (i = 0; i < num_vecs; i++) { 1773 - err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate); 1493 + err = test_hash_vec(driver, &vecs[i], i, req, desc, tsgl, 1494 + hashstate); 1774 1495 if (err) 1775 1496 goto out; 1497 + cond_resched(); 1776 1498 } 1777 1499 err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req, 1778 - tsgl, hashstate); 1500 + desc, tsgl, hashstate); 1779 1501 out: 1780 1502 kfree(hashstate); 1781 1503 if (tsgl) { 1782 1504 destroy_test_sglist(tsgl); 1783 1505 kfree(tsgl); 1784 1506 } 1507 + 
kfree(desc); 1508 + crypto_free_shash(stfm); 1785 1509 ahash_request_free(req); 1786 - crypto_free_ahash(tfm); 1510 + crypto_free_ahash(atfm); 1787 1511 return err; 1788 1512 } 1789 1513 ··· 2039 1755 &cfg, req, tsgls); 2040 1756 if (err) 2041 1757 return err; 1758 + cond_resched(); 2042 1759 } 2043 1760 } 2044 1761 #endif ··· 2149 1864 unsigned int i; 2150 1865 struct aead_testvec vec = { 0 }; 2151 1866 char vec_name[64]; 2152 - struct testvec_config cfg; 1867 + struct testvec_config *cfg; 2153 1868 char cfgname[TESTVEC_CONFIG_NAMELEN]; 2154 1869 int err; 2155 1870 ··· 2177 1892 pr_err("alg: aead: error allocating %s (generic impl of %s): %d\n", 2178 1893 generic_driver, algname, err); 2179 1894 return err; 1895 + } 1896 + 1897 + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 1898 + if (!cfg) { 1899 + err = -ENOMEM; 1900 + goto out; 2180 1901 } 2181 1902 2182 1903 generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL); ··· 2239 1948 generate_random_aead_testvec(generic_req, &vec, 2240 1949 maxkeysize, maxdatasize, 2241 1950 vec_name, sizeof(vec_name)); 2242 - generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname)); 1951 + generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); 2243 1952 2244 - err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg, 1953 + err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, cfg, 2245 1954 req, tsgls); 2246 1955 if (err) 2247 1956 goto out; 2248 - err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg, 1957 + err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg, 2249 1958 req, tsgls); 2250 1959 if (err) 2251 1960 goto out; ··· 2253 1962 } 2254 1963 err = 0; 2255 1964 out: 1965 + kfree(cfg); 2256 1966 kfree(vec.key); 2257 1967 kfree(vec.iv); 2258 1968 kfree(vec.assoc); ··· 2286 1994 tsgls); 2287 1995 if (err) 2288 1996 return err; 1997 + cond_resched(); 2289 1998 } 2290 1999 return 0; 2291 2000 } ··· 2629 2336 &cfg, req, tsgls); 2630 2337 if (err) 2631 2338 return err; 2339 + cond_resched(); 2632 
2340 } 2633 2341 } 2634 2342 #endif ··· 2703 2409 unsigned int i; 2704 2410 struct cipher_testvec vec = { 0 }; 2705 2411 char vec_name[64]; 2706 - struct testvec_config cfg; 2412 + struct testvec_config *cfg; 2707 2413 char cfgname[TESTVEC_CONFIG_NAMELEN]; 2708 2414 int err; 2709 2415 ··· 2735 2441 pr_err("alg: skcipher: error allocating %s (generic impl of %s): %d\n", 2736 2442 generic_driver, algname, err); 2737 2443 return err; 2444 + } 2445 + 2446 + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 2447 + if (!cfg) { 2448 + err = -ENOMEM; 2449 + goto out; 2738 2450 } 2739 2451 2740 2452 generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL); ··· 2790 2490 for (i = 0; i < fuzz_iterations * 8; i++) { 2791 2491 generate_random_cipher_testvec(generic_req, &vec, maxdatasize, 2792 2492 vec_name, sizeof(vec_name)); 2793 - generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname)); 2493 + generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); 2794 2494 2795 2495 err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name, 2796 - &cfg, req, tsgls); 2496 + cfg, req, tsgls); 2797 2497 if (err) 2798 2498 goto out; 2799 2499 err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name, 2800 - &cfg, req, tsgls); 2500 + cfg, req, tsgls); 2801 2501 if (err) 2802 2502 goto out; 2803 2503 cond_resched(); 2804 2504 } 2805 2505 err = 0; 2806 2506 out: 2507 + kfree(cfg); 2807 2508 kfree(vec.key); 2808 2509 kfree(vec.iv); 2809 2510 kfree(vec.ptext); ··· 2836 2535 tsgls); 2837 2536 if (err) 2838 2537 return err; 2538 + cond_resched(); 2839 2539 } 2840 2540 return 0; 2841 2541 } ··· 4427 4125 } 4428 4126 }, { 4429 4127 .alg = "ecb(arc4)", 4128 + .generic_driver = "ecb(arc4)-generic", 4430 4129 .test = alg_test_skcipher, 4431 4130 .suite = { 4432 4131 .cipher = __VECS(arc4_tv_template) ··· 5092 4789 .alg = "xts512(paes)", 5093 4790 .test = alg_test_null, 5094 4791 .fips_allowed = 1, 4792 + }, { 4793 + .alg = "xxhash64", 4794 + .test = alg_test_hash, 4795 + .fips_allowed = 1, 
4796 + .suite = { 4797 + .hash = __VECS(xxhash64_tv_template) 4798 + } 5095 4799 }, { 5096 4800 .alg = "zlib-deflate", 5097 4801 .test = alg_test_comp,
+111 -5
crypto/testmgr.h
··· 38 38 const char *key; 39 39 const char *plaintext; 40 40 const char *digest; 41 - unsigned short psize; 41 + unsigned int psize; 42 42 unsigned short ksize; 43 43 int setkey_error; 44 44 int digest_error; ··· 69 69 const char *ctext; 70 70 unsigned char wk; /* weak key flag */ 71 71 unsigned short klen; 72 - unsigned short len; 72 + unsigned int len; 73 73 bool fips_skip; 74 74 bool generates_iv; 75 75 int setkey_error; ··· 105 105 unsigned char novrfy; 106 106 unsigned char wk; 107 107 unsigned char klen; 108 - unsigned short plen; 109 - unsigned short clen; 110 - unsigned short alen; 108 + unsigned int plen; 109 + unsigned int clen; 110 + unsigned int alen; 111 111 int setkey_error; 112 112 int setauthsize_error; 113 113 int crypt_error; ··· 33380 33380 .psize = 2048, 33381 33381 .digest = "\xec\x26\x4d\x95", 33382 33382 } 33383 + }; 33384 + 33385 + static const struct hash_testvec xxhash64_tv_template[] = { 33386 + { 33387 + .psize = 0, 33388 + .digest = "\x99\xe9\xd8\x51\x37\xdb\x46\xef", 33389 + }, 33390 + { 33391 + .plaintext = "\x40", 33392 + .psize = 1, 33393 + .digest = "\x20\x5c\x91\xaa\x88\xeb\x59\xd0", 33394 + }, 33395 + { 33396 + .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" 33397 + "\x88\xc7\x9a\x09\x1a\x9b", 33398 + .psize = 14, 33399 + .digest = "\xa8\xe8\x2b\xa9\x92\xa1\x37\x4a", 33400 + }, 33401 + { 33402 + .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" 33403 + "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0" 33404 + "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b" 33405 + "\x57\x65\x7f\xad\xc3\x7d\xca\x40" 33406 + "\x31\x65\x05\xbb\x31\xae\x51\x11" 33407 + "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46" 33408 + "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e" 33409 + "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9" 33410 + "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c" 33411 + "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67" 33412 + "\x57\x20\x94\xf1\x1e\xfd\xce\x39" 33413 + "\x99\x57\x69\x39\xa5\xd0\x8d\xd9" 33414 + "\x43\xfe\x1d\x66\x04\x3c\x27\x6a" 33415 + "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56" 33416 + 
"\xa5\xb3\xec\xd9\x1f\x42\x65\x66" 33417 + "\xaa\xbf\x87\x9b\xc5\x41\x9c\x27" 33418 + "\x3f\x2f\xa9\x55\x93\x01\x27\x33" 33419 + "\x43\x99\x4d\x81\x85\xae\x82\x00" 33420 + "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc" 33421 + "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e" 33422 + "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18" 33423 + "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01" 33424 + "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76" 33425 + "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6" 33426 + "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79" 33427 + "\x52\xea\xa1\x90\xc3\xaf\x08\x70" 33428 + "\x12\x02\x0c\xdb\x94\x00\x38\x95" 33429 + "\xed\xfd\x08\xf7\xe8\x04", 33430 + .psize = 222, 33431 + .digest = "\x41\xfc\xd4\x29\xfe\xe7\x85\x17", 33432 + }, 33433 + { 33434 + .psize = 0, 33435 + .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", 33436 + .ksize = 8, 33437 + .digest = "\xef\x17\x9b\x92\xa2\xfd\x75\xac", 33438 + }, 33439 + 33440 + { 33441 + .plaintext = "\x40", 33442 + .psize = 1, 33443 + .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", 33444 + .ksize = 8, 33445 + .digest = "\xd1\x70\x4f\x14\x02\xc4\x9e\x71", 33446 + }, 33447 + { 33448 + .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" 33449 + "\x88\xc7\x9a\x09\x1a\x9b", 33450 + .psize = 14, 33451 + .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", 33452 + .ksize = 8, 33453 + .digest = "\xa4\xcd\xfe\x8e\x37\xe2\x1c\x64" 33454 + }, 33455 + { 33456 + .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" 33457 + "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0" 33458 + "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b" 33459 + "\x57\x65\x7f\xad\xc3\x7d\xca\x40" 33460 + "\x31\x65\x05\xbb\x31\xae\x51\x11" 33461 + "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46" 33462 + "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e" 33463 + "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9" 33464 + "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c" 33465 + "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67" 33466 + "\x57\x20\x94\xf1\x1e\xfd\xce\x39" 33467 + "\x99\x57\x69\x39\xa5\xd0\x8d\xd9" 33468 + "\x43\xfe\x1d\x66\x04\x3c\x27\x6a" 33469 + "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56" 33470 + "\xa5\xb3\xec\xd9\x1f\x42\x65\x66" 33471 + 
"\xaa\xbf\x87\x9b\xc5\x41\x9c\x27" 33472 + "\x3f\x2f\xa9\x55\x93\x01\x27\x33" 33473 + "\x43\x99\x4d\x81\x85\xae\x82\x00" 33474 + "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc" 33475 + "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e" 33476 + "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18" 33477 + "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01" 33478 + "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76" 33479 + "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6" 33480 + "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79" 33481 + "\x52\xea\xa1\x90\xc3\xaf\x08\x70" 33482 + "\x12\x02\x0c\xdb\x94\x00\x38\x95" 33483 + "\xed\xfd\x08\xf7\xe8\x04", 33484 + .psize = 222, 33485 + .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", 33486 + .ksize = 8, 33487 + .digest = "\x58\xbc\x55\xf2\x42\x81\x5c\xf0" 33488 + }, 33383 33489 }; 33384 33490 33385 33491 static const struct comp_testvec lz4_comp_tv_template[] = {
+12 -9
crypto/tgr192.c
··· 630 630 .final = tgr192_final, 631 631 .descsize = sizeof(struct tgr192_ctx), 632 632 .base = { 633 - .cra_name = "tgr192", 634 - .cra_blocksize = TGR192_BLOCK_SIZE, 635 - .cra_module = THIS_MODULE, 633 + .cra_name = "tgr192", 634 + .cra_driver_name = "tgr192-generic", 635 + .cra_blocksize = TGR192_BLOCK_SIZE, 636 + .cra_module = THIS_MODULE, 636 637 } 637 638 }, { 638 639 .digestsize = TGR160_DIGEST_SIZE, ··· 642 641 .final = tgr160_final, 643 642 .descsize = sizeof(struct tgr192_ctx), 644 643 .base = { 645 - .cra_name = "tgr160", 646 - .cra_blocksize = TGR192_BLOCK_SIZE, 647 - .cra_module = THIS_MODULE, 644 + .cra_name = "tgr160", 645 + .cra_driver_name = "tgr160-generic", 646 + .cra_blocksize = TGR192_BLOCK_SIZE, 647 + .cra_module = THIS_MODULE, 648 648 } 649 649 }, { 650 650 .digestsize = TGR128_DIGEST_SIZE, ··· 654 652 .final = tgr128_final, 655 653 .descsize = sizeof(struct tgr192_ctx), 656 654 .base = { 657 - .cra_name = "tgr128", 658 - .cra_blocksize = TGR192_BLOCK_SIZE, 659 - .cra_module = THIS_MODULE, 655 + .cra_name = "tgr128", 656 + .cra_driver_name = "tgr128-generic", 657 + .cra_blocksize = TGR192_BLOCK_SIZE, 658 + .cra_module = THIS_MODULE, 660 659 } 661 660 } }; 662 661
+12 -9
crypto/wp512.c
··· 1126 1126 .final = wp512_final, 1127 1127 .descsize = sizeof(struct wp512_ctx), 1128 1128 .base = { 1129 - .cra_name = "wp512", 1130 - .cra_blocksize = WP512_BLOCK_SIZE, 1131 - .cra_module = THIS_MODULE, 1129 + .cra_name = "wp512", 1130 + .cra_driver_name = "wp512-generic", 1131 + .cra_blocksize = WP512_BLOCK_SIZE, 1132 + .cra_module = THIS_MODULE, 1132 1133 } 1133 1134 }, { 1134 1135 .digestsize = WP384_DIGEST_SIZE, ··· 1138 1137 .final = wp384_final, 1139 1138 .descsize = sizeof(struct wp512_ctx), 1140 1139 .base = { 1141 - .cra_name = "wp384", 1142 - .cra_blocksize = WP512_BLOCK_SIZE, 1143 - .cra_module = THIS_MODULE, 1140 + .cra_name = "wp384", 1141 + .cra_driver_name = "wp384-generic", 1142 + .cra_blocksize = WP512_BLOCK_SIZE, 1143 + .cra_module = THIS_MODULE, 1144 1144 } 1145 1145 }, { 1146 1146 .digestsize = WP256_DIGEST_SIZE, ··· 1150 1148 .final = wp256_final, 1151 1149 .descsize = sizeof(struct wp512_ctx), 1152 1150 .base = { 1153 - .cra_name = "wp256", 1154 - .cra_blocksize = WP512_BLOCK_SIZE, 1155 - .cra_module = THIS_MODULE, 1151 + .cra_name = "wp256", 1152 + .cra_driver_name = "wp256-generic", 1153 + .cra_blocksize = WP512_BLOCK_SIZE, 1154 + .cra_module = THIS_MODULE, 1156 1155 } 1157 1156 } }; 1158 1157
+108
crypto/xxhash_generic.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <crypto/internal/hash.h> 4 + #include <linux/init.h> 5 + #include <linux/module.h> 6 + #include <linux/xxhash.h> 7 + #include <asm/unaligned.h> 8 + 9 + #define XXHASH64_BLOCK_SIZE 32 10 + #define XXHASH64_DIGEST_SIZE 8 11 + 12 + struct xxhash64_tfm_ctx { 13 + u64 seed; 14 + }; 15 + 16 + struct xxhash64_desc_ctx { 17 + struct xxh64_state xxhstate; 18 + }; 19 + 20 + static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key, 21 + unsigned int keylen) 22 + { 23 + struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm); 24 + 25 + if (keylen != sizeof(tctx->seed)) { 26 + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 27 + return -EINVAL; 28 + } 29 + tctx->seed = get_unaligned_le64(key); 30 + return 0; 31 + } 32 + 33 + static int xxhash64_init(struct shash_desc *desc) 34 + { 35 + struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); 36 + struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); 37 + 38 + xxh64_reset(&dctx->xxhstate, tctx->seed); 39 + 40 + return 0; 41 + } 42 + 43 + static int xxhash64_update(struct shash_desc *desc, const u8 *data, 44 + unsigned int length) 45 + { 46 + struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); 47 + 48 + xxh64_update(&dctx->xxhstate, data, length); 49 + 50 + return 0; 51 + } 52 + 53 + static int xxhash64_final(struct shash_desc *desc, u8 *out) 54 + { 55 + struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); 56 + 57 + put_unaligned_le64(xxh64_digest(&dctx->xxhstate), out); 58 + 59 + return 0; 60 + } 61 + 62 + static int xxhash64_digest(struct shash_desc *desc, const u8 *data, 63 + unsigned int length, u8 *out) 64 + { 65 + struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); 66 + 67 + put_unaligned_le64(xxh64(data, length, tctx->seed), out); 68 + 69 + return 0; 70 + } 71 + 72 + static struct shash_alg alg = { 73 + .digestsize = XXHASH64_DIGEST_SIZE, 74 + .setkey = xxhash64_setkey, 75 + .init = xxhash64_init, 76 + .update = 
xxhash64_update, 77 + .final = xxhash64_final, 78 + .digest = xxhash64_digest, 79 + .descsize = sizeof(struct xxhash64_desc_ctx), 80 + .base = { 81 + .cra_name = "xxhash64", 82 + .cra_driver_name = "xxhash64-generic", 83 + .cra_priority = 100, 84 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 85 + .cra_blocksize = XXHASH64_BLOCK_SIZE, 86 + .cra_ctxsize = sizeof(struct xxhash64_tfm_ctx), 87 + .cra_module = THIS_MODULE, 88 + } 89 + }; 90 + 91 + static int __init xxhash_mod_init(void) 92 + { 93 + return crypto_register_shash(&alg); 94 + } 95 + 96 + static void __exit xxhash_mod_fini(void) 97 + { 98 + crypto_unregister_shash(&alg); 99 + } 100 + 101 + subsys_initcall(xxhash_mod_init); 102 + module_exit(xxhash_mod_fini); 103 + 104 + MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>"); 105 + MODULE_DESCRIPTION("xxhash calculations wrapper for lib/xxhash.c"); 106 + MODULE_LICENSE("GPL"); 107 + MODULE_ALIAS_CRYPTO("xxhash64"); 108 + MODULE_ALIAS_CRYPTO("xxhash64-generic");
+1
crypto/zstd.c
··· 206 206 207 207 static struct crypto_alg alg = { 208 208 .cra_name = "zstd", 209 + .cra_driver_name = "zstd-generic", 209 210 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 210 211 .cra_ctxsize = sizeof(struct zstd_ctx), 211 212 .cra_module = THIS_MODULE,
+1
drivers/char/hw_random/iproc-rng200.c
··· 220 220 } 221 221 222 222 static const struct of_device_id iproc_rng200_of_match[] = { 223 + { .compatible = "brcm,bcm7211-rng200", }, 223 224 { .compatible = "brcm,bcm7278-rng200", }, 224 225 { .compatible = "brcm,iproc-rng200", }, 225 226 {},
+1 -51
drivers/char/hw_random/meson-rng.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 1 2 /* 2 - * This file is provided under a dual BSD/GPLv2 license. When using or 3 - * redistributing this file, you may do so under either license. 4 - * 5 - * GPL LICENSE SUMMARY 6 - * 7 3 * Copyright (c) 2016 BayLibre, SAS. 8 4 * Author: Neil Armstrong <narmstrong@baylibre.com> 9 5 * Copyright (C) 2014 Amlogic, Inc. 10 - * 11 - * This program is free software; you can redistribute it and/or modify 12 - * it under the terms of version 2 of the GNU General Public License as 13 - * published by the Free Software Foundation. 14 - * 15 - * This program is distributed in the hope that it will be useful, but 16 - * WITHOUT ANY WARRANTY; without even the implied warranty of 17 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 - * General Public License for more details. 19 - * 20 - * You should have received a copy of the GNU General Public License 21 - * along with this program; if not, see <http://www.gnu.org/licenses/>. 22 - * The full GNU General Public License is included in this distribution 23 - * in the file called COPYING. 24 - * 25 - * BSD LICENSE 26 - * 27 - * Copyright (c) 2016 BayLibre, SAS. 28 - * Author: Neil Armstrong <narmstrong@baylibre.com> 29 - * Copyright (C) 2014 Amlogic, Inc. 30 - * 31 - * Redistribution and use in source and binary forms, with or without 32 - * modification, are permitted provided that the following conditions 33 - * are met: 34 - * 35 - * * Redistributions of source code must retain the above copyright 36 - * notice, this list of conditions and the following disclaimer. 37 - * * Redistributions in binary form must reproduce the above copyright 38 - * notice, this list of conditions and the following disclaimer in 39 - * the documentation and/or other materials provided with the 40 - * distribution. 
41 - * * Neither the name of Intel Corporation nor the names of its 42 - * contributors may be used to endorse or promote products derived 43 - * from this software without specific prior written permission. 44 - * 45 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 46 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 47 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 48 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 49 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 50 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 51 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 52 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 53 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 54 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 55 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 56 6 */ 57 7 #include <linux/err.h> 58 8 #include <linux/module.h>
+19 -1
drivers/crypto/Kconfig
··· 520 520 To compile this driver as a module, choose M here: the module 521 521 will be called atmel-sha. 522 522 523 + config CRYPTO_DEV_ATMEL_I2C 524 + tristate 525 + 523 526 config CRYPTO_DEV_ATMEL_ECC 524 527 tristate "Support for Microchip / Atmel ECC hw accelerator" 525 - depends on ARCH_AT91 || COMPILE_TEST 526 528 depends on I2C 529 + select CRYPTO_DEV_ATMEL_I2C 527 530 select CRYPTO_ECDH 528 531 select CRC16 529 532 help ··· 536 533 537 534 To compile this driver as a module, choose M here: the module 538 535 will be called atmel-ecc. 536 + 537 + config CRYPTO_DEV_ATMEL_SHA204A 538 + tristate "Support for Microchip / Atmel SHA accelerator and RNG" 539 + depends on I2C 540 + select CRYPTO_DEV_ATMEL_I2C 541 + select HW_RANDOM 542 + select CRC16 543 + help 544 + Microhip / Atmel SHA accelerator and RNG. 545 + Select this if you want to use the Microchip / Atmel SHA204A 546 + module as a random number generator. (Other functions of the 547 + chip are currently not exposed by this driver) 548 + 549 + To compile this driver as a module, choose M here: the module 550 + will be called atmel-sha204a. 539 551 540 552 config CRYPTO_DEV_CCP 541 553 bool "Support for AMD Secure Processor"
+2
drivers/crypto/Makefile
··· 2 2 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o 3 3 obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o 4 4 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o 5 + obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o 5 6 obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o 7 + obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o 6 8 obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ 7 9 obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ 8 10 obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
+25 -11
drivers/crypto/amcc/crypto4xx_alg.c
··· 67 67 } 68 68 69 69 static inline int crypto4xx_crypt(struct skcipher_request *req, 70 - const unsigned int ivlen, bool decrypt) 70 + const unsigned int ivlen, bool decrypt, 71 + bool check_blocksize) 71 72 { 72 73 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 73 74 struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); 74 75 __le32 iv[AES_IV_SIZE]; 76 + 77 + if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) 78 + return -EINVAL; 75 79 76 80 if (ivlen) 77 81 crypto4xx_memcpy_to_le32(iv, req->iv, ivlen); ··· 85 81 ctx->sa_len, 0, NULL); 86 82 } 87 83 88 - int crypto4xx_encrypt_noiv(struct skcipher_request *req) 84 + int crypto4xx_encrypt_noiv_block(struct skcipher_request *req) 89 85 { 90 - return crypto4xx_crypt(req, 0, false); 86 + return crypto4xx_crypt(req, 0, false, true); 91 87 } 92 88 93 - int crypto4xx_encrypt_iv(struct skcipher_request *req) 89 + int crypto4xx_encrypt_iv_stream(struct skcipher_request *req) 94 90 { 95 - return crypto4xx_crypt(req, AES_IV_SIZE, false); 91 + return crypto4xx_crypt(req, AES_IV_SIZE, false, false); 96 92 } 97 93 98 - int crypto4xx_decrypt_noiv(struct skcipher_request *req) 94 + int crypto4xx_decrypt_noiv_block(struct skcipher_request *req) 99 95 { 100 - return crypto4xx_crypt(req, 0, true); 96 + return crypto4xx_crypt(req, 0, true, true); 101 97 } 102 98 103 - int crypto4xx_decrypt_iv(struct skcipher_request *req) 99 + int crypto4xx_decrypt_iv_stream(struct skcipher_request *req) 104 100 { 105 - return crypto4xx_crypt(req, AES_IV_SIZE, true); 101 + return crypto4xx_crypt(req, AES_IV_SIZE, true, false); 102 + } 103 + 104 + int crypto4xx_encrypt_iv_block(struct skcipher_request *req) 105 + { 106 + return crypto4xx_crypt(req, AES_IV_SIZE, false, true); 107 + } 108 + 109 + int crypto4xx_decrypt_iv_block(struct skcipher_request *req) 110 + { 111 + return crypto4xx_crypt(req, AES_IV_SIZE, true, true); 106 112 } 107 113 108 114 /** ··· 283 269 return ret; 284 270 } 285 271 286 - return encrypt ? 
crypto4xx_encrypt_iv(req) 287 - : crypto4xx_decrypt_iv(req); 272 + return encrypt ? crypto4xx_encrypt_iv_stream(req) 273 + : crypto4xx_decrypt_iv_stream(req); 288 274 } 289 275 290 276 static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
+12 -13
drivers/crypto/amcc/crypto4xx_core.c
··· 182 182 dev->pdr_pa); 183 183 return -ENOMEM; 184 184 } 185 - memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); 186 185 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, 187 186 sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, 188 187 &dev->shadow_sa_pool_pa, ··· 1209 1210 .max_keysize = AES_MAX_KEY_SIZE, 1210 1211 .ivsize = AES_IV_SIZE, 1211 1212 .setkey = crypto4xx_setkey_aes_cbc, 1212 - .encrypt = crypto4xx_encrypt_iv, 1213 - .decrypt = crypto4xx_decrypt_iv, 1213 + .encrypt = crypto4xx_encrypt_iv_block, 1214 + .decrypt = crypto4xx_decrypt_iv_block, 1214 1215 .init = crypto4xx_sk_init, 1215 1216 .exit = crypto4xx_sk_exit, 1216 1217 } }, ··· 1221 1222 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1222 1223 .cra_flags = CRYPTO_ALG_ASYNC | 1223 1224 CRYPTO_ALG_KERN_DRIVER_ONLY, 1224 - .cra_blocksize = AES_BLOCK_SIZE, 1225 + .cra_blocksize = 1, 1225 1226 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1226 1227 .cra_module = THIS_MODULE, 1227 1228 }, ··· 1229 1230 .max_keysize = AES_MAX_KEY_SIZE, 1230 1231 .ivsize = AES_IV_SIZE, 1231 1232 .setkey = crypto4xx_setkey_aes_cfb, 1232 - .encrypt = crypto4xx_encrypt_iv, 1233 - .decrypt = crypto4xx_decrypt_iv, 1233 + .encrypt = crypto4xx_encrypt_iv_stream, 1234 + .decrypt = crypto4xx_decrypt_iv_stream, 1234 1235 .init = crypto4xx_sk_init, 1235 1236 .exit = crypto4xx_sk_exit, 1236 1237 } }, ··· 1242 1243 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | 1243 1244 CRYPTO_ALG_ASYNC | 1244 1245 CRYPTO_ALG_KERN_DRIVER_ONLY, 1245 - .cra_blocksize = AES_BLOCK_SIZE, 1246 + .cra_blocksize = 1, 1246 1247 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1247 1248 .cra_module = THIS_MODULE, 1248 1249 }, ··· 1262 1263 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1263 1264 .cra_flags = CRYPTO_ALG_ASYNC | 1264 1265 CRYPTO_ALG_KERN_DRIVER_ONLY, 1265 - .cra_blocksize = AES_BLOCK_SIZE, 1266 + .cra_blocksize = 1, 1266 1267 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1267 1268 .cra_module = THIS_MODULE, 1268 1269 }, ··· 1289 1290 .min_keysize 
= AES_MIN_KEY_SIZE, 1290 1291 .max_keysize = AES_MAX_KEY_SIZE, 1291 1292 .setkey = crypto4xx_setkey_aes_ecb, 1292 - .encrypt = crypto4xx_encrypt_noiv, 1293 - .decrypt = crypto4xx_decrypt_noiv, 1293 + .encrypt = crypto4xx_encrypt_noiv_block, 1294 + .decrypt = crypto4xx_decrypt_noiv_block, 1294 1295 .init = crypto4xx_sk_init, 1295 1296 .exit = crypto4xx_sk_exit, 1296 1297 } }, ··· 1301 1302 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1302 1303 .cra_flags = CRYPTO_ALG_ASYNC | 1303 1304 CRYPTO_ALG_KERN_DRIVER_ONLY, 1304 - .cra_blocksize = AES_BLOCK_SIZE, 1305 + .cra_blocksize = 1, 1305 1306 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1306 1307 .cra_module = THIS_MODULE, 1307 1308 }, ··· 1309 1310 .max_keysize = AES_MAX_KEY_SIZE, 1310 1311 .ivsize = AES_IV_SIZE, 1311 1312 .setkey = crypto4xx_setkey_aes_ofb, 1312 - .encrypt = crypto4xx_encrypt_iv, 1313 - .decrypt = crypto4xx_decrypt_iv, 1313 + .encrypt = crypto4xx_encrypt_iv_stream, 1314 + .decrypt = crypto4xx_decrypt_iv_stream, 1314 1315 .init = crypto4xx_sk_init, 1315 1316 .exit = crypto4xx_sk_exit, 1316 1317 } },
+6 -4
drivers/crypto/amcc/crypto4xx_core.h
··· 173 173 const u8 *key, unsigned int keylen); 174 174 int crypto4xx_encrypt_ctr(struct skcipher_request *req); 175 175 int crypto4xx_decrypt_ctr(struct skcipher_request *req); 176 - int crypto4xx_encrypt_iv(struct skcipher_request *req); 177 - int crypto4xx_decrypt_iv(struct skcipher_request *req); 178 - int crypto4xx_encrypt_noiv(struct skcipher_request *req); 179 - int crypto4xx_decrypt_noiv(struct skcipher_request *req); 176 + int crypto4xx_encrypt_iv_stream(struct skcipher_request *req); 177 + int crypto4xx_decrypt_iv_stream(struct skcipher_request *req); 178 + int crypto4xx_encrypt_iv_block(struct skcipher_request *req); 179 + int crypto4xx_decrypt_iv_block(struct skcipher_request *req); 180 + int crypto4xx_encrypt_noiv_block(struct skcipher_request *req); 181 + int crypto4xx_decrypt_noiv_block(struct skcipher_request *req); 180 182 int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); 181 183 int crypto4xx_rfc3686_decrypt(struct skcipher_request *req); 182 184 int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+22 -381
drivers/crypto/atmel-ecc.c
··· 6 6 * Author: Tudor Ambarus <tudor.ambarus@microchip.com> 7 7 */ 8 8 9 - #include <linux/bitrev.h> 10 - #include <linux/crc16.h> 11 9 #include <linux/delay.h> 12 10 #include <linux/device.h> 13 11 #include <linux/err.h> ··· 21 23 #include <crypto/internal/kpp.h> 22 24 #include <crypto/ecdh.h> 23 25 #include <crypto/kpp.h> 24 - #include "atmel-ecc.h" 25 - 26 - /* Used for binding tfm objects to i2c clients. */ 27 - struct atmel_ecc_driver_data { 28 - struct list_head i2c_client_list; 29 - spinlock_t i2c_list_lock; 30 - } ____cacheline_aligned; 26 + #include "atmel-i2c.h" 31 27 32 28 static struct atmel_ecc_driver_data driver_data; 33 - 34 - /** 35 - * atmel_ecc_i2c_client_priv - i2c_client private data 36 - * @client : pointer to i2c client device 37 - * @i2c_client_list_node: part of i2c_client_list 38 - * @lock : lock for sending i2c commands 39 - * @wake_token : wake token array of zeros 40 - * @wake_token_sz : size in bytes of the wake_token 41 - * @tfm_count : number of active crypto transformations on i2c client 42 - * 43 - * Reads and writes from/to the i2c client are sequential. The first byte 44 - * transmitted to the device is treated as the byte size. Any attempt to send 45 - * more than this number of bytes will cause the device to not ACK those bytes. 46 - * After the host writes a single command byte to the input buffer, reads are 47 - * prohibited until after the device completes command execution. Use a mutex 48 - * when sending i2c commands. 49 - */ 50 - struct atmel_ecc_i2c_client_priv { 51 - struct i2c_client *client; 52 - struct list_head i2c_client_list_node; 53 - struct mutex lock; 54 - u8 wake_token[WAKE_TOKEN_MAX_SIZE]; 55 - size_t wake_token_sz; 56 - atomic_t tfm_count ____cacheline_aligned; 57 - }; 58 29 59 30 /** 60 31 * atmel_ecdh_ctx - transformation context ··· 47 80 bool do_fallback; 48 81 }; 49 82 50 - /** 51 - * atmel_ecc_work_data - data structure representing the work 52 - * @ctx : transformation context. 
53 - * @cbk : pointer to a callback function to be invoked upon completion of this 54 - * request. This has the form: 55 - * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status) 56 - * where: 57 - * @work_data: data structure representing the work 58 - * @areq : optional pointer to an argument passed with the original 59 - * request. 60 - * @status : status returned from the i2c client device or i2c error. 61 - * @areq: optional pointer to a user argument for use at callback time. 62 - * @work: describes the task to be executed. 63 - * @cmd : structure used for communicating with the device. 64 - */ 65 - struct atmel_ecc_work_data { 66 - struct atmel_ecdh_ctx *ctx; 67 - void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq, 68 - int status); 69 - void *areq; 70 - struct work_struct work; 71 - struct atmel_ecc_cmd cmd; 72 - }; 73 - 74 - static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len) 75 - { 76 - return cpu_to_le16(bitrev16(crc16(crc, buffer, len))); 77 - } 78 - 79 - /** 80 - * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC. 81 - * CRC16 verification of the count, opcode, param1, param2 and data bytes. 82 - * The checksum is saved in little-endian format in the least significant 83 - * two bytes of the command. CRC polynomial is 0x8005 and the initial register 84 - * value should be zero. 85 - * 86 - * @cmd : structure used for communicating with the device. 87 - */ 88 - static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd) 89 - { 90 - u8 *data = &cmd->count; 91 - size_t len = cmd->count - CRC_SIZE; 92 - u16 *crc16 = (u16 *)(data + len); 93 - 94 - *crc16 = atmel_ecc_crc16(0, data, len); 95 - } 96 - 97 - static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd) 98 - { 99 - cmd->word_addr = COMMAND; 100 - cmd->opcode = OPCODE_READ; 101 - /* 102 - * Read the word from Configuration zone that contains the lock bytes 103 - * (UserExtra, Selector, LockValue, LockConfig). 
104 - */ 105 - cmd->param1 = CONFIG_ZONE; 106 - cmd->param2 = DEVICE_LOCK_ADDR; 107 - cmd->count = READ_COUNT; 108 - 109 - atmel_ecc_checksum(cmd); 110 - 111 - cmd->msecs = MAX_EXEC_TIME_READ; 112 - cmd->rxsize = READ_RSP_SIZE; 113 - } 114 - 115 - static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid) 116 - { 117 - cmd->word_addr = COMMAND; 118 - cmd->count = GENKEY_COUNT; 119 - cmd->opcode = OPCODE_GENKEY; 120 - cmd->param1 = GENKEY_MODE_PRIVATE; 121 - /* a random private key will be generated and stored in slot keyID */ 122 - cmd->param2 = cpu_to_le16(keyid); 123 - 124 - atmel_ecc_checksum(cmd); 125 - 126 - cmd->msecs = MAX_EXEC_TIME_GENKEY; 127 - cmd->rxsize = GENKEY_RSP_SIZE; 128 - } 129 - 130 - static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd, 131 - struct scatterlist *pubkey) 132 - { 133 - size_t copied; 134 - 135 - cmd->word_addr = COMMAND; 136 - cmd->count = ECDH_COUNT; 137 - cmd->opcode = OPCODE_ECDH; 138 - cmd->param1 = ECDH_PREFIX_MODE; 139 - /* private key slot */ 140 - cmd->param2 = cpu_to_le16(DATA_SLOT_2); 141 - 142 - /* 143 - * The device only supports NIST P256 ECC keys. The public key size will 144 - * always be the same. Use a macro for the key size to avoid unnecessary 145 - * computations. 146 - */ 147 - copied = sg_copy_to_buffer(pubkey, 148 - sg_nents_for_len(pubkey, 149 - ATMEL_ECC_PUBKEY_SIZE), 150 - cmd->data, ATMEL_ECC_PUBKEY_SIZE); 151 - if (copied != ATMEL_ECC_PUBKEY_SIZE) 152 - return -EINVAL; 153 - 154 - atmel_ecc_checksum(cmd); 155 - 156 - cmd->msecs = MAX_EXEC_TIME_ECDH; 157 - cmd->rxsize = ECDH_RSP_SIZE; 158 - 159 - return 0; 160 - } 161 - 162 - /* 163 - * After wake and after execution of a command, there will be error, status, or 164 - * result bytes in the device's output register that can be retrieved by the 165 - * system. When the length of that group is four bytes, the codes returned are 166 - * detailed in error_list. 
167 - */ 168 - static int atmel_ecc_status(struct device *dev, u8 *status) 169 - { 170 - size_t err_list_len = ARRAY_SIZE(error_list); 171 - int i; 172 - u8 err_id = status[1]; 173 - 174 - if (*status != STATUS_SIZE) 175 - return 0; 176 - 177 - if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR) 178 - return 0; 179 - 180 - for (i = 0; i < err_list_len; i++) 181 - if (error_list[i].value == err_id) 182 - break; 183 - 184 - /* if err_id is not in the error_list then ignore it */ 185 - if (i != err_list_len) { 186 - dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text); 187 - return err_id; 188 - } 189 - 190 - return 0; 191 - } 192 - 193 - static int atmel_ecc_wakeup(struct i2c_client *client) 194 - { 195 - struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 196 - u8 status[STATUS_RSP_SIZE]; 197 - int ret; 198 - 199 - /* 200 - * The device ignores any levels or transitions on the SCL pin when the 201 - * device is idle, asleep or during waking up. Don't check for error 202 - * when waking up the device. 203 - */ 204 - i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz); 205 - 206 - /* 207 - * Wait to wake the device. Typical execution times for ecdh and genkey 208 - * are around tens of milliseconds. Delta is chosen to 50 microseconds. 
209 - */ 210 - usleep_range(TWHI_MIN, TWHI_MAX); 211 - 212 - ret = i2c_master_recv(client, status, STATUS_SIZE); 213 - if (ret < 0) 214 - return ret; 215 - 216 - return atmel_ecc_status(&client->dev, status); 217 - } 218 - 219 - static int atmel_ecc_sleep(struct i2c_client *client) 220 - { 221 - u8 sleep = SLEEP_TOKEN; 222 - 223 - return i2c_master_send(client, &sleep, 1); 224 - } 225 - 226 - static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq, 83 + static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, 227 84 int status) 228 85 { 229 86 struct kpp_request *req = areq; 230 87 struct atmel_ecdh_ctx *ctx = work_data->ctx; 231 - struct atmel_ecc_cmd *cmd = &work_data->cmd; 88 + struct atmel_i2c_cmd *cmd = &work_data->cmd; 232 89 size_t copied, n_sz; 233 90 234 91 if (status) ··· 73 282 kpp_request_complete(req, status); 74 283 } 75 284 76 - /* 77 - * atmel_ecc_send_receive() - send a command to the device and receive its 78 - * response. 79 - * @client: i2c client device 80 - * @cmd : structure used to communicate with the device 81 - * 82 - * After the device receives a Wake token, a watchdog counter starts within the 83 - * device. After the watchdog timer expires, the device enters sleep mode 84 - * regardless of whether some I/O transmission or command execution is in 85 - * progress. If a command is attempted when insufficient time remains prior to 86 - * watchdog timer execution, the device will return the watchdog timeout error 87 - * code without attempting to execute the command. There is no way to reset the 88 - * counter other than to put the device into sleep or idle mode and then 89 - * wake it up again. 
90 - */ 91 - static int atmel_ecc_send_receive(struct i2c_client *client, 92 - struct atmel_ecc_cmd *cmd) 93 - { 94 - struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 95 - int ret; 96 - 97 - mutex_lock(&i2c_priv->lock); 98 - 99 - ret = atmel_ecc_wakeup(client); 100 - if (ret) 101 - goto err; 102 - 103 - /* send the command */ 104 - ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE); 105 - if (ret < 0) 106 - goto err; 107 - 108 - /* delay the appropriate amount of time for command to execute */ 109 - msleep(cmd->msecs); 110 - 111 - /* receive the response */ 112 - ret = i2c_master_recv(client, cmd->data, cmd->rxsize); 113 - if (ret < 0) 114 - goto err; 115 - 116 - /* put the device into low-power mode */ 117 - ret = atmel_ecc_sleep(client); 118 - if (ret < 0) 119 - goto err; 120 - 121 - mutex_unlock(&i2c_priv->lock); 122 - return atmel_ecc_status(&client->dev, cmd->data); 123 - err: 124 - mutex_unlock(&i2c_priv->lock); 125 - return ret; 126 - } 127 - 128 - static void atmel_ecc_work_handler(struct work_struct *work) 129 - { 130 - struct atmel_ecc_work_data *work_data = 131 - container_of(work, struct atmel_ecc_work_data, work); 132 - struct atmel_ecc_cmd *cmd = &work_data->cmd; 133 - struct i2c_client *client = work_data->ctx->client; 134 - int status; 135 - 136 - status = atmel_ecc_send_receive(client, cmd); 137 - work_data->cbk(work_data, work_data->areq, status); 138 - } 139 - 140 - static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data, 141 - void (*cbk)(struct atmel_ecc_work_data *work_data, 142 - void *areq, int status), 143 - void *areq) 144 - { 145 - work_data->cbk = (void *)cbk; 146 - work_data->areq = areq; 147 - 148 - INIT_WORK(&work_data->work, atmel_ecc_work_handler); 149 - schedule_work(&work_data->work); 150 - } 151 - 152 285 static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) 153 286 { 154 287 if (curve_id == ECC_CURVE_NIST_P256) ··· 89 374 unsigned int len) 90 375 { 91 376 
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); 92 - struct atmel_ecc_cmd *cmd; 377 + struct atmel_i2c_cmd *cmd; 93 378 void *public_key; 94 379 struct ecdh params; 95 380 int ret = -ENOMEM; ··· 127 412 ctx->do_fallback = false; 128 413 ctx->curve_id = params.curve_id; 129 414 130 - atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2); 415 + atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2); 131 416 132 - ret = atmel_ecc_send_receive(ctx->client, cmd); 417 + ret = atmel_i2c_send_receive(ctx->client, cmd); 133 418 if (ret) 134 419 goto free_public_key; 135 420 ··· 159 444 return crypto_kpp_generate_public_key(req); 160 445 } 161 446 447 + if (!ctx->public_key) 448 + return -EINVAL; 449 + 162 450 /* might want less than we've got */ 163 451 nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len); 164 452 ··· 179 461 { 180 462 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); 181 463 struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); 182 - struct atmel_ecc_work_data *work_data; 464 + struct atmel_i2c_work_data *work_data; 183 465 gfp_t gfp; 184 466 int ret; 185 467 ··· 200 482 return -ENOMEM; 201 483 202 484 work_data->ctx = ctx; 485 + work_data->client = ctx->client; 203 486 204 - ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src); 487 + ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src); 205 488 if (ret) 206 489 goto free_work_data; 207 490 208 - atmel_ecc_enqueue(work_data, atmel_ecdh_done, req); 491 + atmel_i2c_enqueue(work_data, atmel_ecdh_done, req); 209 492 210 493 return -EINPROGRESS; 211 494 ··· 217 498 218 499 static struct i2c_client *atmel_ecc_i2c_client_alloc(void) 219 500 { 220 - struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL; 501 + struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL; 221 502 struct i2c_client *client = ERR_PTR(-ENODEV); 222 503 int min_tfm_cnt = INT_MAX; 223 504 int tfm_cnt; ··· 252 533 253 534 static void atmel_ecc_i2c_client_free(struct i2c_client *client) 254 535 { 255 - struct atmel_ecc_i2c_client_priv 
*i2c_priv = i2c_get_clientdata(client); 536 + struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 256 537 257 538 atomic_dec(&i2c_priv->tfm_count); 258 539 } ··· 323 604 }, 324 605 }; 325 606 326 - static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate) 327 - { 328 - u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC); 329 - 330 - /* return the size of the wake_token in bytes */ 331 - return DIV_ROUND_UP(no_of_bits, 8); 332 - } 333 - 334 - static int device_sanity_check(struct i2c_client *client) 335 - { 336 - struct atmel_ecc_cmd *cmd; 337 - int ret; 338 - 339 - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); 340 - if (!cmd) 341 - return -ENOMEM; 342 - 343 - atmel_ecc_init_read_cmd(cmd); 344 - 345 - ret = atmel_ecc_send_receive(client, cmd); 346 - if (ret) 347 - goto free_cmd; 348 - 349 - /* 350 - * It is vital that the Configuration, Data and OTP zones be locked 351 - * prior to release into the field of the system containing the device. 352 - * Failure to lock these zones may permit modification of any secret 353 - * keys and may lead to other security problems. 
354 - */ 355 - if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) { 356 - dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n"); 357 - ret = -ENOTSUPP; 358 - } 359 - 360 - /* fall through */ 361 - free_cmd: 362 - kfree(cmd); 363 - return ret; 364 - } 365 - 366 607 static int atmel_ecc_probe(struct i2c_client *client, 367 608 const struct i2c_device_id *id) 368 609 { 369 - struct atmel_ecc_i2c_client_priv *i2c_priv; 370 - struct device *dev = &client->dev; 610 + struct atmel_i2c_client_priv *i2c_priv; 371 611 int ret; 372 - u32 bus_clk_rate; 373 612 374 - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { 375 - dev_err(dev, "I2C_FUNC_I2C not supported\n"); 376 - return -ENODEV; 377 - } 378 - 379 - ret = of_property_read_u32(client->adapter->dev.of_node, 380 - "clock-frequency", &bus_clk_rate); 381 - if (ret) { 382 - dev_err(dev, "of: failed to read clock-frequency property\n"); 383 - return ret; 384 - } 385 - 386 - if (bus_clk_rate > 1000000L) { 387 - dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", 388 - bus_clk_rate); 389 - return -EINVAL; 390 - } 391 - 392 - i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL); 393 - if (!i2c_priv) 394 - return -ENOMEM; 395 - 396 - i2c_priv->client = client; 397 - mutex_init(&i2c_priv->lock); 398 - 399 - /* 400 - * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate - 401 - * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz 402 - * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE. 
403 - */ 404 - i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate); 405 - 406 - memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token)); 407 - 408 - atomic_set(&i2c_priv->tfm_count, 0); 409 - 410 - i2c_set_clientdata(client, i2c_priv); 411 - 412 - ret = device_sanity_check(client); 613 + ret = atmel_i2c_probe(client, id); 413 614 if (ret) 414 615 return ret; 616 + 617 + i2c_priv = i2c_get_clientdata(client); 415 618 416 619 spin_lock(&driver_data.i2c_list_lock); 417 620 list_add_tail(&i2c_priv->i2c_client_list_node, ··· 346 705 list_del(&i2c_priv->i2c_client_list_node); 347 706 spin_unlock(&driver_data.i2c_list_lock); 348 707 349 - dev_err(dev, "%s alg registration failed\n", 708 + dev_err(&client->dev, "%s alg registration failed\n", 350 709 atmel_ecdh.base.cra_driver_name); 351 710 } else { 352 - dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n"); 711 + dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n"); 353 712 } 354 713 355 714 return ret; ··· 357 716 358 717 static int atmel_ecc_remove(struct i2c_client *client) 359 718 { 360 - struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 719 + struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 361 720 362 721 /* Return EBUSY if i2c client already allocated. */ 363 722 if (atomic_read(&i2c_priv->tfm_count)) {
-116
drivers/crypto/atmel-ecc.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Copyright (c) 2017, Microchip Technology Inc. 4 - * Author: Tudor Ambarus <tudor.ambarus@microchip.com> 5 - */ 6 - 7 - #ifndef __ATMEL_ECC_H__ 8 - #define __ATMEL_ECC_H__ 9 - 10 - #define ATMEL_ECC_PRIORITY 300 11 - 12 - #define COMMAND 0x03 /* packet function */ 13 - #define SLEEP_TOKEN 0x01 14 - #define WAKE_TOKEN_MAX_SIZE 8 15 - 16 - /* Definitions of Data and Command sizes */ 17 - #define WORD_ADDR_SIZE 1 18 - #define COUNT_SIZE 1 19 - #define CRC_SIZE 2 20 - #define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE) 21 - 22 - /* size in bytes of the n prime */ 23 - #define ATMEL_ECC_NIST_P256_N_SIZE 32 24 - #define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE) 25 - 26 - #define STATUS_RSP_SIZE 4 27 - #define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE) 28 - #define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \ 29 - CMD_OVERHEAD_SIZE) 30 - #define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE) 31 - #define MAX_RSP_SIZE GENKEY_RSP_SIZE 32 - 33 - /** 34 - * atmel_ecc_cmd - structure used for communicating with the device. 35 - * @word_addr: indicates the function of the packet sent to the device. This 36 - * byte should have a value of COMMAND for normal operation. 37 - * @count : number of bytes to be transferred to (or from) the device. 38 - * @opcode : the command code. 39 - * @param1 : the first parameter; always present. 40 - * @param2 : the second parameter; always present. 41 - * @data : optional remaining input data. Includes a 2-byte CRC. 42 - * @rxsize : size of the data received from i2c client. 
43 - * @msecs : command execution time in milliseconds 44 - */ 45 - struct atmel_ecc_cmd { 46 - u8 word_addr; 47 - u8 count; 48 - u8 opcode; 49 - u8 param1; 50 - u16 param2; 51 - u8 data[MAX_RSP_SIZE]; 52 - u8 msecs; 53 - u16 rxsize; 54 - } __packed; 55 - 56 - /* Status/Error codes */ 57 - #define STATUS_SIZE 0x04 58 - #define STATUS_NOERR 0x00 59 - #define STATUS_WAKE_SUCCESSFUL 0x11 60 - 61 - static const struct { 62 - u8 value; 63 - const char *error_text; 64 - } error_list[] = { 65 - { 0x01, "CheckMac or Verify miscompare" }, 66 - { 0x03, "Parse Error" }, 67 - { 0x05, "ECC Fault" }, 68 - { 0x0F, "Execution Error" }, 69 - { 0xEE, "Watchdog about to expire" }, 70 - { 0xFF, "CRC or other communication error" }, 71 - }; 72 - 73 - /* Definitions for eeprom organization */ 74 - #define CONFIG_ZONE 0 75 - 76 - /* Definitions for Indexes common to all commands */ 77 - #define RSP_DATA_IDX 1 /* buffer index of data in response */ 78 - #define DATA_SLOT_2 2 /* used for ECDH private key */ 79 - 80 - /* Definitions for the device lock state */ 81 - #define DEVICE_LOCK_ADDR 0x15 82 - #define LOCK_VALUE_IDX (RSP_DATA_IDX + 2) 83 - #define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3) 84 - 85 - /* 86 - * Wake High delay to data communication (microseconds). SDA should be stable 87 - * high for this entire duration. 
88 - */ 89 - #define TWHI_MIN 1500 90 - #define TWHI_MAX 1550 91 - 92 - /* Wake Low duration */ 93 - #define TWLO_USEC 60 94 - 95 - /* Command execution time (milliseconds) */ 96 - #define MAX_EXEC_TIME_ECDH 58 97 - #define MAX_EXEC_TIME_GENKEY 115 98 - #define MAX_EXEC_TIME_READ 1 99 - 100 - /* Command opcode */ 101 - #define OPCODE_ECDH 0x43 102 - #define OPCODE_GENKEY 0x40 103 - #define OPCODE_READ 0x02 104 - 105 - /* Definitions for the READ Command */ 106 - #define READ_COUNT 7 107 - 108 - /* Definitions for the GenKey Command */ 109 - #define GENKEY_COUNT 7 110 - #define GENKEY_MODE_PRIVATE 0x04 111 - 112 - /* Definitions for the ECDH Command */ 113 - #define ECDH_COUNT 71 114 - #define ECDH_PREFIX_MODE 0x00 115 - 116 - #endif /* __ATMEL_ECC_H__ */
+364
drivers/crypto/atmel-i2c.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Microchip / Atmel ECC (I2C) driver. 4 + * 5 + * Copyright (c) 2017, Microchip Technology Inc. 6 + * Author: Tudor Ambarus <tudor.ambarus@microchip.com> 7 + */ 8 + 9 + #include <linux/bitrev.h> 10 + #include <linux/crc16.h> 11 + #include <linux/delay.h> 12 + #include <linux/device.h> 13 + #include <linux/err.h> 14 + #include <linux/errno.h> 15 + #include <linux/i2c.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/scatterlist.h> 20 + #include <linux/slab.h> 21 + #include <linux/workqueue.h> 22 + #include "atmel-i2c.h" 23 + 24 + /** 25 + * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC. 26 + * CRC16 verification of the count, opcode, param1, param2 and data bytes. 27 + * The checksum is saved in little-endian format in the least significant 28 + * two bytes of the command. CRC polynomial is 0x8005 and the initial register 29 + * value should be zero. 30 + * 31 + * @cmd : structure used for communicating with the device. 32 + */ 33 + static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd) 34 + { 35 + u8 *data = &cmd->count; 36 + size_t len = cmd->count - CRC_SIZE; 37 + __le16 *__crc16 = (__le16 *)(data + len); 38 + 39 + *__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len))); 40 + } 41 + 42 + void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd) 43 + { 44 + cmd->word_addr = COMMAND; 45 + cmd->opcode = OPCODE_READ; 46 + /* 47 + * Read the word from Configuration zone that contains the lock bytes 48 + * (UserExtra, Selector, LockValue, LockConfig). 
49 + */ 50 + cmd->param1 = CONFIG_ZONE; 51 + cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR); 52 + cmd->count = READ_COUNT; 53 + 54 + atmel_i2c_checksum(cmd); 55 + 56 + cmd->msecs = MAX_EXEC_TIME_READ; 57 + cmd->rxsize = READ_RSP_SIZE; 58 + } 59 + EXPORT_SYMBOL(atmel_i2c_init_read_cmd); 60 + 61 + void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd) 62 + { 63 + cmd->word_addr = COMMAND; 64 + cmd->opcode = OPCODE_RANDOM; 65 + cmd->param1 = 0; 66 + cmd->param2 = 0; 67 + cmd->count = RANDOM_COUNT; 68 + 69 + atmel_i2c_checksum(cmd); 70 + 71 + cmd->msecs = MAX_EXEC_TIME_RANDOM; 72 + cmd->rxsize = RANDOM_RSP_SIZE; 73 + } 74 + EXPORT_SYMBOL(atmel_i2c_init_random_cmd); 75 + 76 + void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid) 77 + { 78 + cmd->word_addr = COMMAND; 79 + cmd->count = GENKEY_COUNT; 80 + cmd->opcode = OPCODE_GENKEY; 81 + cmd->param1 = GENKEY_MODE_PRIVATE; 82 + /* a random private key will be generated and stored in slot keyID */ 83 + cmd->param2 = cpu_to_le16(keyid); 84 + 85 + atmel_i2c_checksum(cmd); 86 + 87 + cmd->msecs = MAX_EXEC_TIME_GENKEY; 88 + cmd->rxsize = GENKEY_RSP_SIZE; 89 + } 90 + EXPORT_SYMBOL(atmel_i2c_init_genkey_cmd); 91 + 92 + int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd, 93 + struct scatterlist *pubkey) 94 + { 95 + size_t copied; 96 + 97 + cmd->word_addr = COMMAND; 98 + cmd->count = ECDH_COUNT; 99 + cmd->opcode = OPCODE_ECDH; 100 + cmd->param1 = ECDH_PREFIX_MODE; 101 + /* private key slot */ 102 + cmd->param2 = cpu_to_le16(DATA_SLOT_2); 103 + 104 + /* 105 + * The device only supports NIST P256 ECC keys. The public key size will 106 + * always be the same. Use a macro for the key size to avoid unnecessary 107 + * computations. 
108 + */ 109 + copied = sg_copy_to_buffer(pubkey, 110 + sg_nents_for_len(pubkey, 111 + ATMEL_ECC_PUBKEY_SIZE), 112 + cmd->data, ATMEL_ECC_PUBKEY_SIZE); 113 + if (copied != ATMEL_ECC_PUBKEY_SIZE) 114 + return -EINVAL; 115 + 116 + atmel_i2c_checksum(cmd); 117 + 118 + cmd->msecs = MAX_EXEC_TIME_ECDH; 119 + cmd->rxsize = ECDH_RSP_SIZE; 120 + 121 + return 0; 122 + } 123 + EXPORT_SYMBOL(atmel_i2c_init_ecdh_cmd); 124 + 125 + /* 126 + * After wake and after execution of a command, there will be error, status, or 127 + * result bytes in the device's output register that can be retrieved by the 128 + * system. When the length of that group is four bytes, the codes returned are 129 + * detailed in error_list. 130 + */ 131 + static int atmel_i2c_status(struct device *dev, u8 *status) 132 + { 133 + size_t err_list_len = ARRAY_SIZE(error_list); 134 + int i; 135 + u8 err_id = status[1]; 136 + 137 + if (*status != STATUS_SIZE) 138 + return 0; 139 + 140 + if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR) 141 + return 0; 142 + 143 + for (i = 0; i < err_list_len; i++) 144 + if (error_list[i].value == err_id) 145 + break; 146 + 147 + /* if err_id is not in the error_list then ignore it */ 148 + if (i != err_list_len) { 149 + dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text); 150 + return err_id; 151 + } 152 + 153 + return 0; 154 + } 155 + 156 + static int atmel_i2c_wakeup(struct i2c_client *client) 157 + { 158 + struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 159 + u8 status[STATUS_RSP_SIZE]; 160 + int ret; 161 + 162 + /* 163 + * The device ignores any levels or transitions on the SCL pin when the 164 + * device is idle, asleep or during waking up. Don't check for error 165 + * when waking up the device. 166 + */ 167 + i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz); 168 + 169 + /* 170 + * Wait to wake the device. Typical execution times for ecdh and genkey 171 + * are around tens of milliseconds. 
Delta is chosen to 50 microseconds. 172 + */ 173 + usleep_range(TWHI_MIN, TWHI_MAX); 174 + 175 + ret = i2c_master_recv(client, status, STATUS_SIZE); 176 + if (ret < 0) 177 + return ret; 178 + 179 + return atmel_i2c_status(&client->dev, status); 180 + } 181 + 182 + static int atmel_i2c_sleep(struct i2c_client *client) 183 + { 184 + u8 sleep = SLEEP_TOKEN; 185 + 186 + return i2c_master_send(client, &sleep, 1); 187 + } 188 + 189 + /* 190 + * atmel_i2c_send_receive() - send a command to the device and receive its 191 + * response. 192 + * @client: i2c client device 193 + * @cmd : structure used to communicate with the device 194 + * 195 + * After the device receives a Wake token, a watchdog counter starts within the 196 + * device. After the watchdog timer expires, the device enters sleep mode 197 + * regardless of whether some I/O transmission or command execution is in 198 + * progress. If a command is attempted when insufficient time remains prior to 199 + * watchdog timer execution, the device will return the watchdog timeout error 200 + * code without attempting to execute the command. There is no way to reset the 201 + * counter other than to put the device into sleep or idle mode and then 202 + * wake it up again. 
203 + */ 204 + int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd) 205 + { 206 + struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 207 + int ret; 208 + 209 + mutex_lock(&i2c_priv->lock); 210 + 211 + ret = atmel_i2c_wakeup(client); 212 + if (ret) 213 + goto err; 214 + 215 + /* send the command */ 216 + ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE); 217 + if (ret < 0) 218 + goto err; 219 + 220 + /* delay the appropriate amount of time for command to execute */ 221 + msleep(cmd->msecs); 222 + 223 + /* receive the response */ 224 + ret = i2c_master_recv(client, cmd->data, cmd->rxsize); 225 + if (ret < 0) 226 + goto err; 227 + 228 + /* put the device into low-power mode */ 229 + ret = atmel_i2c_sleep(client); 230 + if (ret < 0) 231 + goto err; 232 + 233 + mutex_unlock(&i2c_priv->lock); 234 + return atmel_i2c_status(&client->dev, cmd->data); 235 + err: 236 + mutex_unlock(&i2c_priv->lock); 237 + return ret; 238 + } 239 + EXPORT_SYMBOL(atmel_i2c_send_receive); 240 + 241 + static void atmel_i2c_work_handler(struct work_struct *work) 242 + { 243 + struct atmel_i2c_work_data *work_data = 244 + container_of(work, struct atmel_i2c_work_data, work); 245 + struct atmel_i2c_cmd *cmd = &work_data->cmd; 246 + struct i2c_client *client = work_data->client; 247 + int status; 248 + 249 + status = atmel_i2c_send_receive(client, cmd); 250 + work_data->cbk(work_data, work_data->areq, status); 251 + } 252 + 253 + void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data, 254 + void (*cbk)(struct atmel_i2c_work_data *work_data, 255 + void *areq, int status), 256 + void *areq) 257 + { 258 + work_data->cbk = (void *)cbk; 259 + work_data->areq = areq; 260 + 261 + INIT_WORK(&work_data->work, atmel_i2c_work_handler); 262 + schedule_work(&work_data->work); 263 + } 264 + EXPORT_SYMBOL(atmel_i2c_enqueue); 265 + 266 + static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate) 267 + { 268 + u32 no_of_bits = 
DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC); 269 + 270 + /* return the size of the wake_token in bytes */ 271 + return DIV_ROUND_UP(no_of_bits, 8); 272 + } 273 + 274 + static int device_sanity_check(struct i2c_client *client) 275 + { 276 + struct atmel_i2c_cmd *cmd; 277 + int ret; 278 + 279 + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); 280 + if (!cmd) 281 + return -ENOMEM; 282 + 283 + atmel_i2c_init_read_cmd(cmd); 284 + 285 + ret = atmel_i2c_send_receive(client, cmd); 286 + if (ret) 287 + goto free_cmd; 288 + 289 + /* 290 + * It is vital that the Configuration, Data and OTP zones be locked 291 + * prior to release into the field of the system containing the device. 292 + * Failure to lock these zones may permit modification of any secret 293 + * keys and may lead to other security problems. 294 + */ 295 + if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) { 296 + dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n"); 297 + ret = -ENOTSUPP; 298 + } 299 + 300 + /* fall through */ 301 + free_cmd: 302 + kfree(cmd); 303 + return ret; 304 + } 305 + 306 + int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) 307 + { 308 + struct atmel_i2c_client_priv *i2c_priv; 309 + struct device *dev = &client->dev; 310 + int ret; 311 + u32 bus_clk_rate; 312 + 313 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { 314 + dev_err(dev, "I2C_FUNC_I2C not supported\n"); 315 + return -ENODEV; 316 + } 317 + 318 + bus_clk_rate = i2c_acpi_find_bus_speed(&client->adapter->dev); 319 + if (!bus_clk_rate) { 320 + ret = device_property_read_u32(&client->adapter->dev, 321 + "clock-frequency", &bus_clk_rate); 322 + if (ret) { 323 + dev_err(dev, "failed to read clock-frequency property\n"); 324 + return ret; 325 + } 326 + } 327 + 328 + if (bus_clk_rate > 1000000L) { 329 + dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", 330 + bus_clk_rate); 331 + return -EINVAL; 332 + } 333 + 334 + i2c_priv = 
devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL); 335 + if (!i2c_priv) 336 + return -ENOMEM; 337 + 338 + i2c_priv->client = client; 339 + mutex_init(&i2c_priv->lock); 340 + 341 + /* 342 + * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate - 343 + * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz 344 + * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE. 345 + */ 346 + i2c_priv->wake_token_sz = atmel_i2c_wake_token_sz(bus_clk_rate); 347 + 348 + memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token)); 349 + 350 + atomic_set(&i2c_priv->tfm_count, 0); 351 + 352 + i2c_set_clientdata(client, i2c_priv); 353 + 354 + ret = device_sanity_check(client); 355 + if (ret) 356 + return ret; 357 + 358 + return 0; 359 + } 360 + EXPORT_SYMBOL(atmel_i2c_probe); 361 + 362 + MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>"); 363 + MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver"); 364 + MODULE_LICENSE("GPL v2");
+197
drivers/crypto/atmel-i2c.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, Microchip Technology Inc.
 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
 */

#ifndef __ATMEL_I2C_H__
#define __ATMEL_I2C_H__

#include <linux/hw_random.h>
#include <linux/types.h>

#define ATMEL_ECC_PRIORITY		300

#define COMMAND				0x03 /* packet function */
#define SLEEP_TOKEN			0x01
#define WAKE_TOKEN_MAX_SIZE		8

/* Definitions of Data and Command sizes */
#define WORD_ADDR_SIZE			1
#define COUNT_SIZE			1
#define CRC_SIZE			2
#define CMD_OVERHEAD_SIZE		(COUNT_SIZE + CRC_SIZE)

/* size in bytes of the n prime */
#define ATMEL_ECC_NIST_P256_N_SIZE	32
#define ATMEL_ECC_PUBKEY_SIZE		(2 * ATMEL_ECC_NIST_P256_N_SIZE)

#define STATUS_RSP_SIZE			4
#define ECDH_RSP_SIZE			(32 + CMD_OVERHEAD_SIZE)
#define GENKEY_RSP_SIZE			(ATMEL_ECC_PUBKEY_SIZE + \
					 CMD_OVERHEAD_SIZE)
#define READ_RSP_SIZE			(4 + CMD_OVERHEAD_SIZE)
#define RANDOM_RSP_SIZE			(32 + CMD_OVERHEAD_SIZE)
#define MAX_RSP_SIZE			GENKEY_RSP_SIZE

/**
 * atmel_i2c_cmd - structure used for communicating with the device.
 * @word_addr: indicates the function of the packet sent to the device. This
 *             byte should have a value of COMMAND for normal operation.
 * @count    : number of bytes to be transferred to (or from) the device.
 * @opcode   : the command code.
 * @param1   : the first parameter; always present.
 * @param2   : the second parameter; always present.
 * @data     : optional remaining input data. Includes a 2-byte CRC.
 * @rxsize   : size of the data received from i2c client.
 * @msecs    : command execution time in milliseconds
 */
struct atmel_i2c_cmd {
	/* __packed: the leading fields mirror the device's on-wire command
	 * layout exactly, so no padding may be inserted. */
	u8 word_addr;
	u8 count;
	u8 opcode;
	u8 param1;
	__le16 param2;
	u8 data[MAX_RSP_SIZE];
	u8 msecs;
	u16 rxsize;
} __packed;

/* Status/Error codes */
#define STATUS_SIZE			0x04
#define STATUS_NOERR			0x00
#define STATUS_WAKE_SUCCESSFUL		0x11

/*
 * Device status/error code to message lookup table. Defined in the header so
 * each includer gets its own (small) copy; used by the common status decode.
 */
static const struct {
	u8 value;
	const char *error_text;
} error_list[] = {
	{ 0x01, "CheckMac or Verify miscompare" },
	{ 0x03, "Parse Error" },
	{ 0x05, "ECC Fault" },
	{ 0x0F, "Execution Error" },
	{ 0xEE, "Watchdog about to expire" },
	{ 0xFF, "CRC or other communication error" },
};

/* Definitions for eeprom organization */
#define CONFIG_ZONE			0

/* Definitions for Indexes common to all commands */
#define RSP_DATA_IDX			1 /* buffer index of data in response */
#define DATA_SLOT_2			2 /* used for ECDH private key */

/* Definitions for the device lock state */
#define DEVICE_LOCK_ADDR		0x15
#define LOCK_VALUE_IDX			(RSP_DATA_IDX + 2)
#define LOCK_CONFIG_IDX			(RSP_DATA_IDX + 3)

/*
 * Wake High delay to data communication (microseconds). SDA should be stable
 * high for this entire duration.
 */
#define TWHI_MIN			1500
#define TWHI_MAX			1550

/* Wake Low duration */
#define TWLO_USEC			60

/* Command execution time (milliseconds) */
#define MAX_EXEC_TIME_ECDH		58
#define MAX_EXEC_TIME_GENKEY		115
#define MAX_EXEC_TIME_READ		1
#define MAX_EXEC_TIME_RANDOM		50

/* Command opcode */
#define OPCODE_ECDH			0x43
#define OPCODE_GENKEY			0x40
#define OPCODE_READ			0x02
#define OPCODE_RANDOM			0x1b

/* Definitions for the READ Command */
#define READ_COUNT			7

/* Definitions for the RANDOM Command */
#define RANDOM_COUNT			7

/* Definitions for the GenKey Command */
#define GENKEY_COUNT			7
#define GENKEY_MODE_PRIVATE		0x04

/* Definitions for the ECDH Command */
#define ECDH_COUNT			71
#define ECDH_PREFIX_MODE		0x00

/* Used for binding tfm objects to i2c clients. */
struct atmel_ecc_driver_data {
	struct list_head i2c_client_list;
	spinlock_t i2c_list_lock;
} ____cacheline_aligned;

/**
 * atmel_i2c_client_priv - i2c_client private data
 * @client              : pointer to i2c client device
 * @i2c_client_list_node: part of i2c_client_list
 * @lock                : lock for sending i2c commands
 * @wake_token          : wake token array of zeros
 * @wake_token_sz       : size in bytes of the wake_token
 * @tfm_count           : number of active crypto transformations on i2c client
 *
 * Reads and writes from/to the i2c client are sequential. The first byte
 * transmitted to the device is treated as the byte size. Any attempt to send
 * more than this number of bytes will cause the device to not ACK those bytes.
 * After the host writes a single command byte to the input buffer, reads are
 * prohibited until after the device completes command execution. Use a mutex
 * when sending i2c commands.
 */
struct atmel_i2c_client_priv {
	struct i2c_client *client;
	struct list_head i2c_client_list_node;
	struct mutex lock;
	u8 wake_token[WAKE_TOKEN_MAX_SIZE];
	size_t wake_token_sz;
	atomic_t tfm_count ____cacheline_aligned;
	struct hwrng hwrng;
};

/**
 * atmel_i2c_work_data - data structure representing the work
 * @ctx : transformation context.
 * @cbk : pointer to a callback function to be invoked upon completion of this
 *        request. This has the form:
 *        callback(struct atmel_i2c_work_data *work_data, void *areq, u8 status)
 *        where:
 *        @work_data: data structure representing the work
 *        @areq     : optional pointer to an argument passed with the original
 *                    request.
 *        @status   : status returned from the i2c client device or i2c error.
 * @areq: optional pointer to a user argument for use at callback time.
 * @work: describes the task to be executed.
 * @cmd : structure used for communicating with the device.
 */
struct atmel_i2c_work_data {
	void *ctx;
	struct i2c_client *client;
	void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq,
		    int status);
	void *areq;
	struct work_struct work;
	struct atmel_i2c_cmd cmd;
};

int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);

void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
		       void (*cbk)(struct atmel_i2c_work_data *work_data,
				   void *areq, int status),
		       void *areq);

int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);

void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
			    struct scatterlist *pubkey);

#endif /* __ATMEL_I2C_H__ */
+171
drivers/crypto/atmel-sha204a.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Microchip / Atmel SHA204A (I2C) driver. 4 + * 5 + * Copyright (c) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> 6 + */ 7 + 8 + #include <linux/delay.h> 9 + #include <linux/device.h> 10 + #include <linux/err.h> 11 + #include <linux/errno.h> 12 + #include <linux/i2c.h> 13 + #include <linux/init.h> 14 + #include <linux/kernel.h> 15 + #include <linux/module.h> 16 + #include <linux/scatterlist.h> 17 + #include <linux/slab.h> 18 + #include <linux/workqueue.h> 19 + #include "atmel-i2c.h" 20 + 21 + static void atmel_sha204a_rng_done(struct atmel_i2c_work_data *work_data, 22 + void *areq, int status) 23 + { 24 + struct atmel_i2c_client_priv *i2c_priv = work_data->ctx; 25 + struct hwrng *rng = areq; 26 + 27 + if (status) 28 + dev_warn_ratelimited(&i2c_priv->client->dev, 29 + "i2c transaction failed (%d)\n", 30 + status); 31 + 32 + rng->priv = (unsigned long)work_data; 33 + atomic_dec(&i2c_priv->tfm_count); 34 + } 35 + 36 + static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data, 37 + size_t max) 38 + { 39 + struct atmel_i2c_client_priv *i2c_priv; 40 + struct atmel_i2c_work_data *work_data; 41 + 42 + i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng); 43 + 44 + /* keep maximum 1 asynchronous read in flight at any time */ 45 + if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1)) 46 + return 0; 47 + 48 + if (rng->priv) { 49 + work_data = (struct atmel_i2c_work_data *)rng->priv; 50 + max = min(sizeof(work_data->cmd.data), max); 51 + memcpy(data, &work_data->cmd.data, max); 52 + rng->priv = 0; 53 + } else { 54 + work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC); 55 + if (!work_data) 56 + return -ENOMEM; 57 + 58 + work_data->ctx = i2c_priv; 59 + work_data->client = i2c_priv->client; 60 + 61 + max = 0; 62 + } 63 + 64 + atmel_i2c_init_random_cmd(&work_data->cmd); 65 + atmel_i2c_enqueue(work_data, atmel_sha204a_rng_done, rng); 66 + 67 + return max; 68 + } 69 + 70 + static int 
atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max, 71 + bool wait) 72 + { 73 + struct atmel_i2c_client_priv *i2c_priv; 74 + struct atmel_i2c_cmd cmd; 75 + int ret; 76 + 77 + if (!wait) 78 + return atmel_sha204a_rng_read_nonblocking(rng, data, max); 79 + 80 + i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng); 81 + 82 + atmel_i2c_init_random_cmd(&cmd); 83 + 84 + ret = atmel_i2c_send_receive(i2c_priv->client, &cmd); 85 + if (ret) 86 + return ret; 87 + 88 + max = min(sizeof(cmd.data), max); 89 + memcpy(data, cmd.data, max); 90 + 91 + return max; 92 + } 93 + 94 + static int atmel_sha204a_probe(struct i2c_client *client, 95 + const struct i2c_device_id *id) 96 + { 97 + struct atmel_i2c_client_priv *i2c_priv; 98 + int ret; 99 + 100 + ret = atmel_i2c_probe(client, id); 101 + if (ret) 102 + return ret; 103 + 104 + i2c_priv = i2c_get_clientdata(client); 105 + 106 + memset(&i2c_priv->hwrng, 0, sizeof(i2c_priv->hwrng)); 107 + 108 + i2c_priv->hwrng.name = dev_name(&client->dev); 109 + i2c_priv->hwrng.read = atmel_sha204a_rng_read; 110 + i2c_priv->hwrng.quality = 1024; 111 + 112 + ret = hwrng_register(&i2c_priv->hwrng); 113 + if (ret) 114 + dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); 115 + 116 + return ret; 117 + } 118 + 119 + static int atmel_sha204a_remove(struct i2c_client *client) 120 + { 121 + struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 122 + 123 + if (atomic_read(&i2c_priv->tfm_count)) { 124 + dev_err(&client->dev, "Device is busy\n"); 125 + return -EBUSY; 126 + } 127 + 128 + if (i2c_priv->hwrng.priv) 129 + kfree((void *)i2c_priv->hwrng.priv); 130 + hwrng_unregister(&i2c_priv->hwrng); 131 + 132 + return 0; 133 + } 134 + 135 + static const struct of_device_id atmel_sha204a_dt_ids[] = { 136 + { .compatible = "atmel,atsha204a", }, 137 + { /* sentinel */ } 138 + }; 139 + MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids); 140 + 141 + static const struct i2c_device_id atmel_sha204a_id[] = { 142 + { 
"atsha204a", 0 }, 143 + { /* sentinel */ } 144 + }; 145 + MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id); 146 + 147 + static struct i2c_driver atmel_sha204a_driver = { 148 + .probe = atmel_sha204a_probe, 149 + .remove = atmel_sha204a_remove, 150 + .id_table = atmel_sha204a_id, 151 + 152 + .driver.name = "atmel-sha204a", 153 + .driver.of_match_table = of_match_ptr(atmel_sha204a_dt_ids), 154 + }; 155 + 156 + static int __init atmel_sha204a_init(void) 157 + { 158 + return i2c_add_driver(&atmel_sha204a_driver); 159 + } 160 + 161 + static void __exit atmel_sha204a_exit(void) 162 + { 163 + flush_scheduled_work(); 164 + i2c_del_driver(&atmel_sha204a_driver); 165 + } 166 + 167 + module_init(atmel_sha204a_init); 168 + module_exit(atmel_sha204a_exit); 169 + 170 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 171 + MODULE_LICENSE("GPL v2");
+4 -4
drivers/crypto/bcm/cipher.c
··· 85 85 * 0x70 - ring 2 86 86 * 0x78 - ring 3 87 87 */ 88 - char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 }; 88 + static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 }; 89 89 /* 90 90 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN 91 91 * is set dynamically after reading SPU type from device tree. ··· 2083 2083 * Return: true if incremental hashing is not supported 2084 2084 * false otherwise 2085 2085 */ 2086 - bool spu_no_incr_hash(struct iproc_ctx_s *ctx) 2086 + static bool spu_no_incr_hash(struct iproc_ctx_s *ctx) 2087 2087 { 2088 2088 struct spu_hw *spu = &iproc_priv.spu; 2089 2089 ··· 4809 4809 return 0; 4810 4810 } 4811 4811 4812 - int bcm_spu_probe(struct platform_device *pdev) 4812 + static int bcm_spu_probe(struct platform_device *pdev) 4813 4813 { 4814 4814 struct device *dev = &pdev->dev; 4815 4815 struct spu_hw *spu = &iproc_priv.spu; ··· 4853 4853 return err; 4854 4854 } 4855 4855 4856 - int bcm_spu_remove(struct platform_device *pdev) 4856 + static int bcm_spu_remove(struct platform_device *pdev) 4857 4857 { 4858 4858 int i; 4859 4859 struct device *dev = &pdev->dev;
+5 -5
drivers/crypto/bcm/spu2.c
··· 38 38 SPU2_DTLS_AEAD = 10 39 39 }; 40 40 41 - char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256", 41 + static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256", 42 42 "DES", "3DES" 43 43 }; 44 44 45 - char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS", 46 - "CCM", "GCM" 45 + static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", 46 + "XTS", "CCM", "GCM" 47 47 }; 48 48 49 - char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256", 49 + static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256", 50 50 "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384", 51 51 "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256", 52 52 "SHA3-384", "SHA3-512" 53 53 }; 54 54 55 - char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC", 55 + static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC", 56 56 "Rabin", "CCM", "GCM", "Reserved" 57 57 }; 58 58
+17 -29
drivers/crypto/caam/Kconfig
··· 2 2 config CRYPTO_DEV_FSL_CAAM_COMMON 3 3 tristate 4 4 5 + config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 6 + tristate 7 + 8 + config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC 9 + tristate 10 + 5 11 config CRYPTO_DEV_FSL_CAAM 6 12 tristate "Freescale CAAM-Multicore platform driver backend" 7 13 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE ··· 31 25 Selecting this will enable printing of various debug 32 26 information in the CAAM driver. 33 27 34 - config CRYPTO_DEV_FSL_CAAM_JR 28 + menuconfig CRYPTO_DEV_FSL_CAAM_JR 35 29 tristate "Freescale CAAM Job Ring driver backend" 36 30 default y 37 31 help ··· 92 86 threshold. Range is 1-65535. 93 87 94 88 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API 95 - tristate "Register algorithm implementations with the Crypto API" 89 + bool "Register algorithm implementations with the Crypto API" 96 90 default y 91 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 97 92 select CRYPTO_AEAD 98 93 select CRYPTO_AUTHENC 99 94 select CRYPTO_BLKCIPHER ··· 104 97 scatterlist crypto API (such as the linux native IPSec 105 98 stack) to the SEC4 via job ring. 106 99 107 - To compile this as a module, choose M here: the module 108 - will be called caamalg. 109 - 110 100 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI 111 - tristate "Queue Interface as Crypto API backend" 101 + bool "Queue Interface as Crypto API backend" 112 102 depends on FSL_DPAA && NET 113 103 default y 104 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 114 105 select CRYPTO_AUTHENC 115 106 select CRYPTO_BLKCIPHER 116 107 help ··· 119 114 assigned to the kernel should also be more than the number of 120 115 job rings. 121 116 122 - To compile this as a module, choose M here: the module 123 - will be called caamalg_qi. 
124 - 125 117 config CRYPTO_DEV_FSL_CAAM_AHASH_API 126 - tristate "Register hash algorithm implementations with Crypto API" 118 + bool "Register hash algorithm implementations with Crypto API" 127 119 default y 120 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC 128 121 select CRYPTO_HASH 129 122 help 130 123 Selecting this will offload ahash for users of the 131 124 scatterlist crypto API to the SEC4 via job ring. 132 125 133 - To compile this as a module, choose M here: the module 134 - will be called caamhash. 135 - 136 126 config CRYPTO_DEV_FSL_CAAM_PKC_API 137 - tristate "Register public key cryptography implementations with Crypto API" 127 + bool "Register public key cryptography implementations with Crypto API" 138 128 default y 139 129 select CRYPTO_RSA 140 130 help 141 131 Selecting this will allow SEC Public key support for RSA. 142 132 Supported cryptographic primitives: encryption, decryption, 143 133 signature and verification. 144 - To compile this as a module, choose M here: the module 145 - will be called caam_pkc. 146 134 147 135 config CRYPTO_DEV_FSL_CAAM_RNG_API 148 - tristate "Register caam device for hwrng API" 136 + bool "Register caam device for hwrng API" 149 137 default y 150 138 select CRYPTO_RNG 151 139 select HW_RANDOM 152 140 help 153 141 Selecting this will register the SEC4 hardware rng to 154 142 the hw_random API for suppying the kernel entropy pool. 155 - 156 - To compile this as a module, choose M here: the module 157 - will be called caamrng. 158 143 159 144 endif # CRYPTO_DEV_FSL_CAAM_JR 160 145 ··· 155 160 depends on FSL_MC_DPIO 156 161 depends on NETDEVICES 157 162 select CRYPTO_DEV_FSL_CAAM_COMMON 163 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 164 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC 158 165 select CRYPTO_BLKCIPHER 159 166 select CRYPTO_AUTHENC 160 167 select CRYPTO_AEAD ··· 168 171 169 172 To compile this as a module, choose M here: the module 170 173 will be called dpaa2_caam. 
171 - 172 - config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 173 - def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \ 174 - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \ 175 - CRYPTO_DEV_FSL_DPAA2_CAAM) 176 - 177 - config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC 178 - def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \ 179 - CRYPTO_DEV_FSL_DPAA2_CAAM)
+9 -9
drivers/crypto/caam/Makefile
··· 11 11 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o 12 12 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 13 13 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o 14 - obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 15 - obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o 16 14 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o 17 - obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 18 15 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o 19 - obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 20 - obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o 21 16 22 - caam-objs := ctrl.o 23 - caam_jr-objs := jr.o key_gen.o 24 - caam_pkc-y := caampkc.o pkc_desc.o 17 + caam-y := ctrl.o 18 + caam_jr-y := jr.o key_gen.o 19 + caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 20 + caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o 21 + caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 22 + caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 23 + caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o 24 + 25 + caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o 25 26 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) 26 27 ccflags-y += -DCONFIG_CAAM_QI 27 - caam-objs += qi.o 28 28 endif 29 29 30 30 obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+154 -184
drivers/crypto/caam/caamalg.c
··· 77 77 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) 78 78 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 79 79 80 - #ifdef DEBUG 81 - /* for print_hex_dumps with line references */ 82 - #define debug(format, arg...) printk(format, arg) 83 - #else 84 - #define debug(format, arg...) 85 - #endif 86 - 87 80 struct caam_alg_entry { 88 81 int class1_alg_type; 89 82 int class2_alg_type; ··· 576 583 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 577 584 goto badkey; 578 585 579 - #ifdef DEBUG 580 - printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", 586 + dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", 581 587 keys.authkeylen + keys.enckeylen, keys.enckeylen, 582 588 keys.authkeylen); 583 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 584 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 585 - #endif 589 + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 590 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 586 591 587 592 /* 588 593 * If DKP is supported, use it in the shared descriptor to generate ··· 614 623 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 615 624 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 616 625 keys.enckeylen, ctx->dir); 617 - #ifdef DEBUG 618 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 619 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 620 - ctx->adata.keylen_pad + keys.enckeylen, 1); 621 - #endif 626 + 627 + print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", 628 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 629 + ctx->adata.keylen_pad + keys.enckeylen, 1); 622 630 623 631 skip_split_key: 624 632 ctx->cdata.keylen = keys.enckeylen; ··· 668 678 struct caam_ctx *ctx = crypto_aead_ctx(aead); 669 679 struct device *jrdev = ctx->jrdev; 670 680 671 - #ifdef DEBUG 672 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 673 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 674 - #endif 681 + 
print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 682 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 675 683 676 684 memcpy(ctx->key, key, keylen); 677 685 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); ··· 687 699 if (keylen < 4) 688 700 return -EINVAL; 689 701 690 - #ifdef DEBUG 691 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 692 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 693 - #endif 702 + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 703 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 694 704 695 705 memcpy(ctx->key, key, keylen); 696 706 ··· 711 725 if (keylen < 4) 712 726 return -EINVAL; 713 727 714 - #ifdef DEBUG 715 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 716 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 717 - #endif 728 + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 729 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 718 730 719 731 memcpy(ctx->key, key, keylen); 720 732 ··· 741 757 OP_ALG_AAI_CTR_MOD128); 742 758 const bool is_rfc3686 = alg->caam.rfc3686; 743 759 744 - #ifdef DEBUG 745 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 746 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 747 - #endif 760 + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 761 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 748 762 /* 749 763 * AES-CTR needs to load IV in CONTEXT1 reg 750 764 * at an offset of 128bits (16bytes) ··· 898 916 } 899 917 900 918 if (iv_dma) 901 - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 919 + dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL); 902 920 if (sec4_sg_bytes) 903 921 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, 904 922 DMA_TO_DEVICE); ··· 931 949 struct aead_request *req = context; 932 950 struct aead_edesc *edesc; 933 951 934 - #ifdef DEBUG 935 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 936 - #endif 952 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 937 
953 938 954 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 939 955 ··· 951 971 struct aead_request *req = context; 952 972 struct aead_edesc *edesc; 953 973 954 - #ifdef DEBUG 955 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 956 - #endif 974 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 957 975 958 976 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 959 977 ··· 979 1001 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 980 1002 int ivsize = crypto_skcipher_ivsize(skcipher); 981 1003 982 - #ifdef DEBUG 983 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 984 - #endif 1004 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 985 1005 986 1006 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 987 1007 988 1008 if (err) 989 1009 caam_jr_strstatus(jrdev, err); 990 1010 991 - #ifdef DEBUG 992 - print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 993 - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 994 - edesc->src_nents > 1 ? 100 : ivsize, 1); 995 - #endif 996 - caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 997 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 998 - edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 999 - 1000 1011 skcipher_unmap(jrdev, edesc, req); 1001 1012 1002 1013 /* 1003 1014 * The crypto API expects us to set the IV (req->iv) to the last 1004 - * ciphertext block. This is used e.g. by the CTS mode. 1015 + * ciphertext block (CBC mode) or last counter (CTR mode). 1016 + * This is used e.g. by the CTS mode. 1005 1017 */ 1006 - if (ivsize) 1007 - scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - 1008 - ivsize, ivsize, 0); 1018 + if (ivsize) { 1019 + memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, 1020 + ivsize); 1021 + 1022 + print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ", 1023 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1024 + edesc->src_nents > 1 ? 
100 : ivsize, 1); 1025 + } 1026 + 1027 + caam_dump_sg("dst @" __stringify(__LINE__)": ", 1028 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1029 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1009 1030 1010 1031 kfree(edesc); 1011 1032 ··· 1016 1039 { 1017 1040 struct skcipher_request *req = context; 1018 1041 struct skcipher_edesc *edesc; 1019 - #ifdef DEBUG 1020 1042 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1021 1043 int ivsize = crypto_skcipher_ivsize(skcipher); 1022 1044 1023 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1024 - #endif 1045 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1025 1046 1026 1047 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 1027 1048 if (err) 1028 1049 caam_jr_strstatus(jrdev, err); 1029 1050 1030 - #ifdef DEBUG 1031 - print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 1032 - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1033 - #endif 1034 - caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 1051 + skcipher_unmap(jrdev, edesc, req); 1052 + 1053 + /* 1054 + * The crypto API expects us to set the IV (req->iv) to the last 1055 + * ciphertext block (CBC mode) or last counter (CTR mode). 1056 + * This is used e.g. by the CTS mode. 1057 + */ 1058 + if (ivsize) { 1059 + memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, 1060 + ivsize); 1061 + 1062 + print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1063 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1064 + ivsize, 1); 1065 + } 1066 + 1067 + caam_dump_sg("dst @" __stringify(__LINE__)": ", 1035 1068 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1036 1069 edesc->dst_nents > 1 ? 
100 : req->cryptlen, 1); 1037 1070 1038 - skcipher_unmap(jrdev, edesc, req); 1039 1071 kfree(edesc); 1040 1072 1041 1073 skcipher_request_complete(req, err); ··· 1092 1106 if (unlikely(req->src != req->dst)) { 1093 1107 if (!edesc->mapped_dst_nents) { 1094 1108 dst_dma = 0; 1109 + out_options = 0; 1095 1110 } else if (edesc->mapped_dst_nents == 1) { 1096 1111 dst_dma = sg_dma_address(req->dst); 1097 1112 out_options = 0; ··· 1236 1249 { 1237 1250 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1238 1251 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1252 + struct device *jrdev = ctx->jrdev; 1239 1253 int ivsize = crypto_skcipher_ivsize(skcipher); 1240 1254 u32 *desc = edesc->hw_desc; 1241 1255 u32 *sh_desc; ··· 1244 1256 dma_addr_t src_dma, dst_dma, ptr; 1245 1257 int len, sec4_sg_index = 0; 1246 1258 1247 - #ifdef DEBUG 1248 - print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1249 - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1250 - pr_err("asked=%d, cryptlen%d\n", 1259 + print_hex_dump_debug("presciv@"__stringify(__LINE__)": ", 1260 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1261 + dev_dbg(jrdev, "asked=%d, cryptlen%d\n", 1251 1262 (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); 1252 - #endif 1253 - caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", 1263 + 1264 + caam_dump_sg("src @" __stringify(__LINE__)": ", 1254 1265 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1255 1266 edesc->src_nents > 1 ? 
100 : req->cryptlen, 1); 1256 1267 ··· 1272 1285 if (likely(req->src == req->dst)) { 1273 1286 dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); 1274 1287 out_options = in_options; 1275 - } else if (edesc->mapped_dst_nents == 1) { 1288 + } else if (!ivsize && edesc->mapped_dst_nents == 1) { 1276 1289 dst_dma = sg_dma_address(req->dst); 1277 1290 } else { 1278 1291 dst_dma = edesc->sec4_sg_dma + sec4_sg_index * ··· 1280 1293 out_options = LDST_SGF; 1281 1294 } 1282 1295 1283 - append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); 1296 + append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); 1284 1297 } 1285 1298 1286 1299 /* ··· 1296 1309 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1297 1310 GFP_KERNEL : GFP_ATOMIC; 1298 1311 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1312 + int src_len, dst_len = 0; 1299 1313 struct aead_edesc *edesc; 1300 1314 int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; 1301 1315 unsigned int authsize = ctx->authsize; 1302 1316 1303 1317 if (unlikely(req->dst != req->src)) { 1304 - src_nents = sg_nents_for_len(req->src, req->assoclen + 1305 - req->cryptlen); 1318 + src_len = req->assoclen + req->cryptlen; 1319 + dst_len = src_len + (encrypt ? authsize : (-authsize)); 1320 + 1321 + src_nents = sg_nents_for_len(req->src, src_len); 1306 1322 if (unlikely(src_nents < 0)) { 1307 1323 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1308 - req->assoclen + req->cryptlen); 1324 + src_len); 1309 1325 return ERR_PTR(src_nents); 1310 1326 } 1311 1327 1312 - dst_nents = sg_nents_for_len(req->dst, req->assoclen + 1313 - req->cryptlen + 1314 - (encrypt ? authsize : 1315 - (-authsize))); 1328 + dst_nents = sg_nents_for_len(req->dst, dst_len); 1316 1329 if (unlikely(dst_nents < 0)) { 1317 1330 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", 1318 - req->assoclen + req->cryptlen + 1319 - (encrypt ? 
authsize : (-authsize))); 1331 + dst_len); 1320 1332 return ERR_PTR(dst_nents); 1321 1333 } 1322 1334 } else { 1323 - src_nents = sg_nents_for_len(req->src, req->assoclen + 1324 - req->cryptlen + 1325 - (encrypt ? authsize : 0)); 1335 + src_len = req->assoclen + req->cryptlen + 1336 + (encrypt ? authsize : 0); 1337 + 1338 + src_nents = sg_nents_for_len(req->src, src_len); 1326 1339 if (unlikely(src_nents < 0)) { 1327 1340 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1328 - req->assoclen + req->cryptlen + 1329 - (encrypt ? authsize : 0)); 1341 + src_len); 1330 1342 return ERR_PTR(src_nents); 1331 1343 } 1332 1344 } ··· 1366 1380 } 1367 1381 } 1368 1382 1383 + /* 1384 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1385 + * the end of the table by allocating more S/G entries. 1386 + */ 1369 1387 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; 1370 - sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1388 + if (mapped_dst_nents > 1) 1389 + sec4_sg_len += pad_sg_nents(mapped_dst_nents); 1390 + else 1391 + sec4_sg_len = pad_sg_nents(sec4_sg_len); 1392 + 1371 1393 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 1372 1394 1373 1395 /* allocate space for base edesc and hw desc commands, link tables */ ··· 1397 1403 1398 1404 sec4_sg_index = 0; 1399 1405 if (mapped_src_nents > 1) { 1400 - sg_to_sec4_sg_last(req->src, mapped_src_nents, 1406 + sg_to_sec4_sg_last(req->src, src_len, 1401 1407 edesc->sec4_sg + sec4_sg_index, 0); 1402 1408 sec4_sg_index += mapped_src_nents; 1403 1409 } 1404 1410 if (mapped_dst_nents > 1) { 1405 - sg_to_sec4_sg_last(req->dst, mapped_dst_nents, 1411 + sg_to_sec4_sg_last(req->dst, dst_len, 1406 1412 edesc->sec4_sg + sec4_sg_index, 0); 1407 1413 } 1408 1414 ··· 1440 1446 1441 1447 /* Create and submit job descriptor */ 1442 1448 init_gcm_job(req, edesc, all_contig, true); 1443 - #ifdef DEBUG 1444 - print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1445 - 
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1446 - desc_bytes(edesc->hw_desc), 1); 1447 - #endif 1449 + 1450 + print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", 1451 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1452 + desc_bytes(edesc->hw_desc), 1); 1448 1453 1449 1454 desc = edesc->hw_desc; 1450 1455 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); ··· 1549 1556 1550 1557 /* Create and submit job descriptor */ 1551 1558 init_authenc_job(req, edesc, all_contig, true); 1552 - #ifdef DEBUG 1553 - print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1554 - DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1555 - desc_bytes(edesc->hw_desc), 1); 1556 - #endif 1559 + 1560 + print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", 1561 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1562 + desc_bytes(edesc->hw_desc), 1); 1557 1563 1558 1564 desc = edesc->hw_desc; 1559 1565 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); ··· 1583 1591 1584 1592 /* Create and submit job descriptor*/ 1585 1593 init_gcm_job(req, edesc, all_contig, false); 1586 - #ifdef DEBUG 1587 - print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1588 - DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1589 - desc_bytes(edesc->hw_desc), 1); 1590 - #endif 1594 + 1595 + print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", 1596 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1597 + desc_bytes(edesc->hw_desc), 1); 1591 1598 1592 1599 desc = edesc->hw_desc; 1593 1600 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); ··· 1618 1627 u32 *desc; 1619 1628 int ret = 0; 1620 1629 1621 - caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", 1630 + caam_dump_sg("dec src@" __stringify(__LINE__)": ", 1622 1631 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1623 1632 req->assoclen + req->cryptlen, 1); 1624 1633 ··· 1630 1639 1631 1640 /* Create and submit job descriptor*/ 1632 1641 init_authenc_job(req, edesc, all_contig, false); 1633 - #ifdef DEBUG 
1634 - print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1635 - DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1636 - desc_bytes(edesc->hw_desc), 1); 1637 - #endif 1642 + 1643 + print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", 1644 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1645 + desc_bytes(edesc->hw_desc), 1); 1638 1646 1639 1647 desc = edesc->hw_desc; 1640 1648 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); ··· 1709 1719 else 1710 1720 sec4_sg_ents = mapped_src_nents + !!ivsize; 1711 1721 dst_sg_idx = sec4_sg_ents; 1712 - sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1722 + 1723 + /* 1724 + * Input, output HW S/G tables: [IV, src][dst, IV] 1725 + * IV entries point to the same buffer 1726 + * If src == dst, S/G entries are reused (S/G tables overlap) 1727 + * 1728 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1729 + * the end of the table by allocating more S/G entries. Logic: 1730 + * if (output S/G) 1731 + * pad output S/G, if needed 1732 + * else if (input S/G) ... 
1733 + * pad input S/G, if needed 1734 + */ 1735 + if (ivsize || mapped_dst_nents > 1) { 1736 + if (req->src == req->dst) 1737 + sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents); 1738 + else 1739 + sec4_sg_ents += pad_sg_nents(mapped_dst_nents + 1740 + !!ivsize); 1741 + } else { 1742 + sec4_sg_ents = pad_sg_nents(sec4_sg_ents); 1743 + } 1744 + 1713 1745 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); 1714 1746 1715 1747 /* ··· 1756 1744 1757 1745 /* Make sure IV is located in a DMAable area */ 1758 1746 if (ivsize) { 1759 - iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; 1747 + iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes; 1760 1748 memcpy(iv, req->iv, ivsize); 1761 1749 1762 - iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); 1750 + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL); 1763 1751 if (dma_mapping_error(jrdev, iv_dma)) { 1764 1752 dev_err(jrdev, "unable to map IV\n"); 1765 1753 caam_unmap(jrdev, req->src, req->dst, src_nents, ··· 1771 1759 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); 1772 1760 } 1773 1761 if (dst_sg_idx) 1774 - sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1775 - !!ivsize, 0); 1762 + sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg + 1763 + !!ivsize, 0); 1776 1764 1777 - if (mapped_dst_nents > 1) { 1778 - sg_to_sec4_sg_last(req->dst, mapped_dst_nents, 1779 - edesc->sec4_sg + dst_sg_idx, 0); 1780 - } 1765 + if (req->src != req->dst && (ivsize || mapped_dst_nents > 1)) 1766 + sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg + 1767 + dst_sg_idx, 0); 1768 + 1769 + if (ivsize) 1770 + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx + 1771 + mapped_dst_nents, iv_dma, ivsize, 0); 1772 + 1773 + if (ivsize || mapped_dst_nents > 1) 1774 + sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + 1775 + mapped_dst_nents); 1781 1776 1782 1777 if (sec4_sg_bytes) { 1783 1778 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ··· 1801 1782 1802 1783 edesc->iv_dma = iv_dma; 
1803 1784 1804 - #ifdef DEBUG 1805 - print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ", 1806 - DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1807 - sec4_sg_bytes, 1); 1808 - #endif 1785 + print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ", 1786 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1787 + sec4_sg_bytes, 1); 1809 1788 1810 1789 return edesc; 1811 1790 } ··· 1824 1807 1825 1808 /* Create and submit job descriptor*/ 1826 1809 init_skcipher_job(req, edesc, true); 1827 - #ifdef DEBUG 1828 - print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", 1829 - DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1830 - desc_bytes(edesc->hw_desc), 1); 1831 - #endif 1810 + 1811 + print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", 1812 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1813 + desc_bytes(edesc->hw_desc), 1); 1814 + 1832 1815 desc = edesc->hw_desc; 1833 1816 ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req); 1834 1817 ··· 1847 1830 struct skcipher_edesc *edesc; 1848 1831 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1849 1832 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1850 - int ivsize = crypto_skcipher_ivsize(skcipher); 1851 1833 struct device *jrdev = ctx->jrdev; 1852 1834 u32 *desc; 1853 1835 int ret = 0; ··· 1856 1840 if (IS_ERR(edesc)) 1857 1841 return PTR_ERR(edesc); 1858 1842 1859 - /* 1860 - * The crypto API expects us to set the IV (req->iv) to the last 1861 - * ciphertext block. 
1862 - */ 1863 - if (ivsize) 1864 - scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - 1865 - ivsize, ivsize, 0); 1866 - 1867 1843 /* Create and submit job descriptor*/ 1868 1844 init_skcipher_job(req, edesc, false); 1869 1845 desc = edesc->hw_desc; 1870 - #ifdef DEBUG 1871 - print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", 1872 - DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1873 - desc_bytes(edesc->hw_desc), 1); 1874 - #endif 1846 + 1847 + print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", 1848 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1849 + desc_bytes(edesc->hw_desc), 1); 1875 1850 1876 1851 ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req); 1877 1852 if (!ret) { ··· 3451 3444 caam_exit_common(crypto_aead_ctx(tfm)); 3452 3445 } 3453 3446 3454 - static void __exit caam_algapi_exit(void) 3447 + void caam_algapi_exit(void) 3455 3448 { 3456 3449 int i; 3457 3450 ··· 3496 3489 alg->exit = caam_aead_exit; 3497 3490 } 3498 3491 3499 - static int __init caam_algapi_init(void) 3492 + int caam_algapi_init(struct device *ctrldev) 3500 3493 { 3501 - struct device_node *dev_node; 3502 - struct platform_device *pdev; 3503 - struct caam_drv_private *priv; 3494 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 3504 3495 int i = 0, err = 0; 3505 3496 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; 3506 3497 u32 arc4_inst; 3507 3498 unsigned int md_limit = SHA512_DIGEST_SIZE; 3508 3499 bool registered = false, gcm_support; 3509 - 3510 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 3511 - if (!dev_node) { 3512 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 3513 - if (!dev_node) 3514 - return -ENODEV; 3515 - } 3516 - 3517 - pdev = of_find_device_by_node(dev_node); 3518 - if (!pdev) { 3519 - of_node_put(dev_node); 3520 - return -ENODEV; 3521 - } 3522 - 3523 - priv = dev_get_drvdata(&pdev->dev); 3524 - of_node_put(dev_node); 3525 - 3526 - /* 3527 - * 
If priv is NULL, it's probably because the caam driver wasn't 3528 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 3529 - */ 3530 - if (!priv) { 3531 - err = -ENODEV; 3532 - goto out_put_dev; 3533 - } 3534 - 3535 3500 3536 3501 /* 3537 3502 * Register crypto algorithms the device supports. ··· 3647 3668 if (registered) 3648 3669 pr_info("caam algorithms registered in /proc/crypto\n"); 3649 3670 3650 - out_put_dev: 3651 - put_device(&pdev->dev); 3652 3671 return err; 3653 3672 } 3654 - 3655 - module_init(caam_algapi_init); 3656 - module_exit(caam_algapi_exit); 3657 - 3658 - MODULE_LICENSE("GPL"); 3659 - MODULE_DESCRIPTION("FSL CAAM support for crypto API"); 3660 - MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
+70 -77
drivers/crypto/caam/caamalg_desc.c
··· 33 33 } 34 34 35 35 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); 36 - append_operation(desc, type | OP_ALG_AS_INITFINAL | 37 - OP_ALG_DECRYPT); 36 + append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT); 38 37 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); 39 38 set_jump_tgt_here(desc, jump_cmd); 40 - append_operation(desc, type | OP_ALG_AS_INITFINAL | 41 - OP_ALG_DECRYPT | OP_ALG_AAI_DK); 39 + append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT | 40 + OP_ALG_AAI_DK); 42 41 set_jump_tgt_here(desc, uncond_jump_cmd); 43 42 } 44 43 ··· 114 115 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | 115 116 LDST_SRCDST_BYTE_CONTEXT); 116 117 117 - #ifdef DEBUG 118 - print_hex_dump(KERN_ERR, 119 - "aead null enc shdesc@" __stringify(__LINE__)": ", 120 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 121 - #endif 118 + print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ", 119 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 120 + 1); 122 121 } 123 122 EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); 124 123 ··· 201 204 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | 202 205 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); 203 206 204 - #ifdef DEBUG 205 - print_hex_dump(KERN_ERR, 206 - "aead null dec shdesc@" __stringify(__LINE__)": ", 207 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 208 - #endif 207 + print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ", 208 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 209 + 1); 209 210 } 210 211 EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); 211 212 ··· 353 358 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | 354 359 LDST_SRCDST_BYTE_CONTEXT); 355 360 356 - #ifdef DEBUG 357 - print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ", 358 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 359 - #endif 361 + print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ", 362 + DUMP_PREFIX_ADDRESS, 16, 4, desc, 
desc_bytes(desc), 363 + 1); 360 364 } 361 365 EXPORT_SYMBOL(cnstr_shdsc_aead_encap); 362 366 ··· 469 475 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | 470 476 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); 471 477 472 - #ifdef DEBUG 473 - print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ", 474 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 475 - #endif 478 + print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ", 479 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 480 + 1); 476 481 } 477 482 EXPORT_SYMBOL(cnstr_shdsc_aead_decap); 478 483 ··· 606 613 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | 607 614 LDST_SRCDST_BYTE_CONTEXT); 608 615 609 - #ifdef DEBUG 610 - print_hex_dump(KERN_ERR, 611 - "aead givenc shdesc@" __stringify(__LINE__)": ", 612 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 613 - #endif 616 + print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ", 617 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 618 + 1); 614 619 } 615 620 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); 616 621 ··· 733 742 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 734 743 LDST_SRCDST_BYTE_CONTEXT); 735 744 736 - #ifdef DEBUG 737 - print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ", 738 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 739 - #endif 745 + print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ", 746 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 747 + 1); 740 748 } 741 749 EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); 742 750 ··· 828 838 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | 829 839 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); 830 840 831 - #ifdef DEBUG 832 - print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ", 833 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 834 - #endif 841 + print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ", 842 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 843 
+ 1); 835 844 } 836 845 EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); 837 846 ··· 922 933 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 923 934 LDST_SRCDST_BYTE_CONTEXT); 924 935 925 - #ifdef DEBUG 926 - print_hex_dump(KERN_ERR, 927 - "rfc4106 enc shdesc@" __stringify(__LINE__)": ", 928 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 929 - #endif 936 + print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ", 937 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 938 + 1); 930 939 } 931 940 EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); 932 941 ··· 1017 1030 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | 1018 1031 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); 1019 1032 1020 - #ifdef DEBUG 1021 - print_hex_dump(KERN_ERR, 1022 - "rfc4106 dec shdesc@" __stringify(__LINE__)": ", 1023 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1024 - #endif 1033 + print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ", 1034 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1035 + 1); 1025 1036 } 1026 1037 EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); 1027 1038 ··· 1100 1115 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 1101 1116 LDST_SRCDST_BYTE_CONTEXT); 1102 1117 1103 - #ifdef DEBUG 1104 - print_hex_dump(KERN_ERR, 1105 - "rfc4543 enc shdesc@" __stringify(__LINE__)": ", 1106 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1107 - #endif 1118 + print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ", 1119 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1120 + 1); 1108 1121 } 1109 1122 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); 1110 1123 ··· 1188 1205 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | 1189 1206 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); 1190 1207 1191 - #ifdef DEBUG 1192 - print_hex_dump(KERN_ERR, 1193 - "rfc4543 dec shdesc@" __stringify(__LINE__)": ", 1194 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1195 - #endif 1208 + print_hex_dump_debug("rfc4543 dec shdesc@" 
__stringify(__LINE__)": ", 1209 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1210 + 1); 1196 1211 } 1197 1212 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); 1198 1213 ··· 1391 1410 LDST_OFFSET_SHIFT)); 1392 1411 1393 1412 /* Load operation */ 1394 - append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 1413 + append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | 1395 1414 OP_ALG_ENCRYPT); 1396 1415 1397 1416 /* Perform operation */ 1398 1417 skcipher_append_src_dst(desc); 1399 1418 1400 - #ifdef DEBUG 1401 - print_hex_dump(KERN_ERR, 1402 - "skcipher enc shdesc@" __stringify(__LINE__)": ", 1403 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1404 - #endif 1419 + /* Store IV */ 1420 + if (ivsize) 1421 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | 1422 + LDST_CLASS_1_CCB | (ctx1_iv_off << 1423 + LDST_OFFSET_SHIFT)); 1424 + 1425 + print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ", 1426 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1427 + 1); 1405 1428 } 1406 1429 EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); 1407 1430 ··· 1464 1479 1465 1480 /* Choose operation */ 1466 1481 if (ctx1_iv_off) 1467 - append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 1482 + append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | 1468 1483 OP_ALG_DECRYPT); 1469 1484 else 1470 1485 append_dec_op1(desc, cdata->algtype); ··· 1472 1487 /* Perform operation */ 1473 1488 skcipher_append_src_dst(desc); 1474 1489 1475 - #ifdef DEBUG 1476 - print_hex_dump(KERN_ERR, 1477 - "skcipher dec shdesc@" __stringify(__LINE__)": ", 1478 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1479 - #endif 1490 + /* Store IV */ 1491 + if (ivsize) 1492 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | 1493 + LDST_CLASS_1_CCB | (ctx1_iv_off << 1494 + LDST_OFFSET_SHIFT)); 1495 + 1496 + print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ", 1497 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1498 + 1); 
1480 1499 } 1481 1500 EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); 1482 1501 ··· 1527 1538 /* Perform operation */ 1528 1539 skcipher_append_src_dst(desc); 1529 1540 1530 - #ifdef DEBUG 1531 - print_hex_dump(KERN_ERR, 1532 - "xts skcipher enc shdesc@" __stringify(__LINE__) ": ", 1533 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1534 - #endif 1541 + /* Store upper 8B of IV */ 1542 + append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | 1543 + (0x20 << LDST_OFFSET_SHIFT)); 1544 + 1545 + print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__) 1546 + ": ", DUMP_PREFIX_ADDRESS, 16, 4, 1547 + desc, desc_bytes(desc), 1); 1535 1548 } 1536 1549 EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); 1537 1550 ··· 1579 1588 /* Perform operation */ 1580 1589 skcipher_append_src_dst(desc); 1581 1590 1582 - #ifdef DEBUG 1583 - print_hex_dump(KERN_ERR, 1584 - "xts skcipher dec shdesc@" __stringify(__LINE__) ": ", 1585 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1586 - #endif 1591 + /* Store upper 8B of IV */ 1592 + append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | 1593 + (0x20 << LDST_OFFSET_SHIFT)); 1594 + 1595 + print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__) 1596 + ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, 1597 + desc_bytes(desc), 1); 1587 1598 } 1588 1599 EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); 1589 1600
+2 -2
drivers/crypto/caam/caamalg_desc.h
··· 44 44 45 45 #define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ) 46 46 #define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \ 47 - 20 * CAAM_CMD_SZ) 47 + 21 * CAAM_CMD_SZ) 48 48 #define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ 49 - 15 * CAAM_CMD_SZ) 49 + 16 * CAAM_CMD_SZ) 50 50 51 51 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, 52 52 unsigned int icvsize, int era);
+120 -147
drivers/crypto/caam/caamalg_qi.c
··· 4 4 * Based on caamalg.c 5 5 * 6 6 * Copyright 2013-2016 Freescale Semiconductor, Inc. 7 - * Copyright 2016-2018 NXP 7 + * Copyright 2016-2019 NXP 8 8 */ 9 9 10 10 #include "compat.h" ··· 214 214 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 215 215 goto badkey; 216 216 217 - #ifdef DEBUG 218 - dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", 217 + dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", 219 218 keys.authkeylen + keys.enckeylen, keys.enckeylen, 220 219 keys.authkeylen); 221 - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 222 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 223 - #endif 220 + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 221 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 224 222 225 223 /* 226 224 * If DKP is supported, use it in the shared descriptor to generate ··· 235 237 memcpy(ctx->key, keys.authkey, keys.authkeylen); 236 238 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 237 239 keys.enckeylen); 238 - dma_sync_single_for_device(jrdev, ctx->key_dma, 240 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 239 241 ctx->adata.keylen_pad + 240 242 keys.enckeylen, ctx->dir); 241 243 goto skip_split_key; ··· 249 251 250 252 /* postpend encryption key to auth split key */ 251 253 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 252 - dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 253 - keys.enckeylen, ctx->dir); 254 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 255 + ctx->adata.keylen_pad + keys.enckeylen, 256 + ctx->dir); 254 257 #ifdef DEBUG 255 258 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 256 259 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ··· 385 386 struct device *jrdev = ctx->jrdev; 386 387 int ret; 387 388 388 - #ifdef DEBUG 389 - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 390 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 391 - #endif 389 + print_hex_dump_debug("key in 
@" __stringify(__LINE__)": ", 390 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 392 391 393 392 memcpy(ctx->key, key, keylen); 394 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); 393 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, 394 + ctx->dir); 395 395 ctx->cdata.keylen = keylen; 396 396 397 397 ret = gcm_set_sh_desc(aead); ··· 483 485 if (keylen < 4) 484 486 return -EINVAL; 485 487 486 - #ifdef DEBUG 487 - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 488 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 489 - #endif 488 + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 489 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 490 490 491 491 memcpy(ctx->key, key, keylen); 492 492 /* ··· 492 496 * in the nonce. Update the AES key length. 493 497 */ 494 498 ctx->cdata.keylen = keylen - 4; 495 - dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 496 - ctx->dir); 499 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 500 + ctx->cdata.keylen, ctx->dir); 497 501 498 502 ret = rfc4106_set_sh_desc(aead); 499 503 if (ret) ··· 585 589 if (keylen < 4) 586 590 return -EINVAL; 587 591 588 - #ifdef DEBUG 589 - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 590 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 591 - #endif 592 + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 593 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 592 594 593 595 memcpy(ctx->key, key, keylen); 594 596 /* ··· 594 600 * in the nonce. Update the AES key length. 
595 601 */ 596 602 ctx->cdata.keylen = keylen - 4; 597 - dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 598 - ctx->dir); 603 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 604 + ctx->cdata.keylen, ctx->dir); 599 605 600 606 ret = rfc4543_set_sh_desc(aead); 601 607 if (ret) ··· 638 644 const bool is_rfc3686 = alg->caam.rfc3686; 639 645 int ret = 0; 640 646 641 - #ifdef DEBUG 642 - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 643 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 644 - #endif 647 + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 648 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 649 + 645 650 /* 646 651 * AES-CTR needs to load IV in CONTEXT1 reg 647 652 * at an offset of 128bits (16bytes) ··· 831 838 static void caam_unmap(struct device *dev, struct scatterlist *src, 832 839 struct scatterlist *dst, int src_nents, 833 840 int dst_nents, dma_addr_t iv_dma, int ivsize, 834 - dma_addr_t qm_sg_dma, int qm_sg_bytes) 841 + enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, 842 + int qm_sg_bytes) 835 843 { 836 844 if (dst != src) { 837 845 if (src_nents) ··· 844 850 } 845 851 846 852 if (iv_dma) 847 - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 853 + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); 848 854 if (qm_sg_bytes) 849 855 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); 850 856 } ··· 857 863 int ivsize = crypto_aead_ivsize(aead); 858 864 859 865 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 860 - edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 866 + edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, 867 + edesc->qm_sg_bytes); 861 868 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 862 869 } 863 870 ··· 869 874 int ivsize = crypto_skcipher_ivsize(skcipher); 870 875 871 876 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 872 - edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 
877 + edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, 878 + edesc->qm_sg_bytes); 873 879 } 874 880 875 881 static void aead_done(struct caam_drv_req *drv_req, u32 status) ··· 920 924 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 921 925 GFP_KERNEL : GFP_ATOMIC; 922 926 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 927 + int src_len, dst_len = 0; 923 928 struct aead_edesc *edesc; 924 929 dma_addr_t qm_sg_dma, iv_dma = 0; 925 930 int ivsize = 0; ··· 942 945 } 943 946 944 947 if (likely(req->src == req->dst)) { 945 - src_nents = sg_nents_for_len(req->src, req->assoclen + 946 - req->cryptlen + 947 - (encrypt ? authsize : 0)); 948 + src_len = req->assoclen + req->cryptlen + 949 + (encrypt ? authsize : 0); 950 + 951 + src_nents = sg_nents_for_len(req->src, src_len); 948 952 if (unlikely(src_nents < 0)) { 949 953 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 950 - req->assoclen + req->cryptlen + 951 - (encrypt ? authsize : 0)); 954 + src_len); 952 955 qi_cache_free(edesc); 953 956 return ERR_PTR(src_nents); 954 957 } ··· 961 964 return ERR_PTR(-ENOMEM); 962 965 } 963 966 } else { 964 - src_nents = sg_nents_for_len(req->src, req->assoclen + 965 - req->cryptlen); 967 + src_len = req->assoclen + req->cryptlen; 968 + dst_len = src_len + (encrypt ? authsize : (-authsize)); 969 + 970 + src_nents = sg_nents_for_len(req->src, src_len); 966 971 if (unlikely(src_nents < 0)) { 967 972 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 968 - req->assoclen + req->cryptlen); 973 + src_len); 969 974 qi_cache_free(edesc); 970 975 return ERR_PTR(src_nents); 971 976 } 972 977 973 - dst_nents = sg_nents_for_len(req->dst, req->assoclen + 974 - req->cryptlen + 975 - (encrypt ? authsize : 976 - (-authsize))); 978 + dst_nents = sg_nents_for_len(req->dst, dst_len); 977 979 if (unlikely(dst_nents < 0)) { 978 980 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 979 - req->assoclen + req->cryptlen + 980 - (encrypt ? 
authsize : (-authsize))); 981 + dst_len); 981 982 qi_cache_free(edesc); 982 983 return ERR_PTR(dst_nents); 983 984 } ··· 1014 1019 /* 1015 1020 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. 1016 1021 * Input is not contiguous. 1022 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1023 + * the end of the table by allocating more S/G entries. Logic: 1024 + * if (src != dst && output S/G) 1025 + * pad output S/G, if needed 1026 + * else if (src == dst && S/G) 1027 + * overlapping S/Gs; pad one of them 1028 + * else if (input S/G) ... 1029 + * pad input S/G, if needed 1017 1030 */ 1018 - qm_sg_ents = 1 + !!ivsize + mapped_src_nents + 1019 - (mapped_dst_nents > 1 ? mapped_dst_nents : 0); 1031 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents; 1032 + if (mapped_dst_nents > 1) 1033 + qm_sg_ents += pad_sg_nents(mapped_dst_nents); 1034 + else if ((req->src == req->dst) && (mapped_src_nents > 1)) 1035 + qm_sg_ents = max(pad_sg_nents(qm_sg_ents), 1036 + 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); 1037 + else 1038 + qm_sg_ents = pad_sg_nents(qm_sg_ents); 1039 + 1020 1040 sg_table = &edesc->sgt[0]; 1021 1041 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); 1022 1042 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > ··· 1039 1029 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1040 1030 qm_sg_ents, ivsize); 1041 1031 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1042 - 0, 0, 0); 1032 + 0, DMA_NONE, 0, 0); 1043 1033 qi_cache_free(edesc); 1044 1034 return ERR_PTR(-ENOMEM); 1045 1035 } ··· 1054 1044 if (dma_mapping_error(qidev, iv_dma)) { 1055 1045 dev_err(qidev, "unable to map IV\n"); 1056 1046 caam_unmap(qidev, req->src, req->dst, src_nents, 1057 - dst_nents, 0, 0, 0, 0); 1047 + dst_nents, 0, 0, DMA_NONE, 0, 0); 1058 1048 qi_cache_free(edesc); 1059 1049 return ERR_PTR(-ENOMEM); 1060 1050 } ··· 1073 1063 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { 1074 1064 dev_err(qidev, "unable 
to map assoclen\n"); 1075 1065 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1076 - iv_dma, ivsize, 0, 0); 1066 + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1077 1067 qi_cache_free(edesc); 1078 1068 return ERR_PTR(-ENOMEM); 1079 1069 } ··· 1084 1074 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); 1085 1075 qm_sg_index++; 1086 1076 } 1087 - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); 1077 + sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); 1088 1078 qm_sg_index += mapped_src_nents; 1089 1079 1090 1080 if (mapped_dst_nents > 1) 1091 - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1092 - qm_sg_index, 0); 1081 + sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); 1093 1082 1094 1083 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); 1095 1084 if (dma_mapping_error(qidev, qm_sg_dma)) { 1096 1085 dev_err(qidev, "unable to map S/G table\n"); 1097 1086 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1098 1087 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1099 - iv_dma, ivsize, 0, 0); 1088 + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1100 1089 qi_cache_free(edesc); 1101 1090 return ERR_PTR(-ENOMEM); 1102 1091 } ··· 1118 1109 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + 1119 1110 (1 + !!ivsize) * sizeof(*sg_table), 1120 1111 out_len, 0); 1121 - } else if (mapped_dst_nents == 1) { 1112 + } else if (mapped_dst_nents <= 1) { 1122 1113 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, 1123 1114 0); 1124 1115 } else { ··· 1191 1182 struct device *qidev = caam_ctx->qidev; 1192 1183 int ivsize = crypto_skcipher_ivsize(skcipher); 1193 1184 1194 - #ifdef DEBUG 1195 - dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1196 - #endif 1185 + dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1197 1186 1198 1187 edesc = container_of(drv_req, typeof(*edesc), drv_req); 1199 1188 1200 1189 if (status) 1201 1190 
caam_jr_strstatus(qidev, status); 1202 1191 1203 - #ifdef DEBUG 1204 - print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", 1205 - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1206 - edesc->src_nents > 1 ? 100 : ivsize, 1); 1207 - caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 1192 + print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1193 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1194 + edesc->src_nents > 1 ? 100 : ivsize, 1); 1195 + caam_dump_sg("dst @" __stringify(__LINE__)": ", 1208 1196 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1209 1197 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1210 - #endif 1211 1198 1212 1199 skcipher_unmap(qidev, edesc, req); 1213 1200 1214 1201 /* 1215 1202 * The crypto API expects us to set the IV (req->iv) to the last 1216 - * ciphertext block. This is used e.g. by the CTS mode. 1203 + * ciphertext block (CBC mode) or last counter (CTR mode). 1204 + * This is used e.g. by the CTS mode. 1217 1205 */ 1218 - if (edesc->drv_req.drv_ctx->op_type == ENCRYPT) 1219 - scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - 1220 - ivsize, ivsize, 0); 1206 + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1221 1207 1222 1208 qi_cache_free(edesc); 1223 1209 skcipher_request_complete(req, status); ··· 1280 1276 qm_sg_ents = 1 + mapped_src_nents; 1281 1277 dst_sg_idx = qm_sg_ents; 1282 1278 1283 - qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1279 + /* 1280 + * Input, output HW S/G tables: [IV, src][dst, IV] 1281 + * IV entries point to the same buffer 1282 + * If src == dst, S/G entries are reused (S/G tables overlap) 1283 + * 1284 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1285 + * the end of the table by allocating more S/G entries. 
1286 + */ 1287 + if (req->src != req->dst) 1288 + qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); 1289 + else 1290 + qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); 1291 + 1284 1292 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); 1285 1293 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1286 1294 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1287 1295 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1288 1296 qm_sg_ents, ivsize); 1289 1297 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1290 - 0, 0, 0); 1298 + 0, DMA_NONE, 0, 0); 1291 1299 return ERR_PTR(-ENOMEM); 1292 1300 } 1293 1301 ··· 1308 1292 if (unlikely(!edesc)) { 1309 1293 dev_err(qidev, "could not allocate extended descriptor\n"); 1310 1294 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1311 - 0, 0, 0); 1295 + 0, DMA_NONE, 0, 0); 1312 1296 return ERR_PTR(-ENOMEM); 1313 1297 } 1314 1298 ··· 1317 1301 iv = (u8 *)(sg_table + qm_sg_ents); 1318 1302 memcpy(iv, req->iv, ivsize); 1319 1303 1320 - iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); 1304 + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); 1321 1305 if (dma_mapping_error(qidev, iv_dma)) { 1322 1306 dev_err(qidev, "unable to map IV\n"); 1323 1307 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1324 - 0, 0, 0); 1308 + 0, DMA_NONE, 0, 0); 1325 1309 qi_cache_free(edesc); 1326 1310 return ERR_PTR(-ENOMEM); 1327 1311 } ··· 1335 1319 edesc->drv_req.drv_ctx = drv_ctx; 1336 1320 1337 1321 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1338 - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); 1322 + sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); 1339 1323 1340 - if (mapped_dst_nents > 1) 1341 - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1342 - dst_sg_idx, 0); 1324 + if (req->src != req->dst) 1325 + sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); 1326 + 1327 + dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, 
iv_dma, 1328 + ivsize, 0); 1343 1329 1344 1330 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, 1345 1331 DMA_TO_DEVICE); 1346 1332 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { 1347 1333 dev_err(qidev, "unable to map S/G table\n"); 1348 1334 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1349 - iv_dma, ivsize, 0, 0); 1335 + iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); 1350 1336 qi_cache_free(edesc); 1351 1337 return ERR_PTR(-ENOMEM); 1352 1338 } ··· 1358 1340 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, 1359 1341 ivsize + req->cryptlen, 0); 1360 1342 1361 - if (req->src == req->dst) { 1343 + if (req->src == req->dst) 1362 1344 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + 1363 - sizeof(*sg_table), req->cryptlen, 0); 1364 - } else if (mapped_dst_nents > 1) { 1345 + sizeof(*sg_table), req->cryptlen + ivsize, 1346 + 0); 1347 + else 1365 1348 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * 1366 - sizeof(*sg_table), req->cryptlen, 0); 1367 - } else { 1368 - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), 1369 - req->cryptlen, 0); 1370 - } 1349 + sizeof(*sg_table), req->cryptlen + ivsize, 1350 + 0); 1371 1351 1372 1352 return edesc; 1373 1353 } ··· 1375 1359 struct skcipher_edesc *edesc; 1376 1360 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1377 1361 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1378 - int ivsize = crypto_skcipher_ivsize(skcipher); 1379 1362 int ret; 1380 1363 1381 1364 if (unlikely(caam_congested)) ··· 1384 1369 edesc = skcipher_edesc_alloc(req, encrypt); 1385 1370 if (IS_ERR(edesc)) 1386 1371 return PTR_ERR(edesc); 1387 - 1388 - /* 1389 - * The crypto API expects us to set the IV (req->iv) to the last 1390 - * ciphertext block. 
1391 - */ 1392 - if (!encrypt) 1393 - scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - 1394 - ivsize, ivsize, 0); 1395 1372 1396 1373 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1397 1374 if (!ret) { ··· 2389 2382 bool uses_dkp) 2390 2383 { 2391 2384 struct caam_drv_private *priv; 2385 + struct device *dev; 2392 2386 2393 2387 /* 2394 2388 * distribute tfms across job rings to ensure in-order ··· 2401 2393 return PTR_ERR(ctx->jrdev); 2402 2394 } 2403 2395 2404 - priv = dev_get_drvdata(ctx->jrdev->parent); 2396 + dev = ctx->jrdev->parent; 2397 + priv = dev_get_drvdata(dev); 2405 2398 if (priv->era >= 6 && uses_dkp) 2406 2399 ctx->dir = DMA_BIDIRECTIONAL; 2407 2400 else 2408 2401 ctx->dir = DMA_TO_DEVICE; 2409 2402 2410 - ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), 2403 + ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), 2411 2404 ctx->dir); 2412 - if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { 2413 - dev_err(ctx->jrdev, "unable to map key\n"); 2405 + if (dma_mapping_error(dev, ctx->key_dma)) { 2406 + dev_err(dev, "unable to map key\n"); 2414 2407 caam_jr_free(ctx->jrdev); 2415 2408 return -ENOMEM; 2416 2409 } ··· 2420 2411 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2421 2412 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2422 2413 2423 - ctx->qidev = priv->qidev; 2414 + ctx->qidev = dev; 2424 2415 2425 2416 spin_lock_init(&ctx->lock); 2426 2417 ctx->drv_ctx[ENCRYPT] = NULL; ··· 2454 2445 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); 2455 2446 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2456 2447 2457 - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); 2448 + dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), 2449 + ctx->dir); 2458 2450 2459 2451 caam_jr_free(ctx->jrdev); 2460 2452 } ··· 2470 2460 caam_exit_common(crypto_aead_ctx(tfm)); 2471 2461 } 2472 2462 2473 - static void __exit caam_qi_algapi_exit(void) 2463 + void 
caam_qi_algapi_exit(void) 2474 2464 { 2475 2465 int i; 2476 2466 ··· 2515 2505 alg->exit = caam_aead_exit; 2516 2506 } 2517 2507 2518 - static int __init caam_qi_algapi_init(void) 2508 + int caam_qi_algapi_init(struct device *ctrldev) 2519 2509 { 2520 - struct device_node *dev_node; 2521 - struct platform_device *pdev; 2522 - struct device *ctrldev; 2523 - struct caam_drv_private *priv; 2510 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 2524 2511 int i = 0, err = 0; 2525 2512 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; 2526 2513 unsigned int md_limit = SHA512_DIGEST_SIZE; 2527 2514 bool registered = false; 2528 2515 2529 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2530 - if (!dev_node) { 2531 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 2532 - if (!dev_node) 2533 - return -ENODEV; 2534 - } 2535 - 2536 - pdev = of_find_device_by_node(dev_node); 2537 - of_node_put(dev_node); 2538 - if (!pdev) 2539 - return -ENODEV; 2540 - 2541 - ctrldev = &pdev->dev; 2542 - priv = dev_get_drvdata(ctrldev); 2543 - 2544 - /* 2545 - * If priv is NULL, it's probably because the caam driver wasn't 2546 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 
2547 - */ 2548 - if (!priv || !priv->qi_present) { 2549 - err = -ENODEV; 2550 - goto out_put_dev; 2551 - } 2552 - 2553 2516 if (caam_dpaa2) { 2554 2517 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); 2555 - err = -ENODEV; 2556 - goto out_put_dev; 2518 + return -ENODEV; 2557 2519 } 2558 2520 2559 2521 /* ··· 2580 2598 2581 2599 err = crypto_register_skcipher(&t_alg->skcipher); 2582 2600 if (err) { 2583 - dev_warn(priv->qidev, "%s alg registration failed\n", 2601 + dev_warn(ctrldev, "%s alg registration failed\n", 2584 2602 t_alg->skcipher.base.cra_driver_name); 2585 2603 continue; 2586 2604 } ··· 2636 2654 } 2637 2655 2638 2656 if (registered) 2639 - dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); 2657 + dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); 2640 2658 2641 - out_put_dev: 2642 - put_device(ctrldev); 2643 2659 return err; 2644 2660 } 2645 - 2646 - module_init(caam_qi_algapi_init); 2647 - module_exit(caam_qi_algapi_exit); 2648 - 2649 - MODULE_LICENSE("GPL"); 2650 - MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend"); 2651 - MODULE_AUTHOR("Freescale Semiconductor");
+120 -82
drivers/crypto/caam/caamalg_qi2.c
··· 1 1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 2 /* 3 3 * Copyright 2015-2016 Freescale Semiconductor Inc. 4 - * Copyright 2017-2018 NXP 4 + * Copyright 2017-2019 NXP 5 5 */ 6 6 7 7 #include "compat.h" ··· 140 140 static void caam_unmap(struct device *dev, struct scatterlist *src, 141 141 struct scatterlist *dst, int src_nents, 142 142 int dst_nents, dma_addr_t iv_dma, int ivsize, 143 - dma_addr_t qm_sg_dma, int qm_sg_bytes) 143 + enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, 144 + int qm_sg_bytes) 144 145 { 145 146 if (dst != src) { 146 147 if (src_nents) ··· 153 152 } 154 153 155 154 if (iv_dma) 156 - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 155 + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); 157 156 158 157 if (qm_sg_bytes) 159 158 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); ··· 372 371 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 373 372 GFP_KERNEL : GFP_ATOMIC; 374 373 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 374 + int src_len, dst_len = 0; 375 375 struct aead_edesc *edesc; 376 376 dma_addr_t qm_sg_dma, iv_dma = 0; 377 377 int ivsize = 0; ··· 389 387 } 390 388 391 389 if (unlikely(req->dst != req->src)) { 392 - src_nents = sg_nents_for_len(req->src, req->assoclen + 393 - req->cryptlen); 390 + src_len = req->assoclen + req->cryptlen; 391 + dst_len = src_len + (encrypt ? authsize : (-authsize)); 392 + 393 + src_nents = sg_nents_for_len(req->src, src_len); 394 394 if (unlikely(src_nents < 0)) { 395 395 dev_err(dev, "Insufficient bytes (%d) in src S/G\n", 396 - req->assoclen + req->cryptlen); 396 + src_len); 397 397 qi_cache_free(edesc); 398 398 return ERR_PTR(src_nents); 399 399 } 400 400 401 - dst_nents = sg_nents_for_len(req->dst, req->assoclen + 402 - req->cryptlen + 403 - (encrypt ? 
authsize : 404 - (-authsize))); 401 + dst_nents = sg_nents_for_len(req->dst, dst_len); 405 402 if (unlikely(dst_nents < 0)) { 406 403 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", 407 - req->assoclen + req->cryptlen + 408 - (encrypt ? authsize : (-authsize))); 404 + dst_len); 409 405 qi_cache_free(edesc); 410 406 return ERR_PTR(dst_nents); 411 407 } ··· 434 434 mapped_dst_nents = 0; 435 435 } 436 436 } else { 437 - src_nents = sg_nents_for_len(req->src, req->assoclen + 438 - req->cryptlen + 439 - (encrypt ? authsize : 0)); 437 + src_len = req->assoclen + req->cryptlen + 438 + (encrypt ? authsize : 0); 439 + 440 + src_nents = sg_nents_for_len(req->src, src_len); 440 441 if (unlikely(src_nents < 0)) { 441 442 dev_err(dev, "Insufficient bytes (%d) in src S/G\n", 442 - req->assoclen + req->cryptlen + 443 - (encrypt ? authsize : 0)); 443 + src_len); 444 444 qi_cache_free(edesc); 445 445 return ERR_PTR(src_nents); 446 446 } ··· 460 460 /* 461 461 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. 462 462 * Input is not contiguous. 463 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 464 + * the end of the table by allocating more S/G entries. Logic: 465 + * if (src != dst && output S/G) 466 + * pad output S/G, if needed 467 + * else if (src == dst && S/G) 468 + * overlapping S/Gs; pad one of them 469 + * else if (input S/G) ... 470 + * pad input S/G, if needed 463 471 */ 464 - qm_sg_nents = 1 + !!ivsize + mapped_src_nents + 465 - (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); 472 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents; 473 + if (mapped_dst_nents > 1) 474 + qm_sg_nents += pad_sg_nents(mapped_dst_nents); 475 + else if ((req->src == req->dst) && (mapped_src_nents > 1)) 476 + qm_sg_nents = max(pad_sg_nents(qm_sg_nents), 477 + 1 + !!ivsize + 478 + pad_sg_nents(mapped_src_nents)); 479 + else 480 + qm_sg_nents = pad_sg_nents(qm_sg_nents); 481 + 466 482 sg_table = &edesc->sgt[0]; 467 483 qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); 468 484 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > ··· 486 470 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", 487 471 qm_sg_nents, ivsize); 488 472 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 489 - 0, 0, 0); 473 + 0, DMA_NONE, 0, 0); 490 474 qi_cache_free(edesc); 491 475 return ERR_PTR(-ENOMEM); 492 476 } ··· 501 485 if (dma_mapping_error(dev, iv_dma)) { 502 486 dev_err(dev, "unable to map IV\n"); 503 487 caam_unmap(dev, req->src, req->dst, src_nents, 504 - dst_nents, 0, 0, 0, 0); 488 + dst_nents, 0, 0, DMA_NONE, 0, 0); 505 489 qi_cache_free(edesc); 506 490 return ERR_PTR(-ENOMEM); 507 491 } ··· 525 509 if (dma_mapping_error(dev, edesc->assoclen_dma)) { 526 510 dev_err(dev, "unable to map assoclen\n"); 527 511 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 528 - iv_dma, ivsize, 0, 0); 512 + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 529 513 qi_cache_free(edesc); 530 514 return ERR_PTR(-ENOMEM); 531 515 } ··· 536 520 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); 537 521 qm_sg_index++; 538 522 } 539 - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); 523 + sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); 540 524 qm_sg_index += mapped_src_nents; 541 525 542 526 if (mapped_dst_nents > 1) 543 - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 544 - qm_sg_index, 0); 527 + sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); 545 528 546 529 qm_sg_dma = 
dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); 547 530 if (dma_mapping_error(dev, qm_sg_dma)) { 548 531 dev_err(dev, "unable to map S/G table\n"); 549 532 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 550 533 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 551 - iv_dma, ivsize, 0, 0); 534 + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 552 535 qi_cache_free(edesc); 553 536 return ERR_PTR(-ENOMEM); 554 537 } ··· 574 559 dpaa2_fl_set_addr(out_fle, qm_sg_dma + 575 560 (1 + !!ivsize) * sizeof(*sg_table)); 576 561 } 562 + } else if (!mapped_dst_nents) { 563 + /* 564 + * crypto engine requires the output entry to be present when 565 + * "frame list" FD is used. 566 + * Since engine does not support FMT=2'b11 (unused entry type), 567 + * leaving out_fle zeroized is the best option. 568 + */ 569 + goto skip_out_fle; 577 570 } else if (mapped_dst_nents == 1) { 578 571 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 579 572 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); ··· 593 570 594 571 dpaa2_fl_set_len(out_fle, out_len); 595 572 573 + skip_out_fle: 596 574 return edesc; 597 575 } 598 576 ··· 1101 1077 qm_sg_ents = 1 + mapped_src_nents; 1102 1078 dst_sg_idx = qm_sg_ents; 1103 1079 1104 - qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1080 + /* 1081 + * Input, output HW S/G tables: [IV, src][dst, IV] 1082 + * IV entries point to the same buffer 1083 + * If src == dst, S/G entries are reused (S/G tables overlap) 1084 + * 1085 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1086 + * the end of the table by allocating more S/G entries. 
1087 + */ 1088 + if (req->src != req->dst) 1089 + qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); 1090 + else 1091 + qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); 1092 + 1105 1093 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); 1106 1094 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1107 1095 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1108 1096 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", 1109 1097 qm_sg_ents, ivsize); 1110 1098 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 1111 - 0, 0, 0); 1099 + 0, DMA_NONE, 0, 0); 1112 1100 return ERR_PTR(-ENOMEM); 1113 1101 } 1114 1102 ··· 1129 1093 if (unlikely(!edesc)) { 1130 1094 dev_err(dev, "could not allocate extended descriptor\n"); 1131 1095 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 1132 - 0, 0, 0); 1096 + 0, DMA_NONE, 0, 0); 1133 1097 return ERR_PTR(-ENOMEM); 1134 1098 } 1135 1099 ··· 1138 1102 iv = (u8 *)(sg_table + qm_sg_ents); 1139 1103 memcpy(iv, req->iv, ivsize); 1140 1104 1141 - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1105 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL); 1142 1106 if (dma_mapping_error(dev, iv_dma)) { 1143 1107 dev_err(dev, "unable to map IV\n"); 1144 1108 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 1145 - 0, 0, 0); 1109 + 0, DMA_NONE, 0, 0); 1146 1110 qi_cache_free(edesc); 1147 1111 return ERR_PTR(-ENOMEM); 1148 1112 } ··· 1153 1117 edesc->qm_sg_bytes = qm_sg_bytes; 1154 1118 1155 1119 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1156 - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); 1120 + sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); 1157 1121 1158 - if (mapped_dst_nents > 1) 1159 - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1160 - dst_sg_idx, 0); 1122 + if (req->src != req->dst) 1123 + sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); 1124 + 1125 + dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, 1126 + 
ivsize, 0); 1161 1127 1162 1128 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, 1163 1129 DMA_TO_DEVICE); 1164 1130 if (dma_mapping_error(dev, edesc->qm_sg_dma)) { 1165 1131 dev_err(dev, "unable to map S/G table\n"); 1166 1132 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 1167 - iv_dma, ivsize, 0, 0); 1133 + iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); 1168 1134 qi_cache_free(edesc); 1169 1135 return ERR_PTR(-ENOMEM); 1170 1136 } ··· 1174 1136 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); 1175 1137 dpaa2_fl_set_final(in_fle, true); 1176 1138 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); 1177 - dpaa2_fl_set_len(out_fle, req->cryptlen); 1139 + dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize); 1178 1140 1179 1141 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); 1180 1142 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); 1181 1143 1182 - if (req->src == req->dst) { 1183 - dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); 1144 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); 1145 + 1146 + if (req->src == req->dst) 1184 1147 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + 1185 1148 sizeof(*sg_table)); 1186 - } else if (mapped_dst_nents > 1) { 1187 - dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); 1149 + else 1188 1150 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * 1189 1151 sizeof(*sg_table)); 1190 - } else { 1191 - dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 1192 - dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); 1193 - } 1194 1152 1195 1153 return edesc; 1196 1154 } ··· 1198 1164 int ivsize = crypto_aead_ivsize(aead); 1199 1165 1200 1166 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 1201 - edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 1167 + edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, 1168 + edesc->qm_sg_bytes); 1202 1169 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1203 1170 } 1204 1171 ··· 1210 1175 int ivsize = crypto_skcipher_ivsize(skcipher); 1211 1176 1212 1177 
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 1213 - edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 1178 + edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, 1179 + edesc->qm_sg_bytes); 1214 1180 } 1215 1181 1216 1182 static void aead_encrypt_done(void *cbk_ctx, u32 status) ··· 1360 1324 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1361 1325 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1362 1326 edesc->src_nents > 1 ? 100 : ivsize, 1); 1363 - caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", 1327 + caam_dump_sg("dst @" __stringify(__LINE__)": ", 1364 1328 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1365 1329 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1366 1330 ··· 1368 1332 1369 1333 /* 1370 1334 * The crypto API expects us to set the IV (req->iv) to the last 1371 - * ciphertext block. This is used e.g. by the CTS mode. 1335 + * ciphertext block (CBC mode) or last counter (CTR mode). 1336 + * This is used e.g. by the CTS mode. 1372 1337 */ 1373 - scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize, 1374 - ivsize, 0); 1338 + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1375 1339 1376 1340 qi_cache_free(edesc); 1377 1341 skcipher_request_complete(req, ecode); ··· 1398 1362 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1399 1363 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1400 1364 edesc->src_nents > 1 ? 100 : ivsize, 1); 1401 - caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", 1365 + caam_dump_sg("dst @" __stringify(__LINE__)": ", 1402 1366 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1403 1367 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1404 1368 1405 1369 skcipher_unmap(ctx->dev, edesc, req); 1370 + 1371 + /* 1372 + * The crypto API expects us to set the IV (req->iv) to the last 1373 + * ciphertext block (CBC mode) or last counter (CTR mode). 1374 + * This is used e.g. by the CTS mode. 
1375 + */ 1376 + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1377 + 1406 1378 qi_cache_free(edesc); 1407 1379 skcipher_request_complete(req, ecode); 1408 1380 } ··· 1449 1405 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1450 1406 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1451 1407 struct caam_request *caam_req = skcipher_request_ctx(req); 1452 - int ivsize = crypto_skcipher_ivsize(skcipher); 1453 1408 int ret; 1454 1409 1455 1410 /* allocate extended descriptor */ 1456 1411 edesc = skcipher_edesc_alloc(req); 1457 1412 if (IS_ERR(edesc)) 1458 1413 return PTR_ERR(edesc); 1459 - 1460 - /* 1461 - * The crypto API expects us to set the IV (req->iv) to the last 1462 - * ciphertext block. 1463 - */ 1464 - scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize, 1465 - ivsize, 0); 1466 1414 1467 1415 caam_req->flc = &ctx->flc[DECRYPT]; 1468 1416 caam_req->flc_dma = ctx->flc_dma[DECRYPT]; ··· 3416 3380 3417 3381 if (to_hash) { 3418 3382 struct dpaa2_sg_entry *sg_table; 3383 + int src_len = req->nbytes - *next_buflen; 3419 3384 3420 - src_nents = sg_nents_for_len(req->src, 3421 - req->nbytes - (*next_buflen)); 3385 + src_nents = sg_nents_for_len(req->src, src_len); 3422 3386 if (src_nents < 0) { 3423 3387 dev_err(ctx->dev, "Invalid number of src SG.\n"); 3424 3388 return src_nents; ··· 3445 3409 3446 3410 edesc->src_nents = src_nents; 3447 3411 qm_sg_src_index = 1 + (*buflen ? 
1 : 0); 3448 - qm_sg_bytes = (qm_sg_src_index + mapped_nents) * 3412 + qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * 3449 3413 sizeof(*sg_table); 3450 3414 sg_table = &edesc->sgt[0]; 3451 3415 ··· 3459 3423 goto unmap_ctx; 3460 3424 3461 3425 if (mapped_nents) { 3462 - sg_to_qm_sg_last(req->src, mapped_nents, 3426 + sg_to_qm_sg_last(req->src, src_len, 3463 3427 sg_table + qm_sg_src_index, 0); 3464 3428 if (*next_buflen) 3465 3429 scatterwalk_map_and_copy(next_buf, req->src, ··· 3530 3494 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 3531 3495 GFP_KERNEL : GFP_ATOMIC; 3532 3496 int buflen = *current_buflen(state); 3533 - int qm_sg_bytes, qm_sg_src_index; 3497 + int qm_sg_bytes; 3534 3498 int digestsize = crypto_ahash_digestsize(ahash); 3535 3499 struct ahash_edesc *edesc; 3536 3500 struct dpaa2_sg_entry *sg_table; ··· 3541 3505 if (!edesc) 3542 3506 return -ENOMEM; 3543 3507 3544 - qm_sg_src_index = 1 + (buflen ? 1 : 0); 3545 - qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table); 3508 + qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table); 3546 3509 sg_table = &edesc->sgt[0]; 3547 3510 3548 3511 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, ··· 3553 3518 if (ret) 3554 3519 goto unmap_ctx; 3555 3520 3556 - dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); 3521 + dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true); 3557 3522 3558 3523 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, 3559 3524 DMA_TO_DEVICE); ··· 3634 3599 3635 3600 edesc->src_nents = src_nents; 3636 3601 qm_sg_src_index = 1 + (buflen ? 
1 : 0); 3637 - qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table); 3602 + qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * 3603 + sizeof(*sg_table); 3638 3604 sg_table = &edesc->sgt[0]; 3639 3605 3640 3606 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, ··· 3647 3611 if (ret) 3648 3612 goto unmap_ctx; 3649 3613 3650 - sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0); 3614 + sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0); 3651 3615 3652 3616 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, 3653 3617 DMA_TO_DEVICE); ··· 3732 3696 int qm_sg_bytes; 3733 3697 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; 3734 3698 3735 - qm_sg_bytes = mapped_nents * sizeof(*sg_table); 3736 - sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); 3699 + qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table); 3700 + sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0); 3737 3701 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, 3738 3702 qm_sg_bytes, DMA_TO_DEVICE); 3739 3703 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ··· 3876 3840 3877 3841 if (to_hash) { 3878 3842 struct dpaa2_sg_entry *sg_table; 3843 + int src_len = req->nbytes - *next_buflen; 3879 3844 3880 - src_nents = sg_nents_for_len(req->src, 3881 - req->nbytes - *next_buflen); 3845 + src_nents = sg_nents_for_len(req->src, src_len); 3882 3846 if (src_nents < 0) { 3883 3847 dev_err(ctx->dev, "Invalid number of src SG.\n"); 3884 3848 return src_nents; ··· 3904 3868 } 3905 3869 3906 3870 edesc->src_nents = src_nents; 3907 - qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table); 3871 + qm_sg_bytes = pad_sg_nents(1 + mapped_nents) * 3872 + sizeof(*sg_table); 3908 3873 sg_table = &edesc->sgt[0]; 3909 3874 3910 3875 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); 3911 3876 if (ret) 3912 3877 goto unmap_ctx; 3913 3878 3914 - sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); 3879 + 
sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); 3915 3880 3916 3881 if (*next_buflen) 3917 3882 scatterwalk_map_and_copy(next_buf, req->src, ··· 4024 3987 } 4025 3988 4026 3989 edesc->src_nents = src_nents; 4027 - qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table); 3990 + qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table); 4028 3991 sg_table = &edesc->sgt[0]; 4029 3992 4030 3993 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); 4031 3994 if (ret) 4032 3995 goto unmap; 4033 3996 4034 - sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); 3997 + sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0); 4035 3998 4036 3999 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, 4037 4000 DMA_TO_DEVICE); ··· 4101 4064 4102 4065 if (to_hash) { 4103 4066 struct dpaa2_sg_entry *sg_table; 4067 + int src_len = req->nbytes - *next_buflen; 4104 4068 4105 - src_nents = sg_nents_for_len(req->src, 4106 - req->nbytes - (*next_buflen)); 4069 + src_nents = sg_nents_for_len(req->src, src_len); 4107 4070 if (src_nents < 0) { 4108 4071 dev_err(ctx->dev, "Invalid number of src SG.\n"); 4109 4072 return src_nents; ··· 4138 4101 if (mapped_nents > 1) { 4139 4102 int qm_sg_bytes; 4140 4103 4141 - sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); 4142 - qm_sg_bytes = mapped_nents * sizeof(*sg_table); 4104 + sg_to_qm_sg_last(req->src, src_len, sg_table, 0); 4105 + qm_sg_bytes = pad_sg_nents(mapped_nents) * 4106 + sizeof(*sg_table); 4143 4107 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, 4144 4108 qm_sg_bytes, 4145 4109 DMA_TO_DEVICE);
+123 -206
drivers/crypto/caam/caamhash.c
··· 82 82 #define HASH_MSG_LEN 8 83 83 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 84 84 85 - #ifdef DEBUG 86 - /* for print_hex_dumps with line references */ 87 - #define debug(format, arg...) printk(format, arg) 88 - #else 89 - #define debug(format, arg...) 90 - #endif 91 - 92 - 93 85 static struct list_head hash_list; 94 86 95 87 /* ahash per-session context */ ··· 235 243 ctx->ctx_len, true, ctrlpriv->era); 236 244 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 237 245 desc_bytes(desc), ctx->dir); 238 - #ifdef DEBUG 239 - print_hex_dump(KERN_ERR, 240 - "ahash update shdesc@"__stringify(__LINE__)": ", 241 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 242 - #endif 246 + 247 + print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ", 248 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 249 + 1); 243 250 244 251 /* ahash_update_first shared descriptor */ 245 252 desc = ctx->sh_desc_update_first; ··· 246 255 ctx->ctx_len, false, ctrlpriv->era); 247 256 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 248 257 desc_bytes(desc), ctx->dir); 249 - #ifdef DEBUG 250 - print_hex_dump(KERN_ERR, 251 - "ahash update first shdesc@"__stringify(__LINE__)": ", 252 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 253 - #endif 258 + print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__) 259 + ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, 260 + desc_bytes(desc), 1); 254 261 255 262 /* ahash_final shared descriptor */ 256 263 desc = ctx->sh_desc_fin; ··· 256 267 ctx->ctx_len, true, ctrlpriv->era); 257 268 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 258 269 desc_bytes(desc), ctx->dir); 259 - #ifdef DEBUG 260 - print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 261 - DUMP_PREFIX_ADDRESS, 16, 4, desc, 262 - desc_bytes(desc), 1); 263 - #endif 270 + 271 + print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ", 272 + DUMP_PREFIX_ADDRESS, 16, 4, desc, 273 + 
desc_bytes(desc), 1); 264 274 265 275 /* ahash_digest shared descriptor */ 266 276 desc = ctx->sh_desc_digest; ··· 267 279 ctx->ctx_len, false, ctrlpriv->era); 268 280 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 269 281 desc_bytes(desc), ctx->dir); 270 - #ifdef DEBUG 271 - print_hex_dump(KERN_ERR, 272 - "ahash digest shdesc@"__stringify(__LINE__)": ", 273 - DUMP_PREFIX_ADDRESS, 16, 4, desc, 274 - desc_bytes(desc), 1); 275 - #endif 282 + 283 + print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ", 284 + DUMP_PREFIX_ADDRESS, 16, 4, desc, 285 + desc_bytes(desc), 1); 276 286 277 287 return 0; 278 288 } ··· 314 328 ctx->ctx_len, ctx->key_dma); 315 329 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 316 330 desc_bytes(desc), ctx->dir); 317 - print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", 318 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 319 - 1); 331 + print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) 332 + " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, 333 + desc_bytes(desc), 1); 320 334 321 335 /* shared descriptor for ahash_digest */ 322 336 desc = ctx->sh_desc_digest; ··· 363 377 ctx->ctx_len, 0); 364 378 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 365 379 desc_bytes(desc), ctx->dir); 366 - print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", 367 - DUMP_PREFIX_ADDRESS, 16, 4, desc, 380 + print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) 381 + " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, 368 382 desc_bytes(desc), 1); 369 383 370 384 /* shared descriptor for ahash_digest */ ··· 415 429 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 416 430 LDST_SRCDST_BYTE_CONTEXT); 417 431 418 - #ifdef DEBUG 419 - print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 420 - DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); 421 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 422 - 
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 423 - #endif 432 + print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", 433 + DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); 434 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 435 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 436 + 1); 424 437 425 438 result.err = 0; 426 439 init_completion(&result.completion); ··· 429 444 /* in progress */ 430 445 wait_for_completion(&result.completion); 431 446 ret = result.err; 432 - #ifdef DEBUG 433 - print_hex_dump(KERN_ERR, 434 - "digested key@"__stringify(__LINE__)": ", 435 - DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); 436 - #endif 447 + 448 + print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", 449 + DUMP_PREFIX_ADDRESS, 16, 4, key, 450 + digestsize, 1); 437 451 } 438 452 dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); 439 453 ··· 447 463 const u8 *key, unsigned int keylen) 448 464 { 449 465 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 466 + struct device *jrdev = ctx->jrdev; 450 467 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 451 468 int digestsize = crypto_ahash_digestsize(ahash); 452 469 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 453 470 int ret; 454 471 u8 *hashed_key = NULL; 455 472 456 - #ifdef DEBUG 457 - printk(KERN_ERR "keylen %d\n", keylen); 458 - #endif 473 + dev_dbg(jrdev, "keylen %d\n", keylen); 459 474 460 475 if (keylen > blocksize) { 461 476 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); ··· 583 600 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 584 601 int digestsize = crypto_ahash_digestsize(ahash); 585 602 struct caam_hash_state *state = ahash_request_ctx(req); 586 - #ifdef DEBUG 587 603 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 588 604 589 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 590 - #endif 605 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 591 606 592 607 edesc = 
container_of(desc, struct ahash_edesc, hw_desc[0]); 593 608 if (err) ··· 595 614 memcpy(req->result, state->caam_ctx, digestsize); 596 615 kfree(edesc); 597 616 598 - #ifdef DEBUG 599 - print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 600 - DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 601 - ctx->ctx_len, 1); 602 - #endif 617 + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", 618 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 619 + ctx->ctx_len, 1); 603 620 604 621 req->base.complete(&req->base, err); 605 622 } ··· 610 631 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 611 632 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 612 633 struct caam_hash_state *state = ahash_request_ctx(req); 613 - #ifdef DEBUG 614 634 int digestsize = crypto_ahash_digestsize(ahash); 615 635 616 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 617 - #endif 636 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 618 637 619 638 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 620 639 if (err) ··· 622 645 switch_buf(state); 623 646 kfree(edesc); 624 647 625 - #ifdef DEBUG 626 - print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 627 - DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 628 - ctx->ctx_len, 1); 648 + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", 649 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 650 + ctx->ctx_len, 1); 629 651 if (req->result) 630 - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 631 - DUMP_PREFIX_ADDRESS, 16, 4, req->result, 632 - digestsize, 1); 633 - #endif 652 + print_hex_dump_debug("result@"__stringify(__LINE__)": ", 653 + DUMP_PREFIX_ADDRESS, 16, 4, req->result, 654 + digestsize, 1); 634 655 635 656 req->base.complete(&req->base, err); 636 657 } ··· 641 666 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 642 667 int digestsize = crypto_ahash_digestsize(ahash); 643 668 struct caam_hash_state *state = ahash_request_ctx(req); 644 - #ifdef DEBUG 645 669 struct 
caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 646 670 647 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 648 - #endif 671 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 649 672 650 673 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 651 674 if (err) ··· 653 680 memcpy(req->result, state->caam_ctx, digestsize); 654 681 kfree(edesc); 655 682 656 - #ifdef DEBUG 657 - print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 658 - DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 659 - ctx->ctx_len, 1); 660 - #endif 683 + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", 684 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 685 + ctx->ctx_len, 1); 661 686 662 687 req->base.complete(&req->base, err); 663 688 } ··· 668 697 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 669 698 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 670 699 struct caam_hash_state *state = ahash_request_ctx(req); 671 - #ifdef DEBUG 672 700 int digestsize = crypto_ahash_digestsize(ahash); 673 701 674 - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 675 - #endif 702 + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 676 703 677 704 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 678 705 if (err) ··· 680 711 switch_buf(state); 681 712 kfree(edesc); 682 713 683 - #ifdef DEBUG 684 - print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 685 - DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 686 - ctx->ctx_len, 1); 714 + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", 715 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 716 + ctx->ctx_len, 1); 687 717 if (req->result) 688 - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 689 - DUMP_PREFIX_ADDRESS, 16, 4, req->result, 690 - digestsize, 1); 691 - #endif 718 + print_hex_dump_debug("result@"__stringify(__LINE__)": ", 719 + DUMP_PREFIX_ADDRESS, 16, 4, req->result, 720 + digestsize, 1); 692 721 693 722 req->base.complete(&req->base, err); 694 723 } 
··· 726 759 727 760 if (nents > 1 || first_sg) { 728 761 struct sec4_sg_entry *sg = edesc->sec4_sg; 729 - unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 762 + unsigned int sgsize = sizeof(*sg) * 763 + pad_sg_nents(first_sg + nents); 730 764 731 - sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 765 + sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); 732 766 733 767 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 734 768 if (dma_mapping_error(ctx->jrdev, src_dma)) { ··· 787 819 } 788 820 789 821 if (to_hash) { 790 - src_nents = sg_nents_for_len(req->src, 791 - req->nbytes - (*next_buflen)); 822 + int pad_nents; 823 + int src_len = req->nbytes - *next_buflen; 824 + 825 + src_nents = sg_nents_for_len(req->src, src_len); 792 826 if (src_nents < 0) { 793 827 dev_err(jrdev, "Invalid number of src SG.\n"); 794 828 return src_nents; ··· 808 838 } 809 839 810 840 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 811 - sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 812 - sizeof(struct sec4_sg_entry); 841 + pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents); 842 + sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); 813 843 814 844 /* 815 845 * allocate space for base edesc and hw desc commands, 816 846 * link tables 817 847 */ 818 - edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 819 - ctx->sh_desc_update, 848 + edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update, 820 849 ctx->sh_desc_update_dma, flags); 821 850 if (!edesc) { 822 851 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); ··· 835 866 goto unmap_ctx; 836 867 837 868 if (mapped_nents) 838 - sg_to_sec4_sg_last(req->src, mapped_nents, 869 + sg_to_sec4_sg_last(req->src, src_len, 839 870 edesc->sec4_sg + sec4_sg_src_index, 840 871 0); 841 872 else ··· 862 893 863 894 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 864 895 865 - #ifdef DEBUG 866 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 867 - 
DUMP_PREFIX_ADDRESS, 16, 4, desc, 868 - desc_bytes(desc), 1); 869 - #endif 896 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 897 + DUMP_PREFIX_ADDRESS, 16, 4, desc, 898 + desc_bytes(desc), 1); 870 899 871 900 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 872 901 if (ret) ··· 877 910 *buflen = *next_buflen; 878 911 *next_buflen = last_buflen; 879 912 } 880 - #ifdef DEBUG 881 - print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 882 - DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 883 - print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 884 - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 885 - *next_buflen, 1); 886 - #endif 913 + 914 + print_hex_dump_debug("buf@"__stringify(__LINE__)": ", 915 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 916 + print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", 917 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 918 + *next_buflen, 1); 887 919 888 920 return ret; 889 921 unmap_ctx: ··· 901 935 GFP_KERNEL : GFP_ATOMIC; 902 936 int buflen = *current_buflen(state); 903 937 u32 *desc; 904 - int sec4_sg_bytes, sec4_sg_src_index; 938 + int sec4_sg_bytes; 905 939 int digestsize = crypto_ahash_digestsize(ahash); 906 940 struct ahash_edesc *edesc; 907 941 int ret; 908 942 909 - sec4_sg_src_index = 1 + (buflen ? 1 : 0); 910 - sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 943 + sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * 944 + sizeof(struct sec4_sg_entry); 911 945 912 946 /* allocate space for base edesc and hw desc commands, link tables */ 913 - edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 914 - ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 915 - flags); 947 + edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin, 948 + ctx->sh_desc_fin_dma, flags); 916 949 if (!edesc) 917 950 return -ENOMEM; 918 951 ··· 928 963 if (ret) 929 964 goto unmap_ctx; 930 965 931 - sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); 966 + sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 
1 : 0)); 932 967 933 968 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 934 969 sec4_sg_bytes, DMA_TO_DEVICE); ··· 942 977 LDST_SGF); 943 978 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 944 979 945 - #ifdef DEBUG 946 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 947 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 948 - #endif 980 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 981 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 982 + 1); 949 983 950 984 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 951 985 if (ret) ··· 1022 1058 1023 1059 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 1024 1060 1025 - #ifdef DEBUG 1026 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1027 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1028 - #endif 1061 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1062 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1063 + 1); 1029 1064 1030 1065 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1031 1066 if (ret) ··· 1098 1135 return -ENOMEM; 1099 1136 } 1100 1137 1101 - #ifdef DEBUG 1102 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1103 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1104 - #endif 1138 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1139 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1140 + 1); 1105 1141 1106 1142 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1107 1143 if (!ret) { ··· 1152 1190 if (ret) 1153 1191 goto unmap; 1154 1192 1155 - #ifdef DEBUG 1156 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1157 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1158 - #endif 1193 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1194 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1195 + 1); 1159 1196 1160 1197 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1161 1198 if (!ret) { 
··· 1207 1246 } 1208 1247 1209 1248 if (to_hash) { 1210 - src_nents = sg_nents_for_len(req->src, 1211 - req->nbytes - *next_buflen); 1249 + int pad_nents; 1250 + int src_len = req->nbytes - *next_buflen; 1251 + 1252 + src_nents = sg_nents_for_len(req->src, src_len); 1212 1253 if (src_nents < 0) { 1213 1254 dev_err(jrdev, "Invalid number of src SG.\n"); 1214 1255 return src_nents; ··· 1227 1264 mapped_nents = 0; 1228 1265 } 1229 1266 1230 - sec4_sg_bytes = (1 + mapped_nents) * 1231 - sizeof(struct sec4_sg_entry); 1267 + pad_nents = pad_sg_nents(1 + mapped_nents); 1268 + sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); 1232 1269 1233 1270 /* 1234 1271 * allocate space for base edesc and hw desc commands, 1235 1272 * link tables 1236 1273 */ 1237 - edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 1274 + edesc = ahash_edesc_alloc(ctx, pad_nents, 1238 1275 ctx->sh_desc_update_first, 1239 1276 ctx->sh_desc_update_first_dma, 1240 1277 flags); ··· 1250 1287 if (ret) 1251 1288 goto unmap_ctx; 1252 1289 1253 - sg_to_sec4_sg_last(req->src, mapped_nents, 1254 - edesc->sec4_sg + 1, 0); 1290 + sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); 1255 1291 1256 1292 if (*next_buflen) { 1257 1293 scatterwalk_map_and_copy(next_buf, req->src, ··· 1275 1313 if (ret) 1276 1314 goto unmap_ctx; 1277 1315 1278 - #ifdef DEBUG 1279 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1280 - DUMP_PREFIX_ADDRESS, 16, 4, desc, 1281 - desc_bytes(desc), 1); 1282 - #endif 1316 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1317 + DUMP_PREFIX_ADDRESS, 16, 4, desc, 1318 + desc_bytes(desc), 1); 1283 1319 1284 1320 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1285 1321 if (ret) ··· 1293 1333 *buflen = *next_buflen; 1294 1334 *next_buflen = 0; 1295 1335 } 1296 - #ifdef DEBUG 1297 - print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1298 - DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1299 - print_hex_dump(KERN_ERR, "next 
buf@"__stringify(__LINE__)": ", 1300 - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1301 - *next_buflen, 1); 1302 - #endif 1336 + 1337 + print_hex_dump_debug("buf@"__stringify(__LINE__)": ", 1338 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1339 + print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", 1340 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1341 + 1); 1303 1342 1304 1343 return ret; 1305 1344 unmap_ctx: ··· 1373 1414 if (ret) 1374 1415 goto unmap; 1375 1416 1376 - #ifdef DEBUG 1377 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1378 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1379 - #endif 1417 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1418 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1419 + 1); 1380 1420 1381 1421 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1382 1422 if (!ret) { ··· 1475 1517 if (ret) 1476 1518 goto unmap_ctx; 1477 1519 1478 - #ifdef DEBUG 1479 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1480 - DUMP_PREFIX_ADDRESS, 16, 4, desc, 1481 - desc_bytes(desc), 1); 1482 - #endif 1520 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1521 + DUMP_PREFIX_ADDRESS, 16, 4, desc, 1522 + desc_bytes(desc), 1); 1483 1523 1484 1524 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1485 1525 if (ret) ··· 1495 1539 req->nbytes, 0); 1496 1540 switch_buf(state); 1497 1541 } 1498 - #ifdef DEBUG 1499 - print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1500 - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1501 - *next_buflen, 1); 1502 - #endif 1542 + 1543 + print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", 1544 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1545 + 1); 1503 1546 1504 1547 return ret; 1505 1548 unmap_ctx: ··· 1885 1930 caam_jr_free(ctx->jrdev); 1886 1931 } 1887 1932 1888 - static void __exit caam_algapi_hash_exit(void) 1933 + void caam_algapi_hash_exit(void) 1889 1934 { 1890 1935 struct caam_hash_alg *t_alg, *n; 
1891 1936 ··· 1943 1988 return t_alg; 1944 1989 } 1945 1990 1946 - static int __init caam_algapi_hash_init(void) 1991 + int caam_algapi_hash_init(struct device *ctrldev) 1947 1992 { 1948 - struct device_node *dev_node; 1949 - struct platform_device *pdev; 1950 1993 int i = 0, err = 0; 1951 - struct caam_drv_private *priv; 1994 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 1952 1995 unsigned int md_limit = SHA512_DIGEST_SIZE; 1953 1996 u32 md_inst, md_vid; 1954 - 1955 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1956 - if (!dev_node) { 1957 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 1958 - if (!dev_node) 1959 - return -ENODEV; 1960 - } 1961 - 1962 - pdev = of_find_device_by_node(dev_node); 1963 - if (!pdev) { 1964 - of_node_put(dev_node); 1965 - return -ENODEV; 1966 - } 1967 - 1968 - priv = dev_get_drvdata(&pdev->dev); 1969 - of_node_put(dev_node); 1970 - 1971 - /* 1972 - * If priv is NULL, it's probably because the caam driver wasn't 1973 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 1974 - */ 1975 - if (!priv) { 1976 - err = -ENODEV; 1977 - goto out_put_dev; 1978 - } 1979 1997 1980 1998 /* 1981 1999 * Register crypto algorithms the device supports. First, identify ··· 1970 2042 * Skip registration of any hashing algorithms if MD block 1971 2043 * is not present. 
1972 2044 */ 1973 - if (!md_inst) { 1974 - err = -ENODEV; 1975 - goto out_put_dev; 1976 - } 2045 + if (!md_inst) 2046 + return -ENODEV; 1977 2047 1978 2048 /* Limit digest size based on LP256 */ 1979 2049 if (md_vid == CHA_VER_VID_MD_LP256) ··· 2028 2102 list_add_tail(&t_alg->entry, &hash_list); 2029 2103 } 2030 2104 2031 - out_put_dev: 2032 - put_device(&pdev->dev); 2033 2105 return err; 2034 2106 } 2035 - 2036 - module_init(caam_algapi_hash_init); 2037 - module_exit(caam_algapi_hash_exit); 2038 - 2039 - MODULE_LICENSE("GPL"); 2040 - MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 2041 - MODULE_AUTHOR("Freescale Semiconductor - NMG");
+103 -72
drivers/crypto/caam/caampkc.c
··· 3 3 * caam - Freescale FSL CAAM support for Public Key Cryptography 4 4 * 5 5 * Copyright 2016 Freescale Semiconductor, Inc. 6 - * Copyright 2018 NXP 6 + * Copyright 2018-2019 NXP 7 7 * 8 8 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry 9 9 * all the desired key parameters, input and output pointers. ··· 24 24 sizeof(struct rsa_priv_f2_pdb)) 25 25 #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ 26 26 sizeof(struct rsa_priv_f3_pdb)) 27 + #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ 28 + 29 + /* buffer filled with zeros, used for padding */ 30 + static u8 *zero_buffer; 27 31 28 32 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, 29 33 struct akcipher_request *req) 30 34 { 35 + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 36 + 31 37 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); 32 - dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 38 + dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); 33 39 34 40 if (edesc->sec4_sg_bytes) 35 41 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, ··· 174 168 akcipher_request_complete(req, err); 175 169 } 176 170 171 + /** 172 + * Count leading zeros, need it to strip, from a given scatterlist 173 + * 174 + * @sgl : scatterlist to count zeros from 175 + * @nbytes: number of zeros, in bytes, to strip 176 + * @flags : operation flags 177 + */ 177 178 static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, 178 179 unsigned int nbytes, 179 180 unsigned int flags) ··· 200 187 lzeros = 0; 201 188 len = 0; 202 189 while (nbytes > 0) { 203 - while (len && !*buff) { 190 + /* do not strip more than given bytes */ 191 + while (len && !*buff && lzeros < nbytes) { 204 192 lzeros++; 205 193 len--; 206 194 buff++; ··· 232 218 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 233 219 struct device *dev = ctx->dev; 234 220 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 221 
+ struct caam_rsa_key *key = &ctx->key; 235 222 struct rsa_edesc *edesc; 236 223 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 237 224 GFP_KERNEL : GFP_ATOMIC; ··· 240 225 int sgc; 241 226 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 242 227 int src_nents, dst_nents; 228 + unsigned int diff_size = 0; 243 229 int lzeros; 244 230 245 - lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); 246 - if (lzeros < 0) 247 - return ERR_PTR(lzeros); 231 + if (req->src_len > key->n_sz) { 232 + /* 233 + * strip leading zeros and 234 + * return the number of zeros to skip 235 + */ 236 + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len - 237 + key->n_sz, sg_flags); 238 + if (lzeros < 0) 239 + return ERR_PTR(lzeros); 248 240 249 - req->src_len -= lzeros; 250 - req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); 241 + req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src, 242 + lzeros); 243 + req_ctx->fixup_src_len = req->src_len - lzeros; 244 + } else { 245 + /* 246 + * input src is less then n key modulus, 247 + * so there will be zero padding 248 + */ 249 + diff_size = key->n_sz - req->src_len; 250 + req_ctx->fixup_src = req->src; 251 + req_ctx->fixup_src_len = req->src_len; 252 + } 251 253 252 - src_nents = sg_nents_for_len(req->src, req->src_len); 254 + src_nents = sg_nents_for_len(req_ctx->fixup_src, 255 + req_ctx->fixup_src_len); 253 256 dst_nents = sg_nents_for_len(req->dst, req->dst_len); 254 257 255 - if (src_nents > 1) 256 - sec4_sg_len = src_nents; 258 + if (!diff_size && src_nents == 1) 259 + sec4_sg_len = 0; /* no need for an input hw s/g table */ 260 + else 261 + sec4_sg_len = src_nents + !!diff_size; 262 + sec4_sg_index = sec4_sg_len; 257 263 if (dst_nents > 1) 258 - sec4_sg_len += dst_nents; 264 + sec4_sg_len += pad_sg_nents(dst_nents); 265 + else 266 + sec4_sg_len = pad_sg_nents(sec4_sg_len); 259 267 260 268 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 261 269 ··· 288 250 if (!edesc) 
289 251 return ERR_PTR(-ENOMEM); 290 252 291 - sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); 253 + sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); 292 254 if (unlikely(!sgc)) { 293 255 dev_err(dev, "unable to map source\n"); 294 256 goto src_fail; ··· 301 263 } 302 264 303 265 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; 266 + if (diff_size) 267 + dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, 268 + 0); 304 269 305 - sec4_sg_index = 0; 306 - if (src_nents > 1) { 307 - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); 308 - sec4_sg_index += src_nents; 309 - } 270 + if (sec4_sg_index) 271 + sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, 272 + edesc->sec4_sg + !!diff_size, 0); 273 + 310 274 if (dst_nents > 1) 311 - sg_to_sec4_sg_last(req->dst, dst_nents, 275 + sg_to_sec4_sg_last(req->dst, req->dst_len, 312 276 edesc->sec4_sg + sec4_sg_index, 0); 313 277 314 278 /* Save nents for later use in Job Descriptor */ ··· 329 289 330 290 edesc->sec4_sg_bytes = sec4_sg_bytes; 331 291 292 + print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ", 293 + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 294 + edesc->sec4_sg_bytes, 1); 295 + 332 296 return edesc; 333 297 334 298 sec4_sg_fail: 335 299 dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); 336 300 dst_fail: 337 - dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); 301 + dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); 338 302 src_fail: 339 303 kfree(edesc); 340 304 return ERR_PTR(-ENOMEM); ··· 348 304 struct rsa_edesc *edesc) 349 305 { 350 306 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 307 + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 351 308 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 352 309 struct caam_rsa_key *key = &ctx->key; 353 310 struct device *dev = ctx->dev; ··· 373 328 pdb->f_dma = edesc->sec4_sg_dma; 374 329 sec4_sg_index += edesc->src_nents; 375 330 
} else { 376 - pdb->f_dma = sg_dma_address(req->src); 331 + pdb->f_dma = sg_dma_address(req_ctx->fixup_src); 377 332 } 378 333 379 334 if (edesc->dst_nents > 1) { ··· 385 340 } 386 341 387 342 pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; 388 - pdb->f_len = req->src_len; 343 + pdb->f_len = req_ctx->fixup_src_len; 389 344 390 345 return 0; 391 346 } ··· 418 373 pdb->g_dma = edesc->sec4_sg_dma; 419 374 sec4_sg_index += edesc->src_nents; 420 375 } else { 421 - pdb->g_dma = sg_dma_address(req->src); 376 + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 377 + 378 + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); 422 379 } 423 380 424 381 if (edesc->dst_nents > 1) { ··· 483 436 pdb->g_dma = edesc->sec4_sg_dma; 484 437 sec4_sg_index += edesc->src_nents; 485 438 } else { 486 - pdb->g_dma = sg_dma_address(req->src); 439 + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 440 + 441 + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); 487 442 } 488 443 489 444 if (edesc->dst_nents > 1) { ··· 572 523 pdb->g_dma = edesc->sec4_sg_dma; 573 524 sec4_sg_index += edesc->src_nents; 574 525 } else { 575 - pdb->g_dma = sg_dma_address(req->src); 526 + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 527 + 528 + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); 576 529 } 577 530 578 531 if (edesc->dst_nents > 1) { ··· 1029 978 return PTR_ERR(ctx->dev); 1030 979 } 1031 980 981 + ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer, 982 + CAAM_RSA_MAX_INPUT_SIZE - 1, 983 + DMA_TO_DEVICE); 984 + if (dma_mapping_error(ctx->dev, ctx->padding_dma)) { 985 + dev_err(ctx->dev, "unable to map padding\n"); 986 + caam_jr_free(ctx->dev); 987 + return -ENOMEM; 988 + } 989 + 1032 990 return 0; 1033 991 } 1034 992 ··· 1047 987 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 1048 988 struct caam_rsa_key *key = &ctx->key; 1049 989 990 + dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE - 991 + 1, DMA_TO_DEVICE); 1050 992 
caam_rsa_free_key(key); 1051 993 caam_jr_free(ctx->dev); 1052 994 } ··· 1072 1010 }; 1073 1011 1074 1012 /* Public Key Cryptography module initialization handler */ 1075 - static int __init caam_pkc_init(void) 1013 + int caam_pkc_init(struct device *ctrldev) 1076 1014 { 1077 - struct device_node *dev_node; 1078 - struct platform_device *pdev; 1079 - struct device *ctrldev; 1080 - struct caam_drv_private *priv; 1015 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 1081 1016 u32 pk_inst; 1082 1017 int err; 1083 - 1084 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1085 - if (!dev_node) { 1086 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 1087 - if (!dev_node) 1088 - return -ENODEV; 1089 - } 1090 - 1091 - pdev = of_find_device_by_node(dev_node); 1092 - if (!pdev) { 1093 - of_node_put(dev_node); 1094 - return -ENODEV; 1095 - } 1096 - 1097 - ctrldev = &pdev->dev; 1098 - priv = dev_get_drvdata(ctrldev); 1099 - of_node_put(dev_node); 1100 - 1101 - /* 1102 - * If priv is NULL, it's probably because the caam driver wasn't 1103 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 1104 - */ 1105 - if (!priv) { 1106 - err = -ENODEV; 1107 - goto out_put_dev; 1108 - } 1109 1018 1110 1019 /* Determine public key hardware accelerator presence. */ 1111 1020 if (priv->era < 10) ··· 1086 1053 pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; 1087 1054 1088 1055 /* Do not register algorithms if PKHA is not present. 
*/ 1089 - if (!pk_inst) { 1090 - err = -ENODEV; 1091 - goto out_put_dev; 1092 - } 1056 + if (!pk_inst) 1057 + return 0; 1058 + 1059 + /* allocate zero buffer, used for padding input */ 1060 + zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | 1061 + GFP_KERNEL); 1062 + if (!zero_buffer) 1063 + return -ENOMEM; 1093 1064 1094 1065 err = crypto_register_akcipher(&caam_rsa); 1095 - if (err) 1066 + if (err) { 1067 + kfree(zero_buffer); 1096 1068 dev_warn(ctrldev, "%s alg registration failed\n", 1097 1069 caam_rsa.base.cra_driver_name); 1098 - else 1070 + } else { 1099 1071 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); 1072 + } 1100 1073 1101 - out_put_dev: 1102 - put_device(ctrldev); 1103 1074 return err; 1104 1075 } 1105 1076 1106 - static void __exit caam_pkc_exit(void) 1077 + void caam_pkc_exit(void) 1107 1078 { 1079 + kfree(zero_buffer); 1108 1080 crypto_unregister_akcipher(&caam_rsa); 1109 1081 } 1110 - 1111 - module_init(caam_pkc_init); 1112 - module_exit(caam_pkc_exit); 1113 - 1114 - MODULE_LICENSE("Dual BSD/GPL"); 1115 - MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API"); 1116 - MODULE_AUTHOR("Freescale Semiconductor");
+8 -1
drivers/crypto/caam/caampkc.h
··· 89 89 * caam_rsa_ctx - per session context. 90 90 * @key : RSA key in DMA zone 91 91 * @dev : device structure 92 + * @padding_dma : dma address of padding, for adding it to the input 92 93 */ 93 94 struct caam_rsa_ctx { 94 95 struct caam_rsa_key key; 95 96 struct device *dev; 97 + dma_addr_t padding_dma; 98 + 96 99 }; 97 100 98 101 /** 99 102 * caam_rsa_req_ctx - per request context. 100 - * @src: input scatterlist (stripped of leading zeros) 103 + * @src : input scatterlist (stripped of leading zeros) 104 + * @fixup_src : input scatterlist (that might be stripped of leading zeros) 105 + * @fixup_src_len : length of the fixup_src input scatterlist 101 106 */ 102 107 struct caam_rsa_req_ctx { 103 108 struct scatterlist src[2]; 109 + struct scatterlist *fixup_src; 110 + unsigned int fixup_src_len; 104 111 }; 105 112 106 113 /**
+17 -59
drivers/crypto/caam/caamrng.c
··· 3 3 * caam - Freescale FSL CAAM support for hw_random 4 4 * 5 5 * Copyright 2011 Freescale Semiconductor, Inc. 6 - * Copyright 2018 NXP 6 + * Copyright 2018-2019 NXP 7 7 * 8 8 * Based on caamalg.c crypto API driver. 9 9 * ··· 113 113 /* Buffer refilled, invalidate cache */ 114 114 dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); 115 115 116 - #ifdef DEBUG 117 - print_hex_dump(KERN_ERR, "rng refreshed buf@: ", 118 - DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); 119 - #endif 116 + print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4, 117 + bd->buf, RN_BUF_SIZE, 1); 120 118 } 121 119 122 120 static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) ··· 207 209 dev_err(jrdev, "unable to map shared descriptor\n"); 208 210 return -ENOMEM; 209 211 } 210 - #ifdef DEBUG 211 - print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, 212 - desc, desc_bytes(desc), 1); 213 - #endif 212 + 213 + print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, 214 + desc, desc_bytes(desc), 1); 215 + 214 216 return 0; 215 217 } 216 218 ··· 231 233 } 232 234 233 235 append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); 234 - #ifdef DEBUG 235 - print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, 236 - desc, desc_bytes(desc), 1); 237 - #endif 236 + 237 + print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, 238 + desc, desc_bytes(desc), 1); 239 + 238 240 return 0; 239 241 } 240 242 ··· 294 296 .read = caam_read, 295 297 }; 296 298 297 - static void __exit caam_rng_exit(void) 299 + void caam_rng_exit(void) 298 300 { 299 301 caam_jr_free(rng_ctx->jrdev); 300 302 hwrng_unregister(&caam_rng); 301 303 kfree(rng_ctx); 302 304 } 303 305 304 - static int __init caam_rng_init(void) 306 + int caam_rng_init(struct device *ctrldev) 305 307 { 306 308 struct device *dev; 307 - struct device_node *dev_node; 308 - struct platform_device *pdev; 309 - struct caam_drv_private *priv; 
310 309 u32 rng_inst; 310 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 311 311 int err; 312 - 313 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 314 - if (!dev_node) { 315 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 316 - if (!dev_node) 317 - return -ENODEV; 318 - } 319 - 320 - pdev = of_find_device_by_node(dev_node); 321 - if (!pdev) { 322 - of_node_put(dev_node); 323 - return -ENODEV; 324 - } 325 - 326 - priv = dev_get_drvdata(&pdev->dev); 327 - of_node_put(dev_node); 328 - 329 - /* 330 - * If priv is NULL, it's probably because the caam driver wasn't 331 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 332 - */ 333 - if (!priv) { 334 - err = -ENODEV; 335 - goto out_put_dev; 336 - } 337 312 338 313 /* Check for an instantiated RNG before registration */ 339 314 if (priv->era < 10) ··· 315 344 else 316 345 rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; 317 346 318 - if (!rng_inst) { 319 - err = -ENODEV; 320 - goto out_put_dev; 321 - } 347 + if (!rng_inst) 348 + return 0; 322 349 323 350 dev = caam_jr_alloc(); 324 351 if (IS_ERR(dev)) { 325 352 pr_err("Job Ring Device allocation for transform failed\n"); 326 - err = PTR_ERR(dev); 327 - goto out_put_dev; 353 + return PTR_ERR(dev); 328 354 } 329 355 rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); 330 356 if (!rng_ctx) { ··· 332 364 if (err) 333 365 goto free_rng_ctx; 334 366 335 - put_device(&pdev->dev); 336 367 dev_info(dev, "registering rng-caam\n"); 337 368 return hwrng_register(&caam_rng); 338 369 ··· 339 372 kfree(rng_ctx); 340 373 free_caam_alloc: 341 374 caam_jr_free(dev); 342 - out_put_dev: 343 - put_device(&pdev->dev); 344 375 return err; 345 376 } 346 - 347 - module_init(caam_rng_init); 348 - module_exit(caam_rng_exit); 349 - 350 - MODULE_LICENSE("GPL"); 351 - MODULE_DESCRIPTION("FSL CAAM support for hw_random API"); 352 - MODULE_AUTHOR("Freescale Semiconductor - NMG");
+32 -24
drivers/crypto/caam/ctrl.c
··· 3 3 * Controller-level driver, kernel property detection, initialization 4 4 * 5 5 * Copyright 2008-2012 Freescale Semiconductor, Inc. 6 - * Copyright 2018 NXP 6 + * Copyright 2018-2019 NXP 7 7 */ 8 8 9 9 #include <linux/device.h> ··· 323 323 of_platform_depopulate(ctrldev); 324 324 325 325 #ifdef CONFIG_CAAM_QI 326 - if (ctrlpriv->qidev) 327 - caam_qi_shutdown(ctrlpriv->qidev); 326 + if (ctrlpriv->qi_init) 327 + caam_qi_shutdown(ctrldev); 328 328 #endif 329 329 330 330 /* ··· 540 540 ctrlpriv->caam_ipg = clk; 541 541 542 542 if (!of_machine_is_compatible("fsl,imx7d") && 543 - !of_machine_is_compatible("fsl,imx7s")) { 543 + !of_machine_is_compatible("fsl,imx7s") && 544 + !of_machine_is_compatible("fsl,imx7ulp")) { 544 545 clk = caam_drv_identify_clk(&pdev->dev, "mem"); 545 546 if (IS_ERR(clk)) { 546 547 ret = PTR_ERR(clk); ··· 563 562 564 563 if (!of_machine_is_compatible("fsl,imx6ul") && 565 564 !of_machine_is_compatible("fsl,imx7d") && 566 - !of_machine_is_compatible("fsl,imx7s")) { 565 + !of_machine_is_compatible("fsl,imx7s") && 566 + !of_machine_is_compatible("fsl,imx7ulp")) { 567 567 clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); 568 568 if (IS_ERR(clk)) { 569 569 ret = PTR_ERR(clk); ··· 704 702 } 705 703 706 704 ctrlpriv->era = caam_get_era(ctrl); 707 - 708 - ret = of_platform_populate(nprop, caam_match, NULL, dev); 709 - if (ret) { 710 - dev_err(dev, "JR platform devices creation error\n"); 711 - goto iounmap_ctrl; 712 - } 705 + ctrlpriv->domain = iommu_get_domain_for_dev(dev); 713 706 714 707 #ifdef CONFIG_DEBUG_FS 715 708 /* ··· 717 720 ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL); 718 721 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); 719 722 #endif 720 - 721 - ring = 0; 722 - for_each_available_child_of_node(nprop, np) 723 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || 724 - of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { 725 - ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) 726 
- ((__force uint8_t *)ctrl + 727 - (ring + JR_BLOCK_NUMBER) * 728 - BLOCK_OFFSET 729 - ); 730 - ctrlpriv->total_jobrs++; 731 - ring++; 732 - } 733 723 734 724 /* Check to see if (DPAA 1.x) QI present. If so, enable */ 735 725 ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); ··· 735 751 dev_err(dev, "caam qi i/f init failed: %d\n", ret); 736 752 #endif 737 753 } 754 + 755 + ret = of_platform_populate(nprop, caam_match, NULL, dev); 756 + if (ret) { 757 + dev_err(dev, "JR platform devices creation error\n"); 758 + goto shutdown_qi; 759 + } 760 + 761 + ring = 0; 762 + for_each_available_child_of_node(nprop, np) 763 + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || 764 + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { 765 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) 766 + ((__force uint8_t *)ctrl + 767 + (ring + JR_BLOCK_NUMBER) * 768 + BLOCK_OFFSET 769 + ); 770 + ctrlpriv->total_jobrs++; 771 + ring++; 772 + } 738 773 739 774 /* If no QI and no rings specified, quit and go home */ 740 775 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { ··· 901 898 caam_remove(pdev); 902 899 return ret; 903 900 901 + shutdown_qi: 902 + #ifdef CONFIG_CAAM_QI 903 + if (ctrlpriv->qi_init) 904 + caam_qi_shutdown(dev); 905 + #endif 904 906 iounmap_ctrl: 905 907 iounmap(ctrl); 906 908 disable_caam_emi_slow:
+11
drivers/crypto/caam/desc_constr.h
··· 3 3 * caam descriptor construction helper functions 4 4 * 5 5 * Copyright 2008-2012 Freescale Semiconductor, Inc. 6 + * Copyright 2019 NXP 6 7 */ 7 8 8 9 #ifndef DESC_CONSTR_H ··· 37 36 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) 38 37 39 38 extern bool caam_little_end; 39 + 40 + /* 41 + * HW fetches 4 S/G table entries at a time, irrespective of how many entries 42 + * are in the table. It's SW's responsibility to make sure these accesses 43 + * do not have side effects. 44 + */ 45 + static inline int pad_sg_nents(int sg_nents) 46 + { 47 + return ALIGN(sg_nents, 4); 48 + } 40 49 41 50 static inline int desc_len(u32 * const desc) 42 51 {
+4 -4
drivers/crypto/caam/error.c
··· 13 13 #ifdef DEBUG 14 14 #include <linux/highmem.h> 15 15 16 - void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 16 + void caam_dump_sg(const char *prefix_str, int prefix_type, 17 17 int rowsize, int groupsize, struct scatterlist *sg, 18 18 size_t tlen, bool ascii) 19 19 { ··· 35 35 36 36 buf = it_page + it->offset; 37 37 len = min_t(size_t, tlen, it->length); 38 - print_hex_dump(level, prefix_str, prefix_type, rowsize, 39 - groupsize, buf, len, ascii); 38 + print_hex_dump_debug(prefix_str, prefix_type, rowsize, 39 + groupsize, buf, len, ascii); 40 40 tlen -= len; 41 41 42 42 kunmap_atomic(it_page); 43 43 } 44 44 } 45 45 #else 46 - void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 46 + void caam_dump_sg(const char *prefix_str, int prefix_type, 47 47 int rowsize, int groupsize, struct scatterlist *sg, 48 48 size_t tlen, bool ascii) 49 49 {}
+1 -1
drivers/crypto/caam/error.h
··· 17 17 #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) 18 18 #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) 19 19 20 - void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 20 + void caam_dump_sg(const char *prefix_str, int prefix_type, 21 21 int rowsize, int groupsize, struct scatterlist *sg, 22 22 size_t tlen, bool ascii); 23 23
+95 -7
drivers/crypto/caam/intern.h
··· 4 4 * Private/internal definitions between modules 5 5 * 6 6 * Copyright 2008-2011 Freescale Semiconductor, Inc. 7 - * 7 + * Copyright 2019 NXP 8 8 */ 9 9 10 10 #ifndef INTERN_H ··· 63 63 * Driver-private storage for a single CAAM block instance 64 64 */ 65 65 struct caam_drv_private { 66 - #ifdef CONFIG_CAAM_QI 67 - struct device *qidev; 68 - #endif 69 - 70 66 /* Physical-presence section */ 71 67 struct caam_ctrl __iomem *ctrl; /* controller region */ 72 68 struct caam_deco __iomem *deco; /* DECO/CCB views */ ··· 70 74 struct caam_queue_if __iomem *qi; /* QI control region */ 71 75 struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ 72 76 77 + struct iommu_domain *domain; 78 + 73 79 /* 74 80 * Detected geometry block. Filled in from device tree if powerpc, 75 81 * or from register-based version detection code 76 82 */ 77 83 u8 total_jobrs; /* Total Job Rings in device */ 78 84 u8 qi_present; /* Nonzero if QI present in device */ 85 + #ifdef CONFIG_CAAM_QI 86 + u8 qi_init; /* Nonzero if QI has been initialized */ 87 + #endif 79 88 u8 mc_en; /* Nonzero if MC f/w is active */ 80 89 int secvio_irq; /* Security violation interrupt number */ 81 90 int virt_en; /* Virtualization enabled in CAAM */ ··· 108 107 #endif 109 108 }; 110 109 111 - void caam_jr_algapi_init(struct device *dev); 112 - void caam_jr_algapi_remove(struct device *dev); 110 + #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API 111 + 112 + int caam_algapi_init(struct device *dev); 113 + void caam_algapi_exit(void); 114 + 115 + #else 116 + 117 + static inline int caam_algapi_init(struct device *dev) 118 + { 119 + return 0; 120 + } 121 + 122 + static inline void caam_algapi_exit(void) 123 + { 124 + } 125 + 126 + #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */ 127 + 128 + #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API 129 + 130 + int caam_algapi_hash_init(struct device *dev); 131 + void caam_algapi_hash_exit(void); 132 + 133 + #else 134 + 135 + static inline int caam_algapi_hash_init(struct 
device *dev) 136 + { 137 + return 0; 138 + } 139 + 140 + static inline void caam_algapi_hash_exit(void) 141 + { 142 + } 143 + 144 + #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */ 145 + 146 + #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API 147 + 148 + int caam_pkc_init(struct device *dev); 149 + void caam_pkc_exit(void); 150 + 151 + #else 152 + 153 + static inline int caam_pkc_init(struct device *dev) 154 + { 155 + return 0; 156 + } 157 + 158 + static inline void caam_pkc_exit(void) 159 + { 160 + } 161 + 162 + #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */ 163 + 164 + #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API 165 + 166 + int caam_rng_init(struct device *dev); 167 + void caam_rng_exit(void); 168 + 169 + #else 170 + 171 + static inline int caam_rng_init(struct device *dev) 172 + { 173 + return 0; 174 + } 175 + 176 + static inline void caam_rng_exit(void) 177 + { 178 + } 179 + 180 + #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */ 181 + 182 + #ifdef CONFIG_CAAM_QI 183 + 184 + int caam_qi_algapi_init(struct device *dev); 185 + void caam_qi_algapi_exit(void); 186 + 187 + #else 188 + 189 + static inline int caam_qi_algapi_init(struct device *dev) 190 + { 191 + return 0; 192 + } 193 + 194 + static inline void caam_qi_algapi_exit(void) 195 + { 196 + } 197 + 198 + #endif /* CONFIG_CAAM_QI */ 113 199 114 200 #ifdef CONFIG_DEBUG_FS 115 201 static int caam_debugfs_u64_get(void *data, u64 *val)
+43
drivers/crypto/caam/jr.c
··· 4 4 * JobR backend functionality 5 5 * 6 6 * Copyright 2008-2012 Freescale Semiconductor, Inc. 7 + * Copyright 2019 NXP 7 8 */ 8 9 9 10 #include <linux/of_irq.h> ··· 24 23 } ____cacheline_aligned; 25 24 26 25 static struct jr_driver_data driver_data; 26 + static DEFINE_MUTEX(algs_lock); 27 + static unsigned int active_devs; 28 + 29 + static void register_algs(struct device *dev) 30 + { 31 + mutex_lock(&algs_lock); 32 + 33 + if (++active_devs != 1) 34 + goto algs_unlock; 35 + 36 + caam_algapi_init(dev); 37 + caam_algapi_hash_init(dev); 38 + caam_pkc_init(dev); 39 + caam_rng_init(dev); 40 + caam_qi_algapi_init(dev); 41 + 42 + algs_unlock: 43 + mutex_unlock(&algs_lock); 44 + } 45 + 46 + static void unregister_algs(void) 47 + { 48 + mutex_lock(&algs_lock); 49 + 50 + if (--active_devs != 0) 51 + goto algs_unlock; 52 + 53 + caam_qi_algapi_exit(); 54 + 55 + caam_rng_exit(); 56 + caam_pkc_exit(); 57 + caam_algapi_hash_exit(); 58 + caam_algapi_exit(); 59 + 60 + algs_unlock: 61 + mutex_unlock(&algs_lock); 62 + } 27 63 28 64 static int caam_reset_hw_jr(struct device *dev) 29 65 { ··· 146 108 dev_err(jrdev, "Device is busy\n"); 147 109 return -EBUSY; 148 110 } 111 + 112 + /* Unregister JR-based RNG & crypto algorithms */ 113 + unregister_algs(); 149 114 150 115 /* Remove the node from Physical JobR list maintained by driver */ 151 116 spin_lock(&driver_data.jr_alloc_lock); ··· 581 540 spin_unlock(&driver_data.jr_alloc_lock); 582 541 583 542 atomic_set(&jrpriv->tfm_count, 0); 543 + 544 + register_algs(jrdev->parent); 584 545 585 546 return 0; 586 547 }
+11 -17
drivers/crypto/caam/key_gen.c
··· 16 16 { 17 17 struct split_key_result *res = context; 18 18 19 - #ifdef DEBUG 20 - dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 21 - #endif 19 + dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 22 20 23 21 if (err) 24 22 caam_jr_strstatus(dev, err); ··· 53 55 adata->keylen_pad = split_key_pad_len(adata->algtype & 54 56 OP_ALG_ALGSEL_MASK); 55 57 56 - #ifdef DEBUG 57 - dev_err(jrdev, "split keylen %d split keylen padded %d\n", 58 + dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", 58 59 adata->keylen, adata->keylen_pad); 59 - print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 60 - DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); 61 - #endif 60 + print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", 61 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); 62 62 63 63 if (adata->keylen_pad > max_keylen) 64 64 return -EINVAL; ··· 98 102 append_fifo_store(desc, dma_addr, adata->keylen, 99 103 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); 100 104 101 - #ifdef DEBUG 102 - print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 103 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 104 - #endif 105 + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 106 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 107 + 1); 105 108 106 109 result.err = 0; 107 110 init_completion(&result.completion); ··· 110 115 /* in progress */ 111 116 wait_for_completion(&result.completion); 112 117 ret = result.err; 113 - #ifdef DEBUG 114 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 115 - DUMP_PREFIX_ADDRESS, 16, 4, key_out, 116 - adata->keylen_pad, 1); 117 - #endif 118 + 119 + print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", 120 + DUMP_PREFIX_ADDRESS, 16, 4, key_out, 121 + adata->keylen_pad, 1); 118 122 } 119 123 120 124 dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
+22 -30
drivers/crypto/caam/qi.c
··· 4 4 * Queue Interface backend functionality 5 5 * 6 6 * Copyright 2013-2016 Freescale Semiconductor, Inc. 7 - * Copyright 2016-2017 NXP 7 + * Copyright 2016-2017, 2019 NXP 8 8 */ 9 9 10 10 #include <linux/cpumask.h> ··· 18 18 #include "desc_constr.h" 19 19 20 20 #define PREHDR_RSLS_SHIFT 31 21 + #define PREHDR_ABS BIT(25) 21 22 22 23 /* 23 24 * Use a reasonable backlog of frames (per CPU) as congestion threshold, ··· 59 58 /* 60 59 * caam_qi_priv - CAAM QI backend private params 61 60 * @cgr: QMan congestion group 62 - * @qi_pdev: platform device for QI backend 63 61 */ 64 62 struct caam_qi_priv { 65 63 struct qman_cgr cgr; 66 - struct platform_device *qi_pdev; 67 64 }; 68 65 69 66 static struct caam_qi_priv qipriv ____cacheline_aligned; ··· 93 94 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here 94 95 */ 95 96 static struct kmem_cache *qi_cache; 97 + 98 + static void *caam_iova_to_virt(struct iommu_domain *domain, 99 + dma_addr_t iova_addr) 100 + { 101 + phys_addr_t phys_addr; 102 + 103 + phys_addr = domain ? 
iommu_iova_to_phys(domain, iova_addr) : iova_addr; 104 + 105 + return phys_to_virt(phys_addr); 106 + } 96 107 97 108 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) 98 109 { ··· 144 135 const struct qm_fd *fd; 145 136 struct caam_drv_req *drv_req; 146 137 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); 138 + struct caam_drv_private *priv = dev_get_drvdata(qidev); 147 139 148 140 fd = &msg->ern.fd; 149 141 ··· 153 143 return; 154 144 } 155 145 156 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); 146 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); 157 147 if (!drv_req) { 158 148 dev_err(qidev, 159 149 "Can't find original request for CAAM response\n"); ··· 356 346 */ 357 347 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | 358 348 num_words); 349 + drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); 359 350 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); 360 351 dma_sync_single_for_device(qidev, drv_ctx->context_a, 361 352 sizeof(drv_ctx->sh_desc) + ··· 412 401 */ 413 402 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | 414 403 num_words); 404 + drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); 415 405 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); 416 406 size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); 417 407 hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, ··· 500 488 void caam_qi_shutdown(struct device *qidev) 501 489 { 502 490 int i; 503 - struct caam_qi_priv *priv = dev_get_drvdata(qidev); 491 + struct caam_qi_priv *priv = &qipriv; 504 492 const cpumask_t *cpus = qman_affine_cpus(); 505 493 506 494 for_each_cpu(i, cpus) { ··· 518 506 qman_release_cgrid(priv->cgr.cgrid); 519 507 520 508 kmem_cache_destroy(qi_cache); 521 - 522 - platform_device_unregister(priv->qi_pdev); 523 509 } 524 510 525 511 static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) ··· 560 550 struct caam_drv_req *drv_req; 561 551 const 
struct qm_fd *fd; 562 552 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); 553 + struct caam_drv_private *priv = dev_get_drvdata(qidev); 563 554 u32 status; 564 555 565 556 if (caam_qi_napi_schedule(p, caam_napi)) ··· 583 572 return qman_cb_dqrr_consume; 584 573 } 585 574 586 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); 575 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); 587 576 if (unlikely(!drv_req)) { 588 577 dev_err(qidev, 589 578 "Can't find original request for caam response\n"); ··· 703 692 int caam_qi_init(struct platform_device *caam_pdev) 704 693 { 705 694 int err, i; 706 - struct platform_device *qi_pdev; 707 695 struct device *ctrldev = &caam_pdev->dev, *qidev; 708 696 struct caam_drv_private *ctrlpriv; 709 697 const cpumask_t *cpus = qman_affine_cpus(); 710 - static struct platform_device_info qi_pdev_info = { 711 - .name = "caam_qi", 712 - .id = PLATFORM_DEVID_NONE 713 - }; 714 - 715 - qi_pdev_info.parent = ctrldev; 716 - qi_pdev_info.dma_mask = dma_get_mask(ctrldev); 717 - qi_pdev = platform_device_register_full(&qi_pdev_info); 718 - if (IS_ERR(qi_pdev)) 719 - return PTR_ERR(qi_pdev); 720 - set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev)); 721 698 722 699 ctrlpriv = dev_get_drvdata(ctrldev); 723 - qidev = &qi_pdev->dev; 724 - 725 - qipriv.qi_pdev = qi_pdev; 726 - dev_set_drvdata(qidev, &qipriv); 700 + qidev = ctrldev; 727 701 728 702 /* Initialize the congestion detection */ 729 703 err = init_cgr(qidev); 730 704 if (err) { 731 705 dev_err(qidev, "CGR initialization failed: %d\n", err); 732 - platform_device_unregister(qi_pdev); 733 706 return err; 734 707 } 735 708 ··· 722 727 if (err) { 723 728 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); 724 729 free_rsp_fqs(); 725 - platform_device_unregister(qi_pdev); 726 730 return err; 727 731 } 728 732 ··· 744 750 napi_enable(irqtask); 745 751 } 746 752 747 - /* Hook up QI device to parent controlling caam device */ 748 - 
ctrlpriv->qidev = qidev; 749 - 750 753 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, 751 754 SLAB_CACHE_DMA, NULL); 752 755 if (!qi_cache) { 753 756 dev_err(qidev, "Can't allocate CAAM cache\n"); 754 757 free_rsp_fqs(); 755 - platform_device_unregister(qi_pdev); 756 758 return -ENOMEM; 757 759 } 758 760 ··· 756 766 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, 757 767 &times_congested, &caam_fops_u64_ro); 758 768 #endif 769 + 770 + ctrlpriv->qi_init = 1; 759 771 dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); 760 772 return 0; 761 773 }
+11 -7
drivers/crypto/caam/sg_sw_qm.h
··· 54 54 * but does not have final bit; instead, returns last entry 55 55 */ 56 56 static inline struct qm_sg_entry * 57 - sg_to_qm_sg(struct scatterlist *sg, int sg_count, 57 + sg_to_qm_sg(struct scatterlist *sg, int len, 58 58 struct qm_sg_entry *qm_sg_ptr, u16 offset) 59 59 { 60 - while (sg_count && sg) { 61 - dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), 62 - sg_dma_len(sg), offset); 60 + int ent_len; 61 + 62 + while (len) { 63 + ent_len = min_t(int, sg_dma_len(sg), len); 64 + 65 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, 66 + offset); 63 67 qm_sg_ptr++; 64 68 sg = sg_next(sg); 65 - sg_count--; 69 + len -= ent_len; 66 70 } 67 71 return qm_sg_ptr - 1; 68 72 } ··· 75 71 * convert scatterlist to h/w link table format 76 72 * scatterlist must have been previously dma mapped 77 73 */ 78 - static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, 74 + static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, 79 75 struct qm_sg_entry *qm_sg_ptr, u16 offset) 80 76 { 81 - qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); 77 + qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); 82 78 qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); 83 79 } 84 80
+11 -7
drivers/crypto/caam/sg_sw_qm2.h
··· 25 25 * but does not have final bit; instead, returns last entry 26 26 */ 27 27 static inline struct dpaa2_sg_entry * 28 - sg_to_qm_sg(struct scatterlist *sg, int sg_count, 28 + sg_to_qm_sg(struct scatterlist *sg, int len, 29 29 struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) 30 30 { 31 - while (sg_count && sg) { 32 - dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), 33 - sg_dma_len(sg), offset); 31 + int ent_len; 32 + 33 + while (len) { 34 + ent_len = min_t(int, sg_dma_len(sg), len); 35 + 36 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, 37 + offset); 34 38 qm_sg_ptr++; 35 39 sg = sg_next(sg); 36 - sg_count--; 40 + len -= ent_len; 37 41 } 38 42 return qm_sg_ptr - 1; 39 43 } ··· 46 42 * convert scatterlist to h/w link table format 47 43 * scatterlist must have been previously dma mapped 48 44 */ 49 - static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, 45 + static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, 50 46 struct dpaa2_sg_entry *qm_sg_ptr, 51 47 u16 offset) 52 48 { 53 - qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); 49 + qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); 54 50 dpaa2_sg_set_final(qm_sg_ptr, true); 55 51 } 56 52
+14 -12
drivers/crypto/caam/sg_sw_sec4.h
··· 35 35 sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & 36 36 SEC4_SG_OFFSET_MASK); 37 37 } 38 - #ifdef DEBUG 39 - print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", 40 - DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, 41 - sizeof(struct sec4_sg_entry), 1); 42 - #endif 38 + 39 + print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4, 40 + sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1); 43 41 } 44 42 45 43 /* ··· 45 47 * but does not have final bit; instead, returns last entry 46 48 */ 47 49 static inline struct sec4_sg_entry * 48 - sg_to_sec4_sg(struct scatterlist *sg, int sg_count, 50 + sg_to_sec4_sg(struct scatterlist *sg, int len, 49 51 struct sec4_sg_entry *sec4_sg_ptr, u16 offset) 50 52 { 51 - while (sg_count) { 52 - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), 53 - sg_dma_len(sg), offset); 53 + int ent_len; 54 + 55 + while (len) { 56 + ent_len = min_t(int, sg_dma_len(sg), len); 57 + 58 + dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len, 59 + offset); 54 60 sec4_sg_ptr++; 55 61 sg = sg_next(sg); 56 - sg_count--; 62 + len -= ent_len; 57 63 } 58 64 return sec4_sg_ptr - 1; 59 65 } ··· 74 72 * convert scatterlist to h/w link table format 75 73 * scatterlist must have been previously dma mapped 76 74 */ 77 - static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, 75 + static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len, 78 76 struct sec4_sg_entry *sec4_sg_ptr, 79 77 u16 offset) 80 78 { 81 - sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); 79 + sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset); 82 80 sg_to_sec4_set_last(sec4_sg_ptr); 83 81 } 84 82
-1
drivers/crypto/cavium/cpt/cptvf_algs.c
··· 7 7 #include <crypto/aes.h> 8 8 #include <crypto/algapi.h> 9 9 #include <crypto/authenc.h> 10 - #include <crypto/crypto_wq.h> 11 10 #include <crypto/des.h> 12 11 #include <crypto/xts.h> 13 12 #include <linux/crypto.h>
+1 -1
drivers/crypto/cavium/nitrox/nitrox_debugfs.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #ifndef __NITROX_DEBUGFS_H 3 3 #define __NITROX_DEBUGFS_H 4 4
+1 -1
drivers/crypto/cavium/nitrox/nitrox_mbx.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #ifndef __NITROX_MBX_H 3 3 #define __NITROX_MBX_H 4 4
+3 -4
drivers/crypto/ccp/ccp-crypto-aes.c
··· 2 2 /* 3 3 * AMD Cryptographic Coprocessor (CCP) AES crypto API support 4 4 * 5 - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 5 + * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. 6 6 * 7 7 * Author: Tom Lendacky <thomas.lendacky@amd.com> 8 8 */ ··· 76 76 return -EINVAL; 77 77 78 78 if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) || 79 - (ctx->u.aes.mode == CCP_AES_MODE_CBC) || 80 - (ctx->u.aes.mode == CCP_AES_MODE_CFB)) && 79 + (ctx->u.aes.mode == CCP_AES_MODE_CBC)) && 81 80 (req->nbytes & (AES_BLOCK_SIZE - 1))) 82 81 return -EINVAL; 83 82 ··· 287 288 .version = CCP_VERSION(3, 0), 288 289 .name = "cfb(aes)", 289 290 .driver_name = "cfb-aes-ccp", 290 - .blocksize = AES_BLOCK_SIZE, 291 + .blocksize = 1, 291 292 .ivsize = AES_BLOCK_SIZE, 292 293 .alg_defaults = &ccp_aes_defaults, 293 294 },
+51 -45
drivers/crypto/ccp/ccp-dev.c
··· 32 32 }; 33 33 34 34 /* Human-readable error strings */ 35 + #define CCP_MAX_ERROR_CODE 64 35 36 static char *ccp_error_codes[] = { 36 37 "", 37 - "ERR 01: ILLEGAL_ENGINE", 38 - "ERR 02: ILLEGAL_KEY_ID", 39 - "ERR 03: ILLEGAL_FUNCTION_TYPE", 40 - "ERR 04: ILLEGAL_FUNCTION_MODE", 41 - "ERR 05: ILLEGAL_FUNCTION_ENCRYPT", 42 - "ERR 06: ILLEGAL_FUNCTION_SIZE", 43 - "ERR 07: Zlib_MISSING_INIT_EOM", 44 - "ERR 08: ILLEGAL_FUNCTION_RSVD", 45 - "ERR 09: ILLEGAL_BUFFER_LENGTH", 46 - "ERR 10: VLSB_FAULT", 47 - "ERR 11: ILLEGAL_MEM_ADDR", 48 - "ERR 12: ILLEGAL_MEM_SEL", 49 - "ERR 13: ILLEGAL_CONTEXT_ID", 50 - "ERR 14: ILLEGAL_KEY_ADDR", 51 - "ERR 15: 0xF Reserved", 52 - "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE", 53 - "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE", 54 - "ERR 18: CMD_TIMEOUT", 55 - "ERR 19: IDMA0_AXI_SLVERR", 56 - "ERR 20: IDMA0_AXI_DECERR", 57 - "ERR 21: 0x15 Reserved", 58 - "ERR 22: IDMA1_AXI_SLAVE_FAULT", 59 - "ERR 23: IDMA1_AIXI_DECERR", 60 - "ERR 24: 0x18 Reserved", 61 - "ERR 25: ZLIBVHB_AXI_SLVERR", 62 - "ERR 26: ZLIBVHB_AXI_DECERR", 63 - "ERR 27: 0x1B Reserved", 64 - "ERR 27: ZLIB_UNEXPECTED_EOM", 65 - "ERR 27: ZLIB_EXTRA_DATA", 66 - "ERR 30: ZLIB_BTYPE", 67 - "ERR 31: ZLIB_UNDEFINED_SYMBOL", 68 - "ERR 32: ZLIB_UNDEFINED_DISTANCE_S", 69 - "ERR 33: ZLIB_CODE_LENGTH_SYMBOL", 70 - "ERR 34: ZLIB _VHB_ILLEGAL_FETCH", 71 - "ERR 35: ZLIB_UNCOMPRESSED_LEN", 72 - "ERR 36: ZLIB_LIMIT_REACHED", 73 - "ERR 37: ZLIB_CHECKSUM_MISMATCH0", 74 - "ERR 38: ODMA0_AXI_SLVERR", 75 - "ERR 39: ODMA0_AXI_DECERR", 76 - "ERR 40: 0x28 Reserved", 77 - "ERR 41: ODMA1_AXI_SLVERR", 78 - "ERR 42: ODMA1_AXI_DECERR", 79 - "ERR 43: LSB_PARITY_ERR", 38 + "ILLEGAL_ENGINE", 39 + "ILLEGAL_KEY_ID", 40 + "ILLEGAL_FUNCTION_TYPE", 41 + "ILLEGAL_FUNCTION_MODE", 42 + "ILLEGAL_FUNCTION_ENCRYPT", 43 + "ILLEGAL_FUNCTION_SIZE", 44 + "Zlib_MISSING_INIT_EOM", 45 + "ILLEGAL_FUNCTION_RSVD", 46 + "ILLEGAL_BUFFER_LENGTH", 47 + "VLSB_FAULT", 48 + "ILLEGAL_MEM_ADDR", 49 + "ILLEGAL_MEM_SEL", 50 + "ILLEGAL_CONTEXT_ID", 51 + 
"ILLEGAL_KEY_ADDR", 52 + "0xF Reserved", 53 + "Zlib_ILLEGAL_MULTI_QUEUE", 54 + "Zlib_ILLEGAL_JOBID_CHANGE", 55 + "CMD_TIMEOUT", 56 + "IDMA0_AXI_SLVERR", 57 + "IDMA0_AXI_DECERR", 58 + "0x15 Reserved", 59 + "IDMA1_AXI_SLAVE_FAULT", 60 + "IDMA1_AIXI_DECERR", 61 + "0x18 Reserved", 62 + "ZLIBVHB_AXI_SLVERR", 63 + "ZLIBVHB_AXI_DECERR", 64 + "0x1B Reserved", 65 + "ZLIB_UNEXPECTED_EOM", 66 + "ZLIB_EXTRA_DATA", 67 + "ZLIB_BTYPE", 68 + "ZLIB_UNDEFINED_SYMBOL", 69 + "ZLIB_UNDEFINED_DISTANCE_S", 70 + "ZLIB_CODE_LENGTH_SYMBOL", 71 + "ZLIB _VHB_ILLEGAL_FETCH", 72 + "ZLIB_UNCOMPRESSED_LEN", 73 + "ZLIB_LIMIT_REACHED", 74 + "ZLIB_CHECKSUM_MISMATCH0", 75 + "ODMA0_AXI_SLVERR", 76 + "ODMA0_AXI_DECERR", 77 + "0x28 Reserved", 78 + "ODMA1_AXI_SLVERR", 79 + "ODMA1_AXI_DECERR", 80 80 }; 81 81 82 - void ccp_log_error(struct ccp_device *d, int e) 82 + void ccp_log_error(struct ccp_device *d, unsigned int e) 83 83 { 84 - dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e); 84 + if (WARN_ON(e >= CCP_MAX_ERROR_CODE)) 85 + return; 86 + 87 + if (e < ARRAY_SIZE(ccp_error_codes)) 88 + dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]); 89 + else 90 + dev_err(d->dev, "CCP error %d: Unknown Error\n", e); 85 91 } 86 92 87 93 /* List of CCPs, CCP count, read-write access lock, and access functions
+1 -1
drivers/crypto/ccp/ccp-dev.h
··· 629 629 void ccp_add_device(struct ccp_device *ccp); 630 630 void ccp_del_device(struct ccp_device *ccp); 631 631 632 - extern void ccp_log_error(struct ccp_device *, int); 632 + extern void ccp_log_error(struct ccp_device *, unsigned int); 633 633 634 634 struct ccp_device *ccp_alloc_struct(struct sp_device *sp); 635 635 bool ccp_queues_suspended(struct ccp_device *ccp);
+6 -14
drivers/crypto/ccp/ccp-ops.c
··· 2 2 /* 3 3 * AMD Cryptographic Coprocessor (CCP) driver 4 4 * 5 - * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. 5 + * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. 6 6 * 7 7 * Author: Tom Lendacky <thomas.lendacky@amd.com> 8 8 * Author: Gary R Hook <gary.hook@amd.com> ··· 890 890 return -EINVAL; 891 891 892 892 if (((aes->mode == CCP_AES_MODE_ECB) || 893 - (aes->mode == CCP_AES_MODE_CBC) || 894 - (aes->mode == CCP_AES_MODE_CFB)) && 893 + (aes->mode == CCP_AES_MODE_CBC)) && 895 894 (aes->src_len & (AES_BLOCK_SIZE - 1))) 896 895 return -EINVAL; 897 896 ··· 1263 1264 int ret; 1264 1265 1265 1266 /* Error checks */ 1267 + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) 1268 + return -EINVAL; 1269 + 1266 1270 if (!cmd_q->ccp->vdata->perform->des3) 1267 1271 return -EINVAL; 1268 1272 ··· 1348 1346 * passthru option to convert from big endian to little endian. 1349 1347 */ 1350 1348 if (des3->mode != CCP_DES3_MODE_ECB) { 1351 - u32 load_mode; 1352 - 1353 1349 op.sb_ctx = cmd_q->sb_ctx; 1354 1350 1355 1351 ret = ccp_init_dm_workarea(&ctx, cmd_q, ··· 1363 1363 if (ret) 1364 1364 goto e_ctx; 1365 1365 1366 - if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1367 - load_mode = CCP_PASSTHRU_BYTESWAP_NOOP; 1368 - else 1369 - load_mode = CCP_PASSTHRU_BYTESWAP_256BIT; 1370 1366 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1371 - load_mode); 1367 + CCP_PASSTHRU_BYTESWAP_256BIT); 1372 1368 if (ret) { 1373 1369 cmd->engine_error = cmd_q->cmd_error; 1374 1370 goto e_ctx; ··· 1426 1430 } 1427 1431 1428 1432 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ 1429 - if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1430 - dm_offset = CCP_SB_BYTES - des3->iv_len; 1431 - else 1432 - dm_offset = 0; 1433 1433 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, 1434 1434 DES3_EDE_BLOCK_SIZE); 1435 1435 }
+61 -9
drivers/crypto/ccree/cc_driver.c
··· 48 48 }; 49 49 50 50 #define CC_NUM_IDRS 4 51 + #define CC_HW_RESET_LOOP_COUNT 10 51 52 52 53 /* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */ 53 54 static const u32 pidr_0124_offsets[CC_NUM_IDRS] = { ··· 134 133 u32 imr; 135 134 136 135 /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */ 136 + /* if driver suspended return, probebly shared interrupt */ 137 + if (cc_pm_is_dev_suspended(dev)) 138 + return IRQ_NONE; 137 139 138 140 /* read the interrupt status */ 139 141 irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); ··· 190 186 } 191 187 192 188 return IRQ_HANDLED; 189 + } 190 + 191 + bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata) 192 + { 193 + unsigned int val; 194 + unsigned int i; 195 + 196 + /* 712/710/63 has no reset completion indication, always return true */ 197 + if (drvdata->hw_rev <= CC_HW_REV_712) 198 + return true; 199 + 200 + for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) { 201 + /* in cc7x3 NVM_IS_IDLE indicates that CC reset is 202 + * completed and device is fully functional 203 + */ 204 + val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE)); 205 + if (val & CC_NVM_IS_IDLE_MASK) { 206 + /* hw indicate reset completed */ 207 + return true; 208 + } 209 + /* allow scheduling other process on the processor */ 210 + schedule(); 211 + } 212 + /* reset not completed */ 213 + return false; 193 214 } 194 215 195 216 int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe) ··· 344 315 return new_drvdata->irq; 345 316 } 346 317 347 - rc = devm_request_irq(dev, new_drvdata->irq, cc_isr, 348 - IRQF_SHARED, "ccree", new_drvdata); 349 - if (rc) { 350 - dev_err(dev, "Could not register to interrupt %d\n", 351 - new_drvdata->irq); 352 - return rc; 353 - } 354 - dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq); 355 - 356 318 init_completion(&new_drvdata->hw_queue_avail); 357 319 358 320 if (!plat_dev->dev.dma_mask) ··· 371 351 } 372 352 373 353 new_drvdata->sec_disabled = cc_sec_disable; 354 + 355 + /* wait for Crytpcell 
reset completion */ 356 + if (!cc_wait_for_reset_completion(new_drvdata)) { 357 + dev_err(dev, "Cryptocell reset not completed"); 358 + } 374 359 375 360 if (hw_rev->rev <= CC_HW_REV_712) { 376 361 /* Verify correct mapping */ ··· 408 383 } 409 384 sig_cidr = val; 410 385 386 + /* Check HW engine configuration */ 387 + val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS)); 388 + switch (val) { 389 + case CC_PINS_FULL: 390 + /* This is fine */ 391 + break; 392 + case CC_PINS_SLIM: 393 + if (new_drvdata->std_bodies & CC_STD_NIST) { 394 + dev_warn(dev, "703 mode forced due to HW configuration.\n"); 395 + new_drvdata->std_bodies = CC_STD_OSCCA; 396 + } 397 + break; 398 + default: 399 + dev_err(dev, "Unsupported engines configration.\n"); 400 + rc = -EINVAL; 401 + goto post_clk_err; 402 + } 403 + 411 404 /* Check security disable state */ 412 405 val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED)); 413 406 val &= CC_SECURITY_DISABLED_MASK; ··· 444 401 /* Display HW versions */ 445 402 dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n", 446 403 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION); 404 + /* register the driver isr function */ 405 + rc = devm_request_irq(dev, new_drvdata->irq, cc_isr, 406 + IRQF_SHARED, "ccree", new_drvdata); 407 + if (rc) { 408 + dev_err(dev, "Could not register to interrupt %d\n", 409 + new_drvdata->irq); 410 + goto post_clk_err; 411 + } 412 + dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq); 447 413 448 414 rc = init_cc_regs(new_drvdata, true); 449 415 if (rc) {
+6
drivers/crypto/ccree/cc_driver.h
··· 53 53 54 54 #define CC_COHERENT_CACHE_PARAMS 0xEEE 55 55 56 + #define CC_PINS_FULL 0x0 57 + #define CC_PINS_SLIM 0x9F 58 + 56 59 /* Maximum DMA mask supported by IP */ 57 60 #define DMA_BIT_MASK_LEN 48 58 61 ··· 69 66 #define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT) 70 67 71 68 #define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT) 69 + 70 + #define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT) 72 71 73 72 #define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \ 74 73 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \ ··· 221 216 __dump_byte_array(name, the_array, size); 222 217 } 223 218 219 + bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata); 224 220 int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe); 225 221 void fini_cc_regs(struct cc_drvdata *drvdata); 226 222 int cc_clk_on(struct cc_drvdata *drvdata);
+20
drivers/crypto/ccree/cc_host_regs.h
··· 114 114 #define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL 115 115 #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL 116 116 #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL 117 + #define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL 118 + #define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL 119 + #define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL 117 120 #define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL 118 121 #define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL 119 122 #define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL ··· 206 203 #define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL 207 204 #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL 208 205 #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL 206 + #define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL 207 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL 208 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL 209 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL 210 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL 211 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL 212 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL 213 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL 214 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL 215 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL 216 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL 217 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL 218 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL 219 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL 220 + #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL 221 + #define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL 222 + #define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL 209 223 // 
-------------------------------------- 210 224 // BLOCK: ID_REGISTERS 211 225 // --------------------------------------
+11
drivers/crypto/ccree/cc_pm.c
··· 49 49 dev_err(dev, "failed getting clock back on. We're toast.\n"); 50 50 return rc; 51 51 } 52 + /* wait for Crytpcell reset completion */ 53 + if (!cc_wait_for_reset_completion(drvdata)) { 54 + dev_err(dev, "Cryptocell reset not completed"); 55 + return -EBUSY; 56 + } 52 57 53 58 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); 54 59 rc = init_cc_regs(drvdata, false); ··· 104 99 rc = -EBUSY; 105 100 } 106 101 return rc; 102 + } 103 + 104 + bool cc_pm_is_dev_suspended(struct device *dev) 105 + { 106 + /* check device state using runtime api */ 107 + return pm_runtime_suspended(dev); 107 108 } 108 109 109 110 int cc_pm_init(struct cc_drvdata *drvdata)
+7
drivers/crypto/ccree/cc_pm.h
··· 22 22 int cc_pm_resume(struct device *dev); 23 23 int cc_pm_get(struct device *dev); 24 24 int cc_pm_put_suspend(struct device *dev); 25 + bool cc_pm_is_dev_suspended(struct device *dev); 25 26 26 27 #else 27 28 ··· 53 52 static inline int cc_pm_put_suspend(struct device *dev) 54 53 { 55 54 return 0; 55 + } 56 + 57 + static inline bool cc_pm_is_dev_suspended(struct device *dev) 58 + { 59 + /* if PM not supported device is never suspend */ 60 + return false; 56 61 } 57 62 58 63 #endif
+1 -1
drivers/crypto/hisilicon/sec/sec_drv.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* Copyright (c) 2016-2017 Hisilicon Limited. */ 3 3 4 4 #ifndef _SEC_DRV_H_
+9 -4
drivers/crypto/inside-secure/safexcel.c
··· 398 398 399 399 /* Processing Engine configuration */ 400 400 401 + /* Token & context configuration */ 402 + val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES | 403 + EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX | 404 + EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX; 405 + writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); 406 + 401 407 /* H/W capabilities selection */ 402 408 val = EIP197_FUNCTION_RSVD; 403 409 val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; ··· 595 589 if (rdesc->result_data.error_code & 0x407f) { 596 590 /* Fatal error (bits 0-7, 14) */ 597 591 dev_err(priv->dev, 598 - "cipher: result: result descriptor error (%d)\n", 592 + "cipher: result: result descriptor error (0x%x)\n", 599 593 rdesc->result_data.error_code); 600 - return -EIO; 594 + return -EINVAL; 601 595 } else if (rdesc->result_data.error_code == BIT(9)) { 602 596 /* Authentication failed */ 603 597 return -EBADMSG; ··· 726 720 } 727 721 728 722 acknowledge: 729 - if (i) { 723 + if (i) 730 724 writel(EIP197_xDR_PROC_xD_PKT(i) | 731 725 EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset), 732 726 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); 733 - } 734 727 735 728 /* If the number of requests overflowed the counter, try to proceed more 736 729 * requests.
+16 -1
drivers/crypto/inside-secure/safexcel.h
··· 118 118 #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) 119 119 #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) 120 120 #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) 121 + #define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) 121 122 #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) 122 123 #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) 123 124 #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) ··· 250 249 #define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0) 251 250 #define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1) 252 251 252 + /* EIP197_PE_EIP96_TOKEN_CTRL */ 253 + #define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16) 254 + #define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19) 255 + #define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20) 256 + 253 257 /* EIP197_PE_EIP96_FUNCTION_EN */ 254 258 #define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) 255 259 #define EIP197_PROTOCOL_HASH_ONLY BIT(0) ··· 339 333 #define CONTEXT_CONTROL_IV3 BIT(8) 340 334 #define CONTEXT_CONTROL_DIGEST_CNT BIT(9) 341 335 #define CONTEXT_CONTROL_COUNTER_MODE BIT(10) 336 + #define CONTEXT_CONTROL_CRYPTO_STORE BIT(12) 342 337 #define CONTEXT_CONTROL_HASH_STORE BIT(19) 343 338 344 339 /* The hash counter given to the engine in the context has a granularity of ··· 432 425 433 426 #define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16) 434 427 428 + #define EIP197_TOKEN_CTX_OFFSET(x) (x) 429 + #define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11) 430 + #define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12) 431 + 435 432 #define EIP197_TOKEN_STAT_LAST_HASH BIT(0) 436 433 #define EIP197_TOKEN_STAT_LAST_PACKET BIT(1) 437 434 #define EIP197_TOKEN_OPCODE_DIRECTION 0x0 ··· 443 432 #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT 444 433 #define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 445 434 #define EIP197_TOKEN_OPCODE_VERIFY 0xd 435 + #define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe 446 436 #define 
EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) 447 437 448 438 static inline void eip197_noop_token(struct safexcel_token *token) ··· 454 442 455 443 /* Instructions */ 456 444 #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c 445 + #define EIP197_TOKEN_INS_ORIGIN_IV0 0x14 446 + #define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) 457 447 #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) 458 448 #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) 459 449 #define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) ··· 482 468 483 469 #define EIP197_OPTION_MAGIC_VALUE BIT(0) 484 470 #define EIP197_OPTION_64BIT_CTX BIT(1) 471 + #define EIP197_OPTION_RC_AUTO (0x2 << 3) 485 472 #define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8) 486 473 #define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10) 487 474 #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) ··· 644 629 u32 digest; 645 630 646 631 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; 647 - u8 cache[SHA512_BLOCK_SIZE]; 632 + u8 cache[SHA512_BLOCK_SIZE << 1]; 648 633 }; 649 634 650 635 /*
+74 -42
drivers/crypto/inside-secure/safexcel_cipher.c
··· 51 51 52 52 struct safexcel_cipher_req { 53 53 enum safexcel_cipher_direction direction; 54 + /* Number of result descriptors associated to the request */ 55 + unsigned int rdescs; 54 56 bool needs_inv; 55 57 }; 56 58 ··· 61 59 u32 length) 62 60 { 63 61 struct safexcel_token *token; 64 - unsigned offset = 0; 62 + u32 offset = 0, block_sz = 0; 65 63 66 64 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { 67 65 switch (ctx->alg) { 68 66 case SAFEXCEL_DES: 69 - offset = DES_BLOCK_SIZE / sizeof(u32); 70 - memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE); 67 + block_sz = DES_BLOCK_SIZE; 71 68 cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; 72 69 break; 73 70 case SAFEXCEL_3DES: 74 - offset = DES3_EDE_BLOCK_SIZE / sizeof(u32); 75 - memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE); 71 + block_sz = DES3_EDE_BLOCK_SIZE; 76 72 cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; 77 73 break; 78 - 79 74 case SAFEXCEL_AES: 80 - offset = AES_BLOCK_SIZE / sizeof(u32); 81 - memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); 75 + block_sz = AES_BLOCK_SIZE; 82 76 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; 83 77 break; 84 78 } 79 + 80 + offset = block_sz / sizeof(u32); 81 + memcpy(cdesc->control_data.token, iv, block_sz); 85 82 } 86 83 87 84 token = (struct safexcel_token *)(cdesc->control_data.token + offset); ··· 92 91 token[0].instructions = EIP197_TOKEN_INS_LAST | 93 92 EIP197_TOKEN_INS_TYPE_CRYTO | 94 93 EIP197_TOKEN_INS_TYPE_OUTPUT; 94 + 95 + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { 96 + u32 last = (EIP197_MAX_TOKENS - 1) - offset; 97 + 98 + token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS; 99 + token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL | 100 + EIP197_TOKEN_EXEC_IF_SUCCESSFUL| 101 + EIP197_TOKEN_CTX_OFFSET(0x2); 102 + token[last].stat = EIP197_TOKEN_STAT_LAST_HASH | 103 + EIP197_TOKEN_STAT_LAST_PACKET; 104 + token[last].instructions = 105 + EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / 
sizeof(u32)) | 106 + EIP197_TOKEN_INS_ORIGIN_IV0; 107 + 108 + /* Store the updated IV values back in the internal context 109 + * registers. 110 + */ 111 + cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE; 112 + } 95 113 } 96 114 97 115 static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, ··· 353 333 354 334 *ret = 0; 355 335 356 - do { 336 + if (unlikely(!sreq->rdescs)) 337 + return 0; 338 + 339 + while (sreq->rdescs--) { 357 340 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); 358 341 if (IS_ERR(rdesc)) { 359 342 dev_err(priv->dev, ··· 369 346 *ret = safexcel_rdesc_check_errors(priv, rdesc); 370 347 371 348 ndesc++; 372 - } while (!rdesc->last_seg); 349 + } 373 350 374 351 safexcel_complete(priv, ring); 375 352 376 353 if (src == dst) { 377 - dma_unmap_sg(priv->dev, src, 378 - sg_nents_for_len(src, cryptlen), 379 - DMA_BIDIRECTIONAL); 354 + dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL); 380 355 } else { 381 - dma_unmap_sg(priv->dev, src, 382 - sg_nents_for_len(src, cryptlen), 383 - DMA_TO_DEVICE); 384 - dma_unmap_sg(priv->dev, dst, 385 - sg_nents_for_len(dst, cryptlen), 386 - DMA_FROM_DEVICE); 356 + dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE); 357 + dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE); 387 358 } 388 359 389 360 *should_complete = true; ··· 402 385 int i, ret = 0; 403 386 404 387 if (src == dst) { 405 - nr_src = dma_map_sg(priv->dev, src, 406 - sg_nents_for_len(src, totlen), 388 + nr_src = dma_map_sg(priv->dev, src, sg_nents(src), 407 389 DMA_BIDIRECTIONAL); 408 390 nr_dst = nr_src; 409 391 if (!nr_src) 410 392 return -EINVAL; 411 393 } else { 412 - nr_src = dma_map_sg(priv->dev, src, 413 - sg_nents_for_len(src, totlen), 394 + nr_src = dma_map_sg(priv->dev, src, sg_nents(src), 414 395 DMA_TO_DEVICE); 415 396 if (!nr_src) 416 397 return -EINVAL; 417 398 418 - nr_dst = dma_map_sg(priv->dev, dst, 419 - sg_nents_for_len(dst, totlen), 399 + nr_dst = 
dma_map_sg(priv->dev, dst, sg_nents(dst), 420 400 DMA_FROM_DEVICE); 421 401 if (!nr_dst) { 422 - dma_unmap_sg(priv->dev, src, 423 - sg_nents_for_len(src, totlen), 424 - DMA_TO_DEVICE); 402 + dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); 425 403 return -EINVAL; 426 404 } 427 405 } ··· 466 454 467 455 /* result descriptors */ 468 456 for_each_sg(dst, sg, nr_dst, i) { 469 - bool first = !i, last = (i == nr_dst - 1); 457 + bool first = !i, last = sg_is_last(sg); 470 458 u32 len = sg_dma_len(sg); 471 459 472 460 rdesc = safexcel_add_rdesc(priv, ring, first, last, ··· 495 483 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); 496 484 497 485 if (src == dst) { 498 - dma_unmap_sg(priv->dev, src, 499 - sg_nents_for_len(src, totlen), 500 - DMA_BIDIRECTIONAL); 486 + dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL); 501 487 } else { 502 - dma_unmap_sg(priv->dev, src, 503 - sg_nents_for_len(src, totlen), 504 - DMA_TO_DEVICE); 505 - dma_unmap_sg(priv->dev, dst, 506 - sg_nents_for_len(dst, totlen), 507 - DMA_FROM_DEVICE); 488 + dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); 489 + dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE); 508 490 } 509 491 510 492 return ret; ··· 507 501 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, 508 502 int ring, 509 503 struct crypto_async_request *base, 504 + struct safexcel_cipher_req *sreq, 510 505 bool *should_complete, int *ret) 511 506 { 512 507 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); ··· 516 509 517 510 *ret = 0; 518 511 519 - do { 512 + if (unlikely(!sreq->rdescs)) 513 + return 0; 514 + 515 + while (sreq->rdescs--) { 520 516 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); 521 517 if (IS_ERR(rdesc)) { 522 518 dev_err(priv->dev, ··· 532 522 *ret = safexcel_rdesc_check_errors(priv, rdesc); 533 523 534 524 ndesc++; 535 - } while (!rdesc->last_seg); 525 + } 536 526 537 527 safexcel_complete(priv, ring); 538 528 ··· 570 560 { 571 561 struct skcipher_request 
*req = skcipher_request_cast(async); 572 562 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); 563 + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm); 573 564 int err; 574 565 575 566 if (sreq->needs_inv) { 576 567 sreq->needs_inv = false; 577 - err = safexcel_handle_inv_result(priv, ring, async, 568 + err = safexcel_handle_inv_result(priv, ring, async, sreq, 578 569 should_complete, ret); 579 570 } else { 580 571 err = safexcel_handle_req_result(priv, ring, async, req->src, 581 572 req->dst, req->cryptlen, sreq, 582 573 should_complete, ret); 574 + 575 + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { 576 + u32 block_sz = 0; 577 + 578 + switch (ctx->alg) { 579 + case SAFEXCEL_DES: 580 + block_sz = DES_BLOCK_SIZE; 581 + break; 582 + case SAFEXCEL_3DES: 583 + block_sz = DES3_EDE_BLOCK_SIZE; 584 + break; 585 + case SAFEXCEL_AES: 586 + block_sz = AES_BLOCK_SIZE; 587 + break; 588 + } 589 + 590 + memcpy(req->iv, ctx->base.ctxr->data, block_sz); 591 + } 583 592 } 584 593 585 594 return err; ··· 616 587 617 588 if (sreq->needs_inv) { 618 589 sreq->needs_inv = false; 619 - err = safexcel_handle_inv_result(priv, ring, async, 590 + err = safexcel_handle_inv_result(priv, ring, async, sreq, 620 591 should_complete, ret); 621 592 } else { 622 593 err = safexcel_handle_req_result(priv, ring, async, req->src, ··· 662 633 ret = safexcel_send_req(async, ring, sreq, req->src, 663 634 req->dst, req->cryptlen, 0, 0, req->iv, 664 635 commands, results); 636 + 637 + sreq->rdescs = *results; 665 638 return ret; 666 639 } 667 640 ··· 686 655 req->cryptlen, req->assoclen, 687 656 crypto_aead_authsize(tfm), req->iv, 688 657 commands, results); 658 + sreq->rdescs = *results; 689 659 return ret; 690 660 } 691 661
+55 -37
drivers/crypto/inside-secure/safexcel_hash.c
··· 41 41 u64 len[2]; 42 42 u64 processed[2]; 43 43 44 - u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 44 + u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); 45 45 dma_addr_t cache_dma; 46 46 unsigned int cache_sz; 47 47 48 - u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 48 + u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); 49 49 }; 50 50 51 51 static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) 52 52 { 53 - if (req->len[1] > req->processed[1]) 54 - return 0xffffffff - (req->len[0] - req->processed[0]); 53 + u64 len, processed; 55 54 56 - return req->len[0] - req->processed[0]; 55 + len = (0xffffffff * req->len[1]) + req->len[0]; 56 + processed = (0xffffffff * req->processed[1]) + req->processed[0]; 57 + 58 + return len - processed; 57 59 } 58 60 59 61 static void safexcel_hash_token(struct safexcel_command_desc *cdesc, ··· 89 87 cdesc->control_data.control0 |= ctx->alg; 90 88 cdesc->control_data.control0 |= req->digest; 91 89 90 + if (!req->finish) 91 + cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; 92 + 92 93 if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { 93 94 if (req->processed[0] || req->processed[1]) { 94 95 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) ··· 109 104 } else { 110 105 cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; 111 106 } 112 - 113 - if (!req->finish) 114 - cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; 115 107 116 108 /* 117 109 * Copy the input digest if needed, and setup the context ··· 185 183 dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz, 186 184 DMA_TO_DEVICE); 187 185 sreq->cache_dma = 0; 186 + sreq->cache_sz = 0; 188 187 } 189 188 190 189 if (sreq->finish) ··· 212 209 struct safexcel_command_desc *cdesc, *first_cdesc = NULL; 213 210 struct safexcel_result_desc *rdesc; 214 211 struct scatterlist *sg; 215 - int i, extra, n_cdesc = 0, ret = 0; 216 - u64 queued, len, cache_len; 212 + int i, extra = 0, n_cdesc = 
0, ret = 0; 213 + u64 queued, len, cache_len, cache_max; 214 + 215 + cache_max = crypto_ahash_blocksize(ahash); 216 + if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) 217 + cache_max <<= 1; 217 218 218 219 queued = len = safexcel_queued_len(req); 219 - if (queued <= crypto_ahash_blocksize(ahash)) 220 + if (queued <= cache_max) 220 221 cache_len = queued; 221 222 else 222 223 cache_len = queued - areq->nbytes; ··· 230 223 * fit into full blocks, cache it for the next send() call. 231 224 */ 232 225 extra = queued & (crypto_ahash_blocksize(ahash) - 1); 226 + 227 + if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC && 228 + extra < crypto_ahash_blocksize(ahash)) 229 + extra += crypto_ahash_blocksize(ahash); 230 + 231 + /* If this is not the last request and the queued data 232 + * is a multiple of a block, cache the last one for now. 233 + */ 233 234 if (!extra) 234 - /* If this is not the last request and the queued data 235 - * is a multiple of a block, cache the last one for now. 236 - */ 237 235 extra = crypto_ahash_blocksize(ahash); 238 236 239 - if (extra) { 240 - sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 241 - req->cache_next, extra, 242 - areq->nbytes - extra); 237 + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 238 + req->cache_next, extra, 239 + areq->nbytes - extra); 243 240 244 - queued -= extra; 245 - len -= extra; 246 - 247 - if (!queued) { 248 - *commands = 0; 249 - *results = 0; 250 - return 0; 251 - } 252 - } 241 + queued -= extra; 242 + len -= extra; 253 243 } 254 244 255 245 /* Add a command descriptor for the cached data, if any */ ··· 273 269 } 274 270 275 271 /* Now handle the current ahash request buffer(s) */ 276 - req->nents = dma_map_sg(priv->dev, areq->src, 277 - sg_nents_for_len(areq->src, areq->nbytes), 272 + req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src), 278 273 DMA_TO_DEVICE); 279 274 if (!req->nents) { 280 275 ret = -ENOMEM; ··· 348 345 if (req->cache_dma) { 349 346 dma_unmap_single(priv->dev, 
req->cache_dma, req->cache_sz, 350 347 DMA_TO_DEVICE); 348 + req->cache_dma = 0; 351 349 req->cache_sz = 0; 352 350 } 353 351 ··· 490 486 struct safexcel_inv_result result = {}; 491 487 int ring = ctx->base.ring; 492 488 493 - memset(req, 0, sizeof(struct ahash_request)); 489 + memset(req, 0, EIP197_AHASH_REQ_SIZE); 494 490 495 491 /* create invalidation request */ 496 492 init_completion(&result.completion); ··· 523 519 /* safexcel_ahash_cache: cache data until at least one request can be sent to 524 520 * the engine, aka. when there is at least 1 block size in the pipe. 525 521 */ 526 - static int safexcel_ahash_cache(struct ahash_request *areq) 522 + static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max) 527 523 { 528 524 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 529 - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 530 525 u64 queued, cache_len; 531 526 532 527 /* queued: everything accepted by the driver which will be handled by ··· 542 539 * In case there isn't enough bytes to proceed (less than a 543 540 * block size), cache the data until we have enough. 
544 541 */ 545 - if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) { 542 + if (cache_len + areq->nbytes <= cache_max) { 546 543 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 547 544 req->cache + cache_len, 548 545 areq->nbytes, 0); ··· 602 599 { 603 600 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 604 601 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 602 + u32 cache_max; 605 603 606 604 /* If the request is 0 length, do nothing */ 607 605 if (!areq->nbytes) ··· 612 608 if (req->len[0] < areq->nbytes) 613 609 req->len[1]++; 614 610 615 - safexcel_ahash_cache(areq); 611 + cache_max = crypto_ahash_blocksize(ahash); 612 + if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) 613 + cache_max <<= 1; 614 + 615 + safexcel_ahash_cache(areq, cache_max); 616 616 617 617 /* 618 618 * We're not doing partial updates when performing an hmac request. ··· 629 621 return safexcel_ahash_enqueue(areq); 630 622 631 623 if (!req->last_req && 632 - safexcel_queued_len(req) > crypto_ahash_blocksize(ahash)) 624 + safexcel_queued_len(req) > cache_max) 633 625 return safexcel_ahash_enqueue(areq); 634 626 635 627 return 0; ··· 686 678 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 687 679 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 688 680 struct safexcel_ahash_export_state *export = out; 681 + u32 cache_sz; 682 + 683 + cache_sz = crypto_ahash_blocksize(ahash); 684 + if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) 685 + cache_sz <<= 1; 689 686 690 687 export->len[0] = req->len[0]; 691 688 export->len[1] = req->len[1]; ··· 700 687 export->digest = req->digest; 701 688 702 689 memcpy(export->state, req->state, req->state_sz); 703 - memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); 690 + memcpy(export->cache, req->cache, cache_sz); 704 691 705 692 return 0; 706 693 } ··· 710 697 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 711 698 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 712 699 const struct 
safexcel_ahash_export_state *export = in; 700 + u32 cache_sz; 713 701 int ret; 714 702 715 703 ret = crypto_ahash_init(areq); 716 704 if (ret) 717 705 return ret; 706 + 707 + cache_sz = crypto_ahash_blocksize(ahash); 708 + if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) 709 + cache_sz <<= 1; 718 710 719 711 req->len[0] = export->len[0]; 720 712 req->len[1] = export->len[1]; ··· 728 710 729 711 req->digest = export->digest; 730 712 731 - memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); 713 + memcpy(req->cache, export->cache, cache_sz); 732 714 memcpy(req->state, export->state, req->state_sz); 733 715 734 716 return 0;
+3
drivers/crypto/inside-secure/safexcel_ring.c
··· 145 145 (lower_32_bits(context) & GENMASK(31, 2)) >> 2; 146 146 cdesc->control_data.context_hi = upper_32_bits(context); 147 147 148 + if (priv->version == EIP197B || priv->version == EIP197D) 149 + cdesc->control_data.options |= EIP197_OPTION_RC_AUTO; 150 + 148 151 /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ 149 152 cdesc->control_data.refresh = 2; 150 153
+8 -7
drivers/crypto/ixp4xx_crypto.c
··· 100 100 u16 pkt_len; 101 101 u16 buf_len; 102 102 #endif 103 - u32 phys_addr; 103 + dma_addr_t phys_addr; 104 104 u32 __reserved[4]; 105 105 struct buffer_desc *next; 106 106 enum dma_data_direction dir; ··· 117 117 u8 mode; /* NPE_OP_* operation mode */ 118 118 #endif 119 119 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */ 120 - u32 icv_rev_aes; /* icv or rev aes */ 121 - u32 src_buf; 122 - u32 dst_buf; 120 + dma_addr_t icv_rev_aes; /* icv or rev aes */ 121 + dma_addr_t src_buf; 122 + dma_addr_t dst_buf; 123 123 #ifdef __ARMEB__ 124 124 u16 auth_offs; /* Authentication start offset */ 125 125 u16 auth_len; /* Authentication data length */ ··· 320 320 } 321 321 } 322 322 323 - static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) 323 + static void free_buf_chain(struct device *dev, struct buffer_desc *buf, 324 + dma_addr_t phys) 324 325 { 325 326 while (buf) { 326 327 struct buffer_desc *buf1; ··· 603 602 struct buffer_desc *buf; 604 603 int i; 605 604 u8 *pad; 606 - u32 pad_phys, buf_phys; 605 + dma_addr_t pad_phys, buf_phys; 607 606 608 607 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN); 609 608 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys); ··· 788 787 for (; nbytes > 0; sg = sg_next(sg)) { 789 788 unsigned len = min(nbytes, sg->length); 790 789 struct buffer_desc *next_buf; 791 - u32 next_buf_phys; 790 + dma_addr_t next_buf_phys; 792 791 void *ptr; 793 792 794 793 nbytes -= len;
+1 -4
drivers/crypto/mxs-dcp.c
··· 986 986 struct device *dev = &pdev->dev; 987 987 struct dcp *sdcp = NULL; 988 988 int i, ret; 989 - 990 - struct resource *iores; 991 989 int dcp_vmi_irq, dcp_irq; 992 990 993 991 if (global_sdcp) { ··· 993 995 return -ENODEV; 994 996 } 995 997 996 - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 997 998 dcp_vmi_irq = platform_get_irq(pdev, 0); 998 999 if (dcp_vmi_irq < 0) { 999 1000 dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); ··· 1010 1013 return -ENOMEM; 1011 1014 1012 1015 sdcp->dev = dev; 1013 - sdcp->base = devm_ioremap_resource(dev, iores); 1016 + sdcp->base = devm_platform_ioremap_resource(pdev, 0); 1014 1017 if (IS_ERR(sdcp->base)) 1015 1018 return PTR_ERR(sdcp->base); 1016 1019
+5 -3
drivers/crypto/nx/nx-842-powernv.c
··· 27 27 #define WORKMEM_ALIGN (CRB_ALIGN) 28 28 #define CSB_WAIT_MAX (5000) /* ms */ 29 29 #define VAS_RETRIES (10) 30 - /* # of requests allowed per RxFIFO at a time. 0 for unlimited */ 31 - #define MAX_CREDITS_PER_RXFIFO (1024) 32 30 33 31 struct nx842_workmem { 34 32 /* Below fields must be properly aligned */ ··· 810 812 rxattr.lnotify_lpid = lpid; 811 813 rxattr.lnotify_pid = pid; 812 814 rxattr.lnotify_tid = tid; 813 - rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO; 815 + /* 816 + * Maximum RX window credits can not be more than #CRBs in 817 + * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns. 818 + */ 819 + rxattr.wcreds_max = fifo_size / CRB_SIZE; 814 820 815 821 /* 816 822 * Open a VAS receice window which is used to configure RxFIFO
+1 -3
drivers/crypto/nx/nx.c
··· 569 569 570 570 memset(&nx_driver.stats, 0, sizeof(struct nx_stats)); 571 571 572 - rc = NX_DEBUGFS_INIT(&nx_driver); 573 - if (rc) 574 - goto out; 572 + NX_DEBUGFS_INIT(&nx_driver); 575 573 576 574 nx_driver.of.status = NX_OKAY; 577 575
+2 -10
drivers/crypto/nx/nx.h
··· 76 76 atomic_t last_error_pid; 77 77 }; 78 78 79 - struct nx_debugfs { 80 - struct dentry *dfs_root; 81 - struct dentry *dfs_aes_ops, *dfs_aes_bytes; 82 - struct dentry *dfs_sha256_ops, *dfs_sha256_bytes; 83 - struct dentry *dfs_sha512_ops, *dfs_sha512_bytes; 84 - struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid; 85 - }; 86 - 87 79 struct nx_crypto_driver { 88 80 struct nx_stats stats; 89 81 struct nx_of of; 90 82 struct vio_dev *viodev; 91 83 struct vio_driver viodriver; 92 - struct nx_debugfs dfs; 84 + struct dentry *dfs_root; 93 85 }; 94 86 95 87 #define NX_GCM4106_NONCE_LEN (4) ··· 169 177 #define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv) 170 178 #define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv) 171 179 172 - int nx_debugfs_init(struct nx_crypto_driver *); 180 + void nx_debugfs_init(struct nx_crypto_driver *); 173 181 void nx_debugfs_fini(struct nx_crypto_driver *); 174 182 #else 175 183 #define NX_DEBUGFS_INIT(drv) (0)
+23 -48
drivers/crypto/nx/nx_debugfs.c
··· 30 30 * Documentation/ABI/testing/debugfs-pfo-nx-crypto 31 31 */ 32 32 33 - int nx_debugfs_init(struct nx_crypto_driver *drv) 33 + void nx_debugfs_init(struct nx_crypto_driver *drv) 34 34 { 35 - struct nx_debugfs *dfs = &drv->dfs; 35 + struct dentry *root; 36 36 37 - dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL); 37 + root = debugfs_create_dir(NX_NAME, NULL); 38 + drv->dfs_root = root; 38 39 39 - dfs->dfs_aes_ops = 40 - debugfs_create_u32("aes_ops", 41 - S_IRUSR | S_IRGRP | S_IROTH, 42 - dfs->dfs_root, (u32 *)&drv->stats.aes_ops); 43 - dfs->dfs_sha256_ops = 44 - debugfs_create_u32("sha256_ops", 45 - S_IRUSR | S_IRGRP | S_IROTH, 46 - dfs->dfs_root, 47 - (u32 *)&drv->stats.sha256_ops); 48 - dfs->dfs_sha512_ops = 49 - debugfs_create_u32("sha512_ops", 50 - S_IRUSR | S_IRGRP | S_IROTH, 51 - dfs->dfs_root, 52 - (u32 *)&drv->stats.sha512_ops); 53 - dfs->dfs_aes_bytes = 54 - debugfs_create_u64("aes_bytes", 55 - S_IRUSR | S_IRGRP | S_IROTH, 56 - dfs->dfs_root, 57 - (u64 *)&drv->stats.aes_bytes); 58 - dfs->dfs_sha256_bytes = 59 - debugfs_create_u64("sha256_bytes", 60 - S_IRUSR | S_IRGRP | S_IROTH, 61 - dfs->dfs_root, 62 - (u64 *)&drv->stats.sha256_bytes); 63 - dfs->dfs_sha512_bytes = 64 - debugfs_create_u64("sha512_bytes", 65 - S_IRUSR | S_IRGRP | S_IROTH, 66 - dfs->dfs_root, 67 - (u64 *)&drv->stats.sha512_bytes); 68 - dfs->dfs_errors = 69 - debugfs_create_u32("errors", 70 - S_IRUSR | S_IRGRP | S_IROTH, 71 - dfs->dfs_root, (u32 *)&drv->stats.errors); 72 - dfs->dfs_last_error = 73 - debugfs_create_u32("last_error", 74 - S_IRUSR | S_IRGRP | S_IROTH, 75 - dfs->dfs_root, 76 - (u32 *)&drv->stats.last_error); 77 - dfs->dfs_last_error_pid = 78 - debugfs_create_u32("last_error_pid", 79 - S_IRUSR | S_IRGRP | S_IROTH, 80 - dfs->dfs_root, 81 - (u32 *)&drv->stats.last_error_pid); 82 - return 0; 40 + debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH, 41 + root, (u32 *)&drv->stats.aes_ops); 42 + debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH, 43 + root, (u32 
*)&drv->stats.sha256_ops); 44 + debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH, 45 + root, (u32 *)&drv->stats.sha512_ops); 46 + debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH, 47 + root, (u64 *)&drv->stats.aes_bytes); 48 + debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH, 49 + root, (u64 *)&drv->stats.sha256_bytes); 50 + debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH, 51 + root, (u64 *)&drv->stats.sha512_bytes); 52 + debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH, 53 + root, (u32 *)&drv->stats.errors); 54 + debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH, 55 + root, (u32 *)&drv->stats.last_error); 56 + debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH, 57 + root, (u32 *)&drv->stats.last_error_pid); 83 58 } 84 59 85 60 void 86 61 nx_debugfs_fini(struct nx_crypto_driver *drv) 87 62 { 88 - debugfs_remove_recursive(drv->dfs.dfs_root); 63 + debugfs_remove_recursive(drv->dfs_root); 89 64 } 90 65 91 66 #endif
+198 -96
drivers/crypto/qat/qat_common/qat_algs.c
··· 131 131 struct icp_qat_fw_la_bulk_req dec_fw_req; 132 132 struct qat_crypto_instance *inst; 133 133 struct crypto_tfm *tfm; 134 - spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ 135 134 }; 136 135 137 136 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) ··· 222 223 return -EFAULT; 223 224 224 225 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); 226 + if (offset < 0) 227 + return -EFAULT; 228 + 225 229 hash_state_out = (__be32 *)(hash->sha.state1 + offset); 226 230 hash512_state_out = (__be64 *)hash_state_out; 227 231 ··· 255 253 return 0; 256 254 } 257 255 258 - static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) 256 + static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header) 257 + { 258 + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, 259 + ICP_QAT_FW_CIPH_IV_64BIT_PTR); 260 + ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, 261 + ICP_QAT_FW_LA_UPDATE_STATE); 262 + } 263 + 264 + static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header) 265 + { 266 + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, 267 + ICP_QAT_FW_CIPH_IV_16BYTE_DATA); 268 + ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, 269 + ICP_QAT_FW_LA_NO_UPDATE_STATE); 270 + } 271 + 272 + static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, 273 + int aead) 259 274 { 260 275 header->hdr_flags = 261 276 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); ··· 282 263 QAT_COMN_PTR_TYPE_SGL); 283 264 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, 284 265 ICP_QAT_FW_LA_PARTIAL_NONE); 285 - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, 286 - ICP_QAT_FW_CIPH_IV_16BYTE_DATA); 266 + if (aead) 267 + qat_alg_init_hdr_no_iv_updt(header); 268 + else 269 + qat_alg_init_hdr_iv_updt(header); 287 270 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, 288 271 ICP_QAT_FW_LA_NO_PROTO); 289 - 
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, 290 - ICP_QAT_FW_LA_NO_UPDATE_STATE); 291 272 } 292 273 293 274 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, ··· 322 303 return -EFAULT; 323 304 324 305 /* Request setup */ 325 - qat_alg_init_common_hdr(header); 306 + qat_alg_init_common_hdr(header, 1); 326 307 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; 327 308 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, 328 309 ICP_QAT_FW_LA_DIGEST_IN_BUFFER); ··· 409 390 return -EFAULT; 410 391 411 392 /* Request setup */ 412 - qat_alg_init_common_hdr(header); 393 + qat_alg_init_common_hdr(header, 1); 413 394 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; 414 395 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, 415 396 ICP_QAT_FW_LA_DIGEST_IN_BUFFER); ··· 473 454 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; 474 455 475 456 memcpy(cd->aes.key, key, keylen); 476 - qat_alg_init_common_hdr(header); 457 + qat_alg_init_common_hdr(header, 0); 477 458 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; 478 459 cd_pars->u.s.content_desc_params_sz = 479 460 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; ··· 595 576 return -EINVAL; 596 577 } 597 578 598 - static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, 579 + static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, 580 + unsigned int keylen) 581 + { 582 + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); 583 + 584 + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 585 + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 586 + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 587 + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 588 + 589 + return qat_alg_aead_init_sessions(tfm, key, keylen, 590 + ICP_QAT_HW_CIPHER_CBC_MODE); 591 + } 592 + 593 + static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key, 599 594 unsigned int keylen) 600 595 { 601 596 struct 
qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); 597 + struct qat_crypto_instance *inst = NULL; 598 + int node = get_current_node(); 602 599 struct device *dev; 600 + int ret; 603 601 604 - if (ctx->enc_cd) { 605 - /* rekeying */ 606 - dev = &GET_DEV(ctx->inst->accel_dev); 607 - memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 608 - memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 609 - memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 610 - memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 611 - } else { 612 - /* new key */ 613 - int node = get_current_node(); 614 - struct qat_crypto_instance *inst = 615 - qat_crypto_get_instance_node(node); 616 - if (!inst) { 617 - return -EINVAL; 618 - } 619 - 620 - dev = &GET_DEV(inst->accel_dev); 621 - ctx->inst = inst; 622 - ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 623 - &ctx->enc_cd_paddr, 624 - GFP_ATOMIC); 625 - if (!ctx->enc_cd) { 626 - return -ENOMEM; 627 - } 628 - ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 629 - &ctx->dec_cd_paddr, 630 - GFP_ATOMIC); 631 - if (!ctx->dec_cd) { 632 - goto out_free_enc; 633 - } 602 + inst = qat_crypto_get_instance_node(node); 603 + if (!inst) 604 + return -EINVAL; 605 + dev = &GET_DEV(inst->accel_dev); 606 + ctx->inst = inst; 607 + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 608 + &ctx->enc_cd_paddr, 609 + GFP_ATOMIC); 610 + if (!ctx->enc_cd) { 611 + ret = -ENOMEM; 612 + goto out_free_inst; 634 613 } 635 - if (qat_alg_aead_init_sessions(tfm, key, keylen, 636 - ICP_QAT_HW_CIPHER_CBC_MODE)) 614 + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 615 + &ctx->dec_cd_paddr, 616 + GFP_ATOMIC); 617 + if (!ctx->dec_cd) { 618 + ret = -ENOMEM; 619 + goto out_free_enc; 620 + } 621 + 622 + ret = qat_alg_aead_init_sessions(tfm, key, keylen, 623 + ICP_QAT_HW_CIPHER_CBC_MODE); 624 + if (ret) 637 625 goto out_free_all; 638 626 639 627 return 0; ··· 655 629 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 656 630 ctx->enc_cd, ctx->enc_cd_paddr); 
657 631 ctx->enc_cd = NULL; 658 - return -ENOMEM; 632 + out_free_inst: 633 + ctx->inst = NULL; 634 + qat_crypto_put_instance(inst); 635 + return ret; 636 + } 637 + 638 + static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, 639 + unsigned int keylen) 640 + { 641 + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); 642 + 643 + if (ctx->enc_cd) 644 + return qat_alg_aead_rekey(tfm, key, keylen); 645 + else 646 + return qat_alg_aead_newkey(tfm, key, keylen); 659 647 } 660 648 661 649 static void qat_alg_free_bufl(struct qat_crypto_instance *inst, ··· 717 677 dma_addr_t blp; 718 678 dma_addr_t bloutp = 0; 719 679 struct scatterlist *sg; 720 - size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + 721 - ((1 + n) * sizeof(struct qat_alg_buf)); 680 + size_t sz_out, sz = struct_size(bufl, bufers, n + 1); 722 681 723 682 if (unlikely(!n)) 724 683 return -EINVAL; ··· 754 715 struct qat_alg_buf *bufers; 755 716 756 717 n = sg_nents(sglout); 757 - sz_out = sizeof(struct qat_alg_buf_list) + 758 - ((1 + n) * sizeof(struct qat_alg_buf)); 718 + sz_out = struct_size(buflout, bufers, n + 1); 759 719 sg_nctr = 0; 760 720 buflout = kzalloc_node(sz_out, GFP_ATOMIC, 761 721 dev_to_node(&GET_DEV(inst->accel_dev))); ··· 839 801 struct qat_crypto_instance *inst = ctx->inst; 840 802 struct ablkcipher_request *areq = qat_req->ablkcipher_req; 841 803 uint8_t stat_filed = qat_resp->comn_resp.comn_status; 804 + struct device *dev = &GET_DEV(ctx->inst->accel_dev); 842 805 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); 843 806 844 807 qat_alg_free_bufl(inst, qat_req); 845 808 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) 846 809 res = -EINVAL; 810 + 811 + memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE); 812 + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, 813 + qat_req->iv_paddr); 814 + 847 815 areq->base.complete(&areq->base, res); 848 816 } 849 817 ··· 949 905 return -EINPROGRESS; 950 906 } 951 907 952 - static int 
qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 908 + static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx, 909 + const u8 *key, unsigned int keylen, 910 + int mode) 911 + { 912 + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 913 + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 914 + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 915 + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 916 + 917 + return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode); 918 + } 919 + 920 + static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx, 953 921 const u8 *key, unsigned int keylen, 954 922 int mode) 955 923 { 956 - struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); 924 + struct qat_crypto_instance *inst = NULL; 957 925 struct device *dev; 926 + int node = get_current_node(); 927 + int ret; 958 928 959 - spin_lock(&ctx->lock); 960 - if (ctx->enc_cd) { 961 - /* rekeying */ 962 - dev = &GET_DEV(ctx->inst->accel_dev); 963 - memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 964 - memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 965 - memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 966 - memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 967 - } else { 968 - /* new key */ 969 - int node = get_current_node(); 970 - struct qat_crypto_instance *inst = 971 - qat_crypto_get_instance_node(node); 972 - if (!inst) { 973 - spin_unlock(&ctx->lock); 974 - return -EINVAL; 975 - } 976 - 977 - dev = &GET_DEV(inst->accel_dev); 978 - ctx->inst = inst; 979 - ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 980 - &ctx->enc_cd_paddr, 981 - GFP_ATOMIC); 982 - if (!ctx->enc_cd) { 983 - spin_unlock(&ctx->lock); 984 - return -ENOMEM; 985 - } 986 - ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 987 - &ctx->dec_cd_paddr, 988 - GFP_ATOMIC); 989 - if (!ctx->dec_cd) { 990 - spin_unlock(&ctx->lock); 991 - goto out_free_enc; 992 - } 929 + inst = qat_crypto_get_instance_node(node); 930 + if (!inst) 931 + return 
-EINVAL; 932 + dev = &GET_DEV(inst->accel_dev); 933 + ctx->inst = inst; 934 + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 935 + &ctx->enc_cd_paddr, 936 + GFP_ATOMIC); 937 + if (!ctx->enc_cd) { 938 + ret = -ENOMEM; 939 + goto out_free_instance; 993 940 } 994 - spin_unlock(&ctx->lock); 995 - if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode)) 941 + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 942 + &ctx->dec_cd_paddr, 943 + GFP_ATOMIC); 944 + if (!ctx->dec_cd) { 945 + ret = -ENOMEM; 946 + goto out_free_enc; 947 + } 948 + 949 + ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode); 950 + if (ret) 996 951 goto out_free_all; 997 952 998 953 return 0; ··· 1006 963 dma_free_coherent(dev, sizeof(*ctx->enc_cd), 1007 964 ctx->enc_cd, ctx->enc_cd_paddr); 1008 965 ctx->enc_cd = NULL; 1009 - return -ENOMEM; 966 + out_free_instance: 967 + ctx->inst = NULL; 968 + qat_crypto_put_instance(inst); 969 + return ret; 970 + } 971 + 972 + static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 973 + const u8 *key, unsigned int keylen, 974 + int mode) 975 + { 976 + struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); 977 + 978 + if (ctx->enc_cd) 979 + return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode); 980 + else 981 + return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode); 1010 982 } 1011 983 1012 984 static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm, ··· 1053 995 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); 1054 996 struct icp_qat_fw_la_cipher_req_params *cipher_param; 1055 997 struct icp_qat_fw_la_bulk_req *msg; 998 + struct device *dev = &GET_DEV(ctx->inst->accel_dev); 1056 999 int ret, ctr = 0; 1057 1000 1001 + if (req->nbytes == 0) 1002 + return 0; 1003 + 1004 + qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, 1005 + &qat_req->iv_paddr, GFP_ATOMIC); 1006 + if (!qat_req->iv) 1007 + return -ENOMEM; 1008 + 1058 1009 ret = qat_alg_sgl_to_bufl(ctx->inst, 
req->src, req->dst, qat_req); 1059 - if (unlikely(ret)) 1010 + if (unlikely(ret)) { 1011 + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, 1012 + qat_req->iv_paddr); 1060 1013 return ret; 1014 + } 1061 1015 1062 1016 msg = &qat_req->req; 1063 1017 *msg = ctx->enc_fw_req; ··· 1082 1012 cipher_param = (void *)&qat_req->req.serv_specif_rqpars; 1083 1013 cipher_param->cipher_length = req->nbytes; 1084 1014 cipher_param->cipher_offset = 0; 1085 - memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); 1015 + cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; 1016 + memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE); 1086 1017 do { 1087 1018 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); 1088 1019 } while (ret == -EAGAIN && ctr++ < 10); 1089 1020 1090 1021 if (ret == -EAGAIN) { 1091 1022 qat_alg_free_bufl(ctx->inst, qat_req); 1023 + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, 1024 + qat_req->iv_paddr); 1092 1025 return -EBUSY; 1093 1026 } 1094 1027 return -EINPROGRESS; 1028 + } 1029 + 1030 + static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req) 1031 + { 1032 + if (req->nbytes % AES_BLOCK_SIZE != 0) 1033 + return -EINVAL; 1034 + 1035 + return qat_alg_ablkcipher_encrypt(req); 1095 1036 } 1096 1037 1097 1038 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) ··· 1113 1032 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); 1114 1033 struct icp_qat_fw_la_cipher_req_params *cipher_param; 1115 1034 struct icp_qat_fw_la_bulk_req *msg; 1035 + struct device *dev = &GET_DEV(ctx->inst->accel_dev); 1116 1036 int ret, ctr = 0; 1117 1037 1038 + if (req->nbytes == 0) 1039 + return 0; 1040 + 1041 + qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, 1042 + &qat_req->iv_paddr, GFP_ATOMIC); 1043 + if (!qat_req->iv) 1044 + return -ENOMEM; 1045 + 1118 1046 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); 1119 - if (unlikely(ret)) 1047 + if (unlikely(ret)) { 1048 + 
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, 1049 + qat_req->iv_paddr); 1120 1050 return ret; 1051 + } 1121 1052 1122 1053 msg = &qat_req->req; 1123 1054 *msg = ctx->dec_fw_req; ··· 1142 1049 cipher_param = (void *)&qat_req->req.serv_specif_rqpars; 1143 1050 cipher_param->cipher_length = req->nbytes; 1144 1051 cipher_param->cipher_offset = 0; 1145 - memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); 1052 + cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; 1053 + memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE); 1146 1054 do { 1147 1055 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); 1148 1056 } while (ret == -EAGAIN && ctr++ < 10); 1149 1057 1150 1058 if (ret == -EAGAIN) { 1151 1059 qat_alg_free_bufl(ctx->inst, qat_req); 1060 + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, 1061 + qat_req->iv_paddr); 1152 1062 return -EBUSY; 1153 1063 } 1154 1064 return -EINPROGRESS; 1155 1065 } 1156 1066 1067 + static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req) 1068 + { 1069 + if (req->nbytes % AES_BLOCK_SIZE != 0) 1070 + return -EINVAL; 1071 + 1072 + return qat_alg_ablkcipher_decrypt(req); 1073 + } 1157 1074 static int qat_alg_aead_init(struct crypto_aead *tfm, 1158 1075 enum icp_qat_hw_auth_algo hash, 1159 1076 const char *hash_name) ··· 1222 1119 { 1223 1120 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 1224 1121 1225 - spin_lock_init(&ctx->lock); 1226 1122 tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request); 1227 1123 ctx->tfm = tfm; 1228 1124 return 0; ··· 1323 1221 .cra_u = { 1324 1222 .ablkcipher = { 1325 1223 .setkey = qat_alg_ablkcipher_cbc_setkey, 1326 - .decrypt = qat_alg_ablkcipher_decrypt, 1327 - .encrypt = qat_alg_ablkcipher_encrypt, 1224 + .decrypt = qat_alg_ablkcipher_blk_decrypt, 1225 + .encrypt = qat_alg_ablkcipher_blk_encrypt, 1328 1226 .min_keysize = AES_MIN_KEY_SIZE, 1329 1227 .max_keysize = AES_MAX_KEY_SIZE, 1330 1228 .ivsize = AES_BLOCK_SIZE, ··· 1335 1233 
.cra_driver_name = "qat_aes_ctr", 1336 1234 .cra_priority = 4001, 1337 1235 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 1338 - .cra_blocksize = AES_BLOCK_SIZE, 1236 + .cra_blocksize = 1, 1339 1237 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), 1340 1238 .cra_alignmask = 0, 1341 1239 .cra_type = &crypto_ablkcipher_type, ··· 1367 1265 .cra_u = { 1368 1266 .ablkcipher = { 1369 1267 .setkey = qat_alg_ablkcipher_xts_setkey, 1370 - .decrypt = qat_alg_ablkcipher_decrypt, 1371 - .encrypt = qat_alg_ablkcipher_encrypt, 1268 + .decrypt = qat_alg_ablkcipher_blk_decrypt, 1269 + .encrypt = qat_alg_ablkcipher_blk_encrypt, 1372 1270 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1373 1271 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1374 1272 .ivsize = AES_BLOCK_SIZE,
+2
drivers/crypto/qat/qat_common/qat_crypto.h
··· 88 88 struct qat_crypto_request_buffs buf; 89 89 void (*cb)(struct icp_qat_fw_la_resp *resp, 90 90 struct qat_crypto_request *req); 91 + void *iv; 92 + dma_addr_t iv_paddr; 91 93 }; 92 94 93 95 #endif
+1 -3
drivers/crypto/sahara.c
··· 1384 1384 static int sahara_probe(struct platform_device *pdev) 1385 1385 { 1386 1386 struct sahara_dev *dev; 1387 - struct resource *res; 1388 1387 u32 version; 1389 1388 int irq; 1390 1389 int err; ··· 1397 1398 platform_set_drvdata(pdev, dev); 1398 1399 1399 1400 /* Get the base address */ 1400 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1401 - dev->regs_base = devm_ioremap_resource(&pdev->dev, res); 1401 + dev->regs_base = devm_platform_ioremap_resource(pdev, 0); 1402 1402 if (IS_ERR(dev->regs_base)) 1403 1403 return PTR_ERR(dev->regs_base); 1404 1404
+1 -1
drivers/crypto/stm32/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o 2 + obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o 3 3 obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o 4 4 obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
+3 -3
drivers/crypto/stm32/stm32-hash.c
··· 349 349 return -ETIMEDOUT; 350 350 351 351 if ((hdev->flags & HASH_FLAGS_HMAC) && 352 - (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) { 352 + (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { 353 353 hdev->flags |= HASH_FLAGS_HMAC_KEY; 354 354 stm32_hash_write_key(hdev); 355 355 if (stm32_hash_wait_busy(hdev)) ··· 447 447 448 448 dma_async_issue_pending(hdev->dma_lch); 449 449 450 - if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion, 451 - msecs_to_jiffies(100))) 450 + if (!wait_for_completion_timeout(&hdev->dma_completion, 451 + msecs_to_jiffies(100))) 452 452 err = -ETIMEDOUT; 453 453 454 454 if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
drivers/crypto/stm32/stm32_crc32.c drivers/crypto/stm32/stm32-crc32.c
+30 -17
drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
··· 12 12 */ 13 13 #include "sun4i-ss.h" 14 14 15 - static int sun4i_ss_opti_poll(struct skcipher_request *areq) 15 + static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) 16 16 { 17 17 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 18 18 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); ··· 114 114 return err; 115 115 } 116 116 117 + 118 + static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq) 119 + { 120 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 121 + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); 122 + struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); 123 + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); 124 + int err; 125 + 126 + skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); 127 + skcipher_request_set_callback(subreq, areq->base.flags, NULL, 128 + NULL); 129 + skcipher_request_set_crypt(subreq, areq->src, areq->dst, 130 + areq->cryptlen, areq->iv); 131 + if (ctx->mode & SS_DECRYPTION) 132 + err = crypto_skcipher_decrypt(subreq); 133 + else 134 + err = crypto_skcipher_encrypt(subreq); 135 + skcipher_request_zero(subreq); 136 + 137 + return err; 138 + } 139 + 117 140 /* Generic function that support SG with size not multiple of 4 */ 118 141 static int sun4i_ss_cipher_poll(struct skcipher_request *areq) 119 142 { ··· 163 140 unsigned int todo; 164 141 struct sg_mapping_iter mi, mo; 165 142 unsigned int oi, oo; /* offset for in and out */ 166 - char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ 167 - char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ 168 143 unsigned int ob = 0; /* offset in buf */ 169 144 unsigned int obo = 0; /* offset in bufo*/ 170 145 unsigned int obl = 0; /* length of data in bufo */ ··· 199 178 if (no_chunk == 1 && !need_fallback) 200 179 return sun4i_ss_opti_poll(areq); 201 180 202 - if (need_fallback) { 203 - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); 204 - 
skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); 205 - skcipher_request_set_callback(subreq, areq->base.flags, NULL, 206 - NULL); 207 - skcipher_request_set_crypt(subreq, areq->src, areq->dst, 208 - areq->cryptlen, areq->iv); 209 - if (ctx->mode & SS_DECRYPTION) 210 - err = crypto_skcipher_decrypt(subreq); 211 - else 212 - err = crypto_skcipher_encrypt(subreq); 213 - skcipher_request_zero(subreq); 214 - return err; 215 - } 181 + if (need_fallback) 182 + return sun4i_ss_cipher_poll_fallback(areq); 216 183 217 184 spin_lock_irqsave(&ss->slock, flags); 218 185 ··· 233 224 234 225 while (oleft) { 235 226 if (ileft) { 227 + char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ 228 + 236 229 /* 237 230 * todo is the number of consecutive 4byte word that we 238 231 * can read from current SG ··· 292 281 oo = 0; 293 282 } 294 283 } else { 284 + char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ 285 + 295 286 /* 296 287 * read obl bytes in bufo, we read at maximum for 297 288 * emptying the device
+172 -196
drivers/crypto/talitos.c
··· 265 265 * callback must check err and feedback in descriptor header 266 266 * for device processing status. 267 267 */ 268 - int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, 269 - void (*callback)(struct device *dev, 270 - struct talitos_desc *desc, 271 - void *context, int error), 272 - void *context) 268 + static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, 269 + void (*callback)(struct device *dev, 270 + struct talitos_desc *desc, 271 + void *context, int error), 272 + void *context) 273 273 { 274 274 struct talitos_private *priv = dev_get_drvdata(dev); 275 275 struct talitos_request *request; ··· 319 319 320 320 return -EINPROGRESS; 321 321 } 322 - EXPORT_SYMBOL(talitos_submit); 322 + 323 + static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1) 324 + { 325 + struct talitos_edesc *edesc; 326 + 327 + if (!is_sec1) 328 + return request->desc->hdr; 329 + 330 + if (!request->desc->next_desc) 331 + return request->desc->hdr1; 332 + 333 + edesc = container_of(request->desc, struct talitos_edesc, desc); 334 + 335 + return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1; 336 + } 323 337 324 338 /* 325 339 * process what was done, notify callback of error if not ··· 356 342 357 343 /* descriptors with their done bits set don't get the error */ 358 344 rmb(); 359 - if (!is_sec1) 360 - hdr = request->desc->hdr; 361 - else if (request->desc->next_desc) 362 - hdr = (request->desc + 1)->hdr1; 363 - else 364 - hdr = request->desc->hdr1; 345 + hdr = get_request_hdr(request, is_sec1); 365 346 366 347 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) 367 348 status = 0; ··· 486 477 } 487 478 } 488 479 489 - if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) 490 - return (priv->chan[ch].fifo[iter].desc + 1)->hdr; 480 + if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) { 481 + struct talitos_edesc *edesc; 482 + 483 + edesc = container_of(priv->chan[ch].fifo[iter].desc, 484 + 
struct talitos_edesc, desc); 485 + return ((struct talitos_desc *) 486 + (edesc->buf + edesc->dma_len))->hdr; 487 + } 491 488 492 489 return priv->chan[ch].fifo[iter].desc->hdr; 493 490 } ··· 839 824 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP 840 825 */ 841 826 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) 827 + #ifdef CONFIG_CRYPTO_DEV_TALITOS2 842 828 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) 829 + #else 830 + #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE) 831 + #endif 843 832 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 844 833 845 834 struct talitos_ctx { ··· 967 948 goto out; 968 949 } 969 950 970 - /* 971 - * talitos_edesc - s/w-extended descriptor 972 - * @src_nents: number of segments in input scatterlist 973 - * @dst_nents: number of segments in output scatterlist 974 - * @icv_ool: whether ICV is out-of-line 975 - * @iv_dma: dma address of iv for checking continuity and link table 976 - * @dma_len: length of dma mapped link_tbl space 977 - * @dma_link_tbl: bus physical address of link_tbl/buf 978 - * @desc: h/w descriptor 979 - * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) 980 - * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) 981 - * 982 - * if decrypting (with authcheck), or either one of src_nents or dst_nents 983 - * is greater than 1, an integrity check value is concatenated to the end 984 - * of link_tbl data 985 - */ 986 - struct talitos_edesc { 987 - int src_nents; 988 - int dst_nents; 989 - bool icv_ool; 990 - dma_addr_t iv_dma; 991 - int dma_len; 992 - dma_addr_t dma_link_tbl; 993 - struct talitos_desc desc; 994 - union { 995 - struct talitos_ptr link_tbl[0]; 996 - u8 buf[0]; 997 - }; 998 - }; 999 - 1000 951 static void talitos_sg_unmap(struct device *dev, 1001 952 struct talitos_edesc *edesc, 1002 953 struct scatterlist *src, ··· 997 1008 998 1009 static void ipsec_esp_unmap(struct 
device *dev, 999 1010 struct talitos_edesc *edesc, 1000 - struct aead_request *areq) 1011 + struct aead_request *areq, bool encrypt) 1001 1012 { 1002 1013 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 1003 1014 struct talitos_ctx *ctx = crypto_aead_ctx(aead); 1004 1015 unsigned int ivsize = crypto_aead_ivsize(aead); 1016 + unsigned int authsize = crypto_aead_authsize(aead); 1017 + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); 1005 1018 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; 1006 1019 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; 1007 1020 ··· 1012 1021 DMA_FROM_DEVICE); 1013 1022 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); 1014 1023 1015 - talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 1016 - areq->assoclen); 1024 + talitos_sg_unmap(dev, edesc, areq->src, areq->dst, 1025 + cryptlen + authsize, areq->assoclen); 1017 1026 1018 1027 if (edesc->dma_len) 1019 1028 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, ··· 1023 1032 unsigned int dst_nents = edesc->dst_nents ? 
: 1; 1024 1033 1025 1034 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, 1026 - areq->assoclen + areq->cryptlen - ivsize); 1035 + areq->assoclen + cryptlen - ivsize); 1027 1036 } 1028 1037 } 1029 1038 ··· 1034 1043 struct talitos_desc *desc, void *context, 1035 1044 int err) 1036 1045 { 1037 - struct talitos_private *priv = dev_get_drvdata(dev); 1038 - bool is_sec1 = has_ftr_sec1(priv); 1039 1046 struct aead_request *areq = context; 1040 1047 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1041 - unsigned int authsize = crypto_aead_authsize(authenc); 1042 1048 unsigned int ivsize = crypto_aead_ivsize(authenc); 1043 1049 struct talitos_edesc *edesc; 1044 - struct scatterlist *sg; 1045 - void *icvdata; 1046 1050 1047 1051 edesc = container_of(desc, struct talitos_edesc, desc); 1048 1052 1049 - ipsec_esp_unmap(dev, edesc, areq); 1050 - 1051 - /* copy the generated ICV to dst */ 1052 - if (edesc->icv_ool) { 1053 - if (is_sec1) 1054 - icvdata = edesc->buf + areq->assoclen + areq->cryptlen; 1055 - else 1056 - icvdata = &edesc->link_tbl[edesc->src_nents + 1057 - edesc->dst_nents + 2]; 1058 - sg = sg_last(areq->dst, edesc->dst_nents); 1059 - memcpy((char *)sg_virt(sg) + sg->length - authsize, 1060 - icvdata, authsize); 1061 - } 1053 + ipsec_esp_unmap(dev, edesc, areq, true); 1062 1054 1063 1055 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); 1064 1056 ··· 1058 1084 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 1059 1085 unsigned int authsize = crypto_aead_authsize(authenc); 1060 1086 struct talitos_edesc *edesc; 1061 - struct scatterlist *sg; 1062 1087 char *oicv, *icv; 1063 - struct talitos_private *priv = dev_get_drvdata(dev); 1064 - bool is_sec1 = has_ftr_sec1(priv); 1065 1088 1066 1089 edesc = container_of(desc, struct talitos_edesc, desc); 1067 1090 1068 - ipsec_esp_unmap(dev, edesc, req); 1091 + ipsec_esp_unmap(dev, edesc, req, false); 1069 1092 1070 1093 if (!err) { 1071 1094 /* auth check */ 1072 - sg = sg_last(req->dst, 
edesc->dst_nents ? : 1); 1073 - icv = (char *)sg_virt(sg) + sg->length - authsize; 1074 - 1075 - if (edesc->dma_len) { 1076 - if (is_sec1) 1077 - oicv = (char *)&edesc->dma_link_tbl + 1078 - req->assoclen + req->cryptlen; 1079 - else 1080 - oicv = (char *) 1081 - &edesc->link_tbl[edesc->src_nents + 1082 - edesc->dst_nents + 2]; 1083 - if (edesc->icv_ool) 1084 - icv = oicv + authsize; 1085 - } else 1086 - oicv = (char *)&edesc->link_tbl[0]; 1095 + oicv = edesc->buf + edesc->dma_len; 1096 + icv = oicv - authsize; 1087 1097 1088 1098 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; 1089 1099 } ··· 1086 1128 1087 1129 edesc = container_of(desc, struct talitos_edesc, desc); 1088 1130 1089 - ipsec_esp_unmap(dev, edesc, req); 1131 + ipsec_esp_unmap(dev, edesc, req, false); 1090 1132 1091 1133 /* check ICV auth status */ 1092 1134 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != ··· 1103 1145 * stop at cryptlen bytes 1104 1146 */ 1105 1147 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, 1106 - unsigned int offset, int cryptlen, 1148 + unsigned int offset, int datalen, int elen, 1107 1149 struct talitos_ptr *link_tbl_ptr) 1108 1150 { 1109 - int n_sg = sg_count; 1151 + int n_sg = elen ? 
sg_count + 1 : sg_count; 1110 1152 int count = 0; 1153 + int cryptlen = datalen + elen; 1111 1154 1112 1155 while (cryptlen && sg && n_sg--) { 1113 1156 unsigned int len = sg_dma_len(sg); ··· 1123 1164 if (len > cryptlen) 1124 1165 len = cryptlen; 1125 1166 1167 + if (datalen > 0 && len > datalen) { 1168 + to_talitos_ptr(link_tbl_ptr + count, 1169 + sg_dma_address(sg) + offset, datalen, 0); 1170 + to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); 1171 + count++; 1172 + len -= datalen; 1173 + offset += datalen; 1174 + } 1126 1175 to_talitos_ptr(link_tbl_ptr + count, 1127 1176 sg_dma_address(sg) + offset, len, 0); 1128 1177 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); 1129 1178 count++; 1130 1179 cryptlen -= len; 1180 + datalen -= len; 1131 1181 offset = 0; 1132 1182 1133 1183 next: ··· 1146 1178 /* tag end of link table */ 1147 1179 if (count > 0) 1148 1180 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1, 1149 - DESC_PTR_LNKTBL_RETURN, 0); 1181 + DESC_PTR_LNKTBL_RET, 0); 1150 1182 1151 1183 return count; 1152 1184 } ··· 1154 1186 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, 1155 1187 unsigned int len, struct talitos_edesc *edesc, 1156 1188 struct talitos_ptr *ptr, int sg_count, 1157 - unsigned int offset, int tbl_off, int elen) 1189 + unsigned int offset, int tbl_off, int elen, 1190 + bool force) 1158 1191 { 1159 1192 struct talitos_private *priv = dev_get_drvdata(dev); 1160 1193 bool is_sec1 = has_ftr_sec1(priv); ··· 1165 1196 return 1; 1166 1197 } 1167 1198 to_talitos_ptr_ext_set(ptr, elen, is_sec1); 1168 - if (sg_count == 1) { 1199 + if (sg_count == 1 && !force) { 1169 1200 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); 1170 1201 return sg_count; 1171 1202 } ··· 1173 1204 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); 1174 1205 return sg_count; 1175 1206 } 1176 - sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen, 1207 + sg_count = sg_to_link_tbl_offset(src, sg_count, 
offset, len, elen, 1177 1208 &edesc->link_tbl[tbl_off]); 1178 - if (sg_count == 1) { 1209 + if (sg_count == 1 && !force) { 1179 1210 /* Only one segment now, so no link tbl needed*/ 1180 1211 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); 1181 1212 return sg_count; ··· 1193 1224 unsigned int offset, int tbl_off) 1194 1225 { 1195 1226 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, 1196 - tbl_off, 0); 1227 + tbl_off, 0, false); 1197 1228 } 1198 1229 1199 1230 /* 1200 1231 * fill in and submit ipsec_esp descriptor 1201 1232 */ 1202 1233 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, 1234 + bool encrypt, 1203 1235 void (*callback)(struct device *dev, 1204 1236 struct talitos_desc *desc, 1205 1237 void *context, int error)) ··· 1210 1240 struct talitos_ctx *ctx = crypto_aead_ctx(aead); 1211 1241 struct device *dev = ctx->dev; 1212 1242 struct talitos_desc *desc = &edesc->desc; 1213 - unsigned int cryptlen = areq->cryptlen; 1243 + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); 1214 1244 unsigned int ivsize = crypto_aead_ivsize(aead); 1215 1245 int tbl_off = 0; 1216 1246 int sg_count, ret; ··· 1221 1251 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP; 1222 1252 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3]; 1223 1253 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 
3 : 2]; 1254 + dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize; 1224 1255 1225 1256 /* hmac key */ 1226 1257 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1); ··· 1261 1290 elen = authsize; 1262 1291 1263 1292 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], 1264 - sg_count, areq->assoclen, tbl_off, elen); 1293 + sg_count, areq->assoclen, tbl_off, elen, 1294 + false); 1265 1295 1266 1296 if (ret > 1) { 1267 1297 tbl_off += ret; ··· 1276 1304 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); 1277 1305 } 1278 1306 1279 - ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], 1280 - sg_count, areq->assoclen, tbl_off); 1307 + if (is_ipsec_esp && encrypt) 1308 + elen = authsize; 1309 + else 1310 + elen = 0; 1311 + ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], 1312 + sg_count, areq->assoclen, tbl_off, elen, 1313 + is_ipsec_esp && !encrypt); 1314 + tbl_off += ret; 1281 1315 1282 - if (is_ipsec_esp) 1316 + if (!encrypt && is_ipsec_esp) { 1317 + struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1318 + 1319 + /* Add an entry to the link table for ICV data */ 1320 + to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); 1321 + to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1); 1322 + 1323 + /* icv data follows link tables */ 1324 + to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1); 1283 1325 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); 1284 - 1285 - /* ICV data */ 1286 - if (ret > 1) { 1287 - tbl_off += ret; 1288 - edesc->icv_ool = true; 1289 1326 sync_needed = true; 1290 - 1291 - if (is_ipsec_esp) { 1292 - struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1293 - int offset = (edesc->src_nents + edesc->dst_nents + 2) * 1294 - sizeof(struct talitos_ptr) + authsize; 1295 - 1296 - /* Add an entry to the link table for ICV data */ 1297 - to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); 1298 - to_talitos_ptr_ext_set(tbl_ptr, 
DESC_PTR_LNKTBL_RETURN, 1299 - is_sec1); 1300 - 1301 - /* icv data follows link tables */ 1302 - to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, 1303 - authsize, is_sec1); 1304 - } else { 1305 - dma_addr_t addr = edesc->dma_link_tbl; 1306 - 1307 - if (is_sec1) 1308 - addr += areq->assoclen + cryptlen; 1309 - else 1310 - addr += sizeof(struct talitos_ptr) * tbl_off; 1311 - 1312 - to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1); 1313 - } 1327 + } else if (!encrypt) { 1328 + to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1); 1329 + sync_needed = true; 1314 1330 } else if (!is_ipsec_esp) { 1315 - ret = talitos_sg_map(dev, areq->dst, authsize, edesc, 1316 - &desc->ptr[6], sg_count, areq->assoclen + 1317 - cryptlen, 1318 - tbl_off); 1319 - if (ret > 1) { 1320 - tbl_off += ret; 1321 - edesc->icv_ool = true; 1322 - sync_needed = true; 1323 - } else { 1324 - edesc->icv_ool = false; 1325 - } 1326 - } else { 1327 - edesc->icv_ool = false; 1331 + talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6], 1332 + sg_count, areq->assoclen + cryptlen, tbl_off); 1328 1333 } 1329 1334 1330 1335 /* iv out */ ··· 1316 1367 1317 1368 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1318 1369 if (ret != -EINPROGRESS) { 1319 - ipsec_esp_unmap(dev, edesc, areq); 1370 + ipsec_esp_unmap(dev, edesc, areq, encrypt); 1320 1371 kfree(edesc); 1321 1372 } 1322 1373 return ret; ··· 1384 1435 * and space for two sets of ICVs (stashed and generated) 1385 1436 */ 1386 1437 alloc_len = sizeof(struct talitos_edesc); 1387 - if (src_nents || dst_nents) { 1438 + if (src_nents || dst_nents || !encrypt) { 1388 1439 if (is_sec1) 1389 1440 dma_len = (src_nents ? src_len : 0) + 1390 - (dst_nents ? dst_len : 0); 1441 + (dst_nents ? 
dst_len : 0) + authsize; 1391 1442 else 1392 1443 dma_len = (src_nents + dst_nents + 2) * 1393 - sizeof(struct talitos_ptr) + authsize * 2; 1444 + sizeof(struct talitos_ptr) + authsize; 1394 1445 alloc_len += dma_len; 1395 1446 } else { 1396 1447 dma_len = 0; 1397 - alloc_len += icv_stashing ? authsize : 0; 1398 1448 } 1449 + alloc_len += icv_stashing ? authsize : 0; 1399 1450 1400 1451 /* if its a ahash, add space for a second desc next to the first one */ 1401 1452 if (is_sec1 && !dst) ··· 1415 1466 edesc->dst_nents = dst_nents; 1416 1467 edesc->iv_dma = iv_dma; 1417 1468 edesc->dma_len = dma_len; 1418 - if (dma_len) { 1419 - void *addr = &edesc->link_tbl[0]; 1420 - 1421 - if (is_sec1 && !dst) 1422 - addr += sizeof(struct talitos_desc); 1423 - edesc->dma_link_tbl = dma_map_single(dev, addr, 1469 + if (dma_len) 1470 + edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], 1424 1471 edesc->dma_len, 1425 1472 DMA_BIDIRECTIONAL); 1426 - } 1473 + 1427 1474 return edesc; 1428 1475 } 1429 1476 ··· 1430 1485 unsigned int authsize = crypto_aead_authsize(authenc); 1431 1486 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1432 1487 unsigned int ivsize = crypto_aead_ivsize(authenc); 1488 + unsigned int cryptlen = areq->cryptlen - (encrypt ? 
0 : authsize); 1433 1489 1434 1490 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 1435 - iv, areq->assoclen, areq->cryptlen, 1491 + iv, areq->assoclen, cryptlen, 1436 1492 authsize, ivsize, icv_stashing, 1437 1493 areq->base.flags, encrypt); 1438 1494 } ··· 1452 1506 /* set encrypt */ 1453 1507 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; 1454 1508 1455 - return ipsec_esp(edesc, req, ipsec_esp_encrypt_done); 1509 + return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done); 1456 1510 } 1457 1511 1458 1512 static int aead_decrypt(struct aead_request *req) ··· 1462 1516 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1463 1517 struct talitos_private *priv = dev_get_drvdata(ctx->dev); 1464 1518 struct talitos_edesc *edesc; 1465 - struct scatterlist *sg; 1466 1519 void *icvdata; 1467 - 1468 - req->cryptlen -= authsize; 1469 1520 1470 1521 /* allocate extended descriptor */ 1471 1522 edesc = aead_edesc_alloc(req, req->iv, 1, false); 1472 1523 if (IS_ERR(edesc)) 1473 1524 return PTR_ERR(edesc); 1474 1525 1475 - if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && 1526 + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) && 1527 + (priv->features & TALITOS_FTR_HW_AUTH_CHECK) && 1476 1528 ((!edesc->src_nents && !edesc->dst_nents) || 1477 1529 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { 1478 1530 ··· 1481 1537 1482 1538 /* reset integrity check result bits */ 1483 1539 1484 - return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); 1540 + return ipsec_esp(edesc, req, false, 1541 + ipsec_esp_decrypt_hwauth_done); 1485 1542 } 1486 1543 1487 1544 /* Have to check the ICV with software */ 1488 1545 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; 1489 1546 1490 1547 /* stash incoming ICV for later cmp with ICV generated by the h/w */ 1491 - if (edesc->dma_len) 1492 - icvdata = (char *)&edesc->link_tbl[edesc->src_nents + 1493 - edesc->dst_nents + 2]; 1494 - else 1495 - icvdata = &edesc->link_tbl[0]; 1548 
+ icvdata = edesc->buf + edesc->dma_len; 1496 1549 1497 - sg = sg_last(req->src, edesc->src_nents ? : 1); 1550 + sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize, 1551 + req->assoclen + req->cryptlen - authsize); 1498 1552 1499 - memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize); 1500 - 1501 - return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done); 1553 + return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done); 1502 1554 } 1503 1555 1504 1556 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, ··· 1545 1605 return ablkcipher_setkey(cipher, key, keylen); 1546 1606 } 1547 1607 1608 + static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, 1609 + const u8 *key, unsigned int keylen) 1610 + { 1611 + if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 || 1612 + keylen == AES_KEYSIZE_256) 1613 + return ablkcipher_setkey(cipher, key, keylen); 1614 + 1615 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1616 + 1617 + return -EINVAL; 1618 + } 1619 + 1548 1620 static void common_nonsnoop_unmap(struct device *dev, 1549 1621 struct talitos_edesc *edesc, 1550 1622 struct ablkcipher_request *areq) ··· 1576 1624 int err) 1577 1625 { 1578 1626 struct ablkcipher_request *areq = context; 1627 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1628 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1629 + unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); 1579 1630 struct talitos_edesc *edesc; 1580 1631 1581 1632 edesc = container_of(desc, struct talitos_edesc, desc); 1582 1633 1583 1634 common_nonsnoop_unmap(dev, edesc, areq); 1635 + memcpy(areq->info, ctx->iv, ivsize); 1584 1636 1585 1637 kfree(edesc); 1586 1638 ··· 1679 1723 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1680 1724 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1681 1725 struct talitos_edesc *edesc; 1726 + unsigned int blocksize = 1727 + 
crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); 1728 + 1729 + if (!areq->nbytes) 1730 + return 0; 1731 + 1732 + if (areq->nbytes % blocksize) 1733 + return -EINVAL; 1682 1734 1683 1735 /* allocate extended descriptor */ 1684 1736 edesc = ablkcipher_edesc_alloc(areq, true); ··· 1704 1740 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1705 1741 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1706 1742 struct talitos_edesc *edesc; 1743 + unsigned int blocksize = 1744 + crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); 1745 + 1746 + if (!areq->nbytes) 1747 + return 0; 1748 + 1749 + if (areq->nbytes % blocksize) 1750 + return -EINVAL; 1707 1751 1708 1752 /* allocate extended descriptor */ 1709 1753 edesc = ablkcipher_edesc_alloc(areq, false); ··· 1731 1759 struct talitos_private *priv = dev_get_drvdata(dev); 1732 1760 bool is_sec1 = has_ftr_sec1(priv); 1733 1761 struct talitos_desc *desc = &edesc->desc; 1734 - struct talitos_desc *desc2 = desc + 1; 1762 + struct talitos_desc *desc2 = (struct talitos_desc *) 1763 + (edesc->buf + edesc->dma_len); 1735 1764 1736 1765 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1737 1766 if (desc->next_desc && 1738 1767 desc->ptr[5].ptr != desc2->ptr[5].ptr) 1739 1768 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); 1740 1769 1741 - talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); 1770 + if (req_ctx->psrc) 1771 + talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); 1742 1772 1743 1773 /* When using hashctx-in, must unmap it. 
*/ 1744 1774 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) ··· 1807 1833 1808 1834 static int common_nonsnoop_hash(struct talitos_edesc *edesc, 1809 1835 struct ahash_request *areq, unsigned int length, 1810 - unsigned int offset, 1811 1836 void (*callback) (struct device *dev, 1812 1837 struct talitos_desc *desc, 1813 1838 void *context, int error)) ··· 1845 1872 1846 1873 sg_count = edesc->src_nents ?: 1; 1847 1874 if (is_sec1 && sg_count > 1) 1848 - sg_pcopy_to_buffer(req_ctx->psrc, sg_count, 1849 - edesc->buf + sizeof(struct talitos_desc), 1850 - length, req_ctx->nbuf); 1875 + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); 1851 1876 else if (length) 1852 1877 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, 1853 1878 DMA_TO_DEVICE); ··· 1858 1887 DMA_TO_DEVICE); 1859 1888 } else { 1860 1889 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1861 - &desc->ptr[3], sg_count, offset, 0); 1890 + &desc->ptr[3], sg_count, 0, 0); 1862 1891 if (sg_count > 1) 1863 1892 sync_needed = true; 1864 1893 } ··· 1882 1911 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); 1883 1912 1884 1913 if (is_sec1 && req_ctx->nbuf && length) { 1885 - struct talitos_desc *desc2 = desc + 1; 1914 + struct talitos_desc *desc2 = (struct talitos_desc *) 1915 + (edesc->buf + edesc->dma_len); 1886 1916 dma_addr_t next_desc; 1887 1917 1888 1918 memset(desc2, 0, sizeof(*desc2)); ··· 1904 1932 DMA_TO_DEVICE); 1905 1933 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); 1906 1934 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1907 - &desc2->ptr[3], sg_count, offset, 0); 1935 + &desc2->ptr[3], sg_count, 0, 0); 1908 1936 if (sg_count > 1) 1909 1937 sync_needed = true; 1910 1938 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); ··· 2015 2043 struct device *dev = ctx->dev; 2016 2044 struct talitos_private *priv = dev_get_drvdata(dev); 2017 2045 bool is_sec1 = has_ftr_sec1(priv); 2018 - int offset = 0; 2019 2046 u8 *ctx_buf = 
req_ctx->buf[req_ctx->buf_idx]; 2020 2047 2021 2048 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { ··· 2054 2083 sg_chain(req_ctx->bufsl, 2, areq->src); 2055 2084 req_ctx->psrc = req_ctx->bufsl; 2056 2085 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { 2086 + int offset; 2087 + 2057 2088 if (nbytes_to_hash > blocksize) 2058 2089 offset = blocksize - req_ctx->nbuf; 2059 2090 else ··· 2068 2095 sg_copy_to_buffer(areq->src, nents, 2069 2096 ctx_buf + req_ctx->nbuf, offset); 2070 2097 req_ctx->nbuf += offset; 2071 - req_ctx->psrc = areq->src; 2098 + req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src, 2099 + offset); 2072 2100 } else 2073 2101 req_ctx->psrc = areq->src; 2074 2102 ··· 2109 2135 if (ctx->keylen && (req_ctx->first || req_ctx->last)) 2110 2136 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; 2111 2137 2112 - return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset, 2113 - ahash_done); 2138 + return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done); 2114 2139 } 2115 2140 2116 2141 static int ahash_update(struct ahash_request *areq) ··· 2312 2339 .base = { 2313 2340 .cra_name = "authenc(hmac(sha1),cbc(aes))", 2314 2341 .cra_driver_name = "authenc-hmac-sha1-" 2315 - "cbc-aes-talitos", 2342 + "cbc-aes-talitos-hsna", 2316 2343 .cra_blocksize = AES_BLOCK_SIZE, 2317 2344 .cra_flags = CRYPTO_ALG_ASYNC, 2318 2345 }, ··· 2357 2384 .cra_name = "authenc(hmac(sha1)," 2358 2385 "cbc(des3_ede))", 2359 2386 .cra_driver_name = "authenc-hmac-sha1-" 2360 - "cbc-3des-talitos", 2387 + "cbc-3des-talitos-hsna", 2361 2388 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2362 2389 .cra_flags = CRYPTO_ALG_ASYNC, 2363 2390 }, ··· 2400 2427 .base = { 2401 2428 .cra_name = "authenc(hmac(sha224),cbc(aes))", 2402 2429 .cra_driver_name = "authenc-hmac-sha224-" 2403 - "cbc-aes-talitos", 2430 + "cbc-aes-talitos-hsna", 2404 2431 .cra_blocksize = AES_BLOCK_SIZE, 2405 2432 .cra_flags = CRYPTO_ALG_ASYNC, 2406 2433 }, ··· 2445 2472 .cra_name = 
"authenc(hmac(sha224)," 2446 2473 "cbc(des3_ede))", 2447 2474 .cra_driver_name = "authenc-hmac-sha224-" 2448 - "cbc-3des-talitos", 2475 + "cbc-3des-talitos-hsna", 2449 2476 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2450 2477 .cra_flags = CRYPTO_ALG_ASYNC, 2451 2478 }, ··· 2488 2515 .base = { 2489 2516 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2490 2517 .cra_driver_name = "authenc-hmac-sha256-" 2491 - "cbc-aes-talitos", 2518 + "cbc-aes-talitos-hsna", 2492 2519 .cra_blocksize = AES_BLOCK_SIZE, 2493 2520 .cra_flags = CRYPTO_ALG_ASYNC, 2494 2521 }, ··· 2533 2560 .cra_name = "authenc(hmac(sha256)," 2534 2561 "cbc(des3_ede))", 2535 2562 .cra_driver_name = "authenc-hmac-sha256-" 2536 - "cbc-3des-talitos", 2563 + "cbc-3des-talitos-hsna", 2537 2564 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2538 2565 .cra_flags = CRYPTO_ALG_ASYNC, 2539 2566 }, ··· 2662 2689 .base = { 2663 2690 .cra_name = "authenc(hmac(md5),cbc(aes))", 2664 2691 .cra_driver_name = "authenc-hmac-md5-" 2665 - "cbc-aes-talitos", 2692 + "cbc-aes-talitos-hsna", 2666 2693 .cra_blocksize = AES_BLOCK_SIZE, 2667 2694 .cra_flags = CRYPTO_ALG_ASYNC, 2668 2695 }, ··· 2705 2732 .base = { 2706 2733 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2707 2734 .cra_driver_name = "authenc-hmac-md5-" 2708 - "cbc-3des-talitos", 2735 + "cbc-3des-talitos-hsna", 2709 2736 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2710 2737 .cra_flags = CRYPTO_ALG_ASYNC, 2711 2738 }, ··· 2733 2760 .cra_ablkcipher = { 2734 2761 .min_keysize = AES_MIN_KEY_SIZE, 2735 2762 .max_keysize = AES_MAX_KEY_SIZE, 2736 - .ivsize = AES_BLOCK_SIZE, 2763 + .setkey = ablkcipher_aes_setkey, 2737 2764 } 2738 2765 }, 2739 2766 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | ··· 2750 2777 .min_keysize = AES_MIN_KEY_SIZE, 2751 2778 .max_keysize = AES_MAX_KEY_SIZE, 2752 2779 .ivsize = AES_BLOCK_SIZE, 2780 + .setkey = ablkcipher_aes_setkey, 2753 2781 } 2754 2782 }, 2755 2783 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | ··· 2761 2787 .alg.crypto = { 2762 
2788 .cra_name = "ctr(aes)", 2763 2789 .cra_driver_name = "ctr-aes-talitos", 2764 - .cra_blocksize = AES_BLOCK_SIZE, 2790 + .cra_blocksize = 1, 2765 2791 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 2766 2792 CRYPTO_ALG_ASYNC, 2767 2793 .cra_ablkcipher = { 2768 2794 .min_keysize = AES_MIN_KEY_SIZE, 2769 2795 .max_keysize = AES_MAX_KEY_SIZE, 2770 2796 .ivsize = AES_BLOCK_SIZE, 2797 + .setkey = ablkcipher_aes_setkey, 2771 2798 } 2772 2799 }, 2773 2800 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | ··· 2785 2810 .cra_ablkcipher = { 2786 2811 .min_keysize = DES_KEY_SIZE, 2787 2812 .max_keysize = DES_KEY_SIZE, 2788 - .ivsize = DES_BLOCK_SIZE, 2789 2813 .setkey = ablkcipher_des_setkey, 2790 2814 } 2791 2815 }, ··· 2819 2845 .cra_ablkcipher = { 2820 2846 .min_keysize = DES3_EDE_KEY_SIZE, 2821 2847 .max_keysize = DES3_EDE_KEY_SIZE, 2822 - .ivsize = DES3_EDE_BLOCK_SIZE, 2823 2848 .setkey = ablkcipher_des3_setkey, 2824 2849 } 2825 2850 }, ··· 3243 3270 alg->cra_priority = t_alg->algt.priority; 3244 3271 else 3245 3272 alg->cra_priority = TALITOS_CRA_PRIORITY; 3246 - alg->cra_alignmask = 0; 3273 + if (has_ftr_sec1(priv)) 3274 + alg->cra_alignmask = 3; 3275 + else 3276 + alg->cra_alignmask = 0; 3247 3277 alg->cra_ctxsize = sizeof(struct talitos_ctx); 3248 3278 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; 3249 3279 ··· 3394 3418 if (err) 3395 3419 goto err_out; 3396 3420 3397 - if (of_device_is_compatible(np, "fsl,sec1.0")) { 3421 + if (has_ftr_sec1(priv)) { 3398 3422 if (priv->num_channels == 1) 3399 3423 tasklet_init(&priv->done_task[0], talitos1_done_ch0, 3400 3424 (unsigned long)dev);
+35 -38
drivers/crypto/talitos.h
··· 1 + /* SPDX-License-Identifier: BSD-3-Clause */ 1 2 /* 2 3 * Freescale SEC (talitos) device register and descriptor header defines 3 4 * 4 5 * Copyright (c) 2006-2011 Freescale Semiconductor, Inc. 5 - * 6 - * Redistribution and use in source and binary forms, with or without 7 - * modification, are permitted provided that the following conditions 8 - * are met: 9 - * 10 - * 1. Redistributions of source code must retain the above copyright 11 - * notice, this list of conditions and the following disclaimer. 12 - * 2. Redistributions in binary form must reproduce the above copyright 13 - * notice, this list of conditions and the following disclaimer in the 14 - * documentation and/or other materials provided with the distribution. 15 - * 3. The name of the author may not be used to endorse or promote products 16 - * derived from this software without specific prior written permission. 17 - * 18 - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 - * 29 6 */ 30 7 31 8 #define TALITOS_TIMEOUT 100000 ··· 41 64 }; 42 65 43 66 #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) 67 + 68 + /* 69 + * talitos_edesc - s/w-extended descriptor 70 + * @src_nents: number of segments in input scatterlist 71 + * @dst_nents: number of segments in output scatterlist 72 + * @iv_dma: dma address of iv for checking continuity and link table 73 + * @dma_len: length of dma mapped link_tbl space 74 + * @dma_link_tbl: bus physical address of link_tbl/buf 75 + * @desc: h/w descriptor 76 + * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) 77 + * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) 78 + * 79 + * if decrypting (with authcheck), or either one of src_nents or dst_nents 80 + * is greater than 1, an integrity check value is concatenated to the end 81 + * of link_tbl data 82 + */ 83 + struct talitos_edesc { 84 + int src_nents; 85 + int dst_nents; 86 + dma_addr_t iv_dma; 87 + int dma_len; 88 + dma_addr_t dma_link_tbl; 89 + struct talitos_desc desc; 90 + union { 91 + struct talitos_ptr link_tbl[0]; 92 + u8 buf[0]; 93 + }; 94 + }; 44 95 45 96 /** 46 97 * talitos_request - descriptor submission request ··· 155 150 bool rng_registered; 156 151 }; 157 152 158 - extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, 159 - void (*callback)(struct device *dev, 160 - struct talitos_desc *desc, 161 - void *context, int error), 162 - void *context); 163 - 164 153 /* .features flag */ 165 154 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 166 155 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 ··· 169 170 */ 170 171 static inline bool has_ftr_sec1(struct talitos_private *priv) 171 172 { 172 - #if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2) 173 - return priv->features & TALITOS_FTR_SEC1 ? 
true : false; 174 - #elif defined(CONFIG_CRYPTO_DEV_TALITOS1) 175 - return true; 176 - #else 177 - return false; 178 - #endif 173 + if (IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1) && 174 + IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS2)) 175 + return priv->features & TALITOS_FTR_SEC1; 176 + 177 + return IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1); 179 178 } 180 179 181 180 /* ··· 409 412 410 413 /* link table extent field bits */ 411 414 #define DESC_PTR_LNKTBL_JUMP 0x80 412 - #define DESC_PTR_LNKTBL_RETURN 0x02 415 + #define DESC_PTR_LNKTBL_RET 0x02 413 416 #define DESC_PTR_LNKTBL_NEXT 0x01
+66 -115
drivers/crypto/vmx/aes_cbc.c
··· 7 7 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> 8 8 */ 9 9 10 - #include <linux/types.h> 11 - #include <linux/err.h> 12 - #include <linux/crypto.h> 13 - #include <linux/delay.h> 14 10 #include <asm/simd.h> 15 11 #include <asm/switch_to.h> 16 12 #include <crypto/aes.h> 17 13 #include <crypto/internal/simd.h> 18 - #include <crypto/scatterwalk.h> 19 - #include <crypto/skcipher.h> 14 + #include <crypto/internal/skcipher.h> 20 15 21 16 #include "aesp8-ppc.h" 22 17 23 18 struct p8_aes_cbc_ctx { 24 - struct crypto_sync_skcipher *fallback; 19 + struct crypto_skcipher *fallback; 25 20 struct aes_key enc_key; 26 21 struct aes_key dec_key; 27 22 }; 28 23 29 - static int p8_aes_cbc_init(struct crypto_tfm *tfm) 24 + static int p8_aes_cbc_init(struct crypto_skcipher *tfm) 30 25 { 31 - const char *alg = crypto_tfm_alg_name(tfm); 32 - struct crypto_sync_skcipher *fallback; 33 - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 26 + struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 27 + struct crypto_skcipher *fallback; 34 28 35 - fallback = crypto_alloc_sync_skcipher(alg, 0, 36 - CRYPTO_ALG_NEED_FALLBACK); 37 - 29 + fallback = crypto_alloc_skcipher("cbc(aes)", 0, 30 + CRYPTO_ALG_NEED_FALLBACK | 31 + CRYPTO_ALG_ASYNC); 38 32 if (IS_ERR(fallback)) { 39 - printk(KERN_ERR 40 - "Failed to allocate transformation for '%s': %ld\n", 41 - alg, PTR_ERR(fallback)); 33 + pr_err("Failed to allocate cbc(aes) fallback: %ld\n", 34 + PTR_ERR(fallback)); 42 35 return PTR_ERR(fallback); 43 36 } 44 37 45 - crypto_sync_skcipher_set_flags( 46 - fallback, 47 - crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); 38 + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + 39 + crypto_skcipher_reqsize(fallback)); 48 40 ctx->fallback = fallback; 49 - 50 41 return 0; 51 42 } 52 43 53 - static void p8_aes_cbc_exit(struct crypto_tfm *tfm) 44 + static void p8_aes_cbc_exit(struct crypto_skcipher *tfm) 54 45 { 55 - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 46 + struct 
p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 56 47 57 - if (ctx->fallback) { 58 - crypto_free_sync_skcipher(ctx->fallback); 59 - ctx->fallback = NULL; 60 - } 48 + crypto_free_skcipher(ctx->fallback); 61 49 } 62 50 63 - static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, 51 + static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, 64 52 unsigned int keylen) 65 53 { 54 + struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 66 55 int ret; 67 - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 68 56 69 57 preempt_disable(); 70 58 pagefault_disable(); ··· 63 75 pagefault_enable(); 64 76 preempt_enable(); 65 77 66 - ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 78 + ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); 67 79 68 80 return ret ? -EINVAL : 0; 69 81 } 70 82 71 - static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, 72 - struct scatterlist *dst, 73 - struct scatterlist *src, unsigned int nbytes) 83 + static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc) 74 84 { 85 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 86 + const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 87 + struct skcipher_walk walk; 88 + unsigned int nbytes; 75 89 int ret; 76 - struct blkcipher_walk walk; 77 - struct p8_aes_cbc_ctx *ctx = 78 - crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 79 90 80 91 if (!crypto_simd_usable()) { 81 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 82 - skcipher_request_set_sync_tfm(req, ctx->fallback); 83 - skcipher_request_set_callback(req, desc->flags, NULL, NULL); 84 - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 85 - ret = crypto_skcipher_encrypt(req); 86 - skcipher_request_zero(req); 87 - } else { 88 - blkcipher_walk_init(&walk, dst, src, nbytes); 89 - ret = blkcipher_walk_virt(desc, &walk); 90 - while ((nbytes = walk.nbytes)) { 91 - preempt_disable(); 92 - pagefault_disable(); 93 - enable_kernel_vsx(); 94 - 
aes_p8_cbc_encrypt(walk.src.virt.addr, 95 - walk.dst.virt.addr, 96 - nbytes & AES_BLOCK_MASK, 97 - &ctx->enc_key, walk.iv, 1); 98 - disable_kernel_vsx(); 99 - pagefault_enable(); 100 - preempt_enable(); 92 + struct skcipher_request *subreq = skcipher_request_ctx(req); 101 93 102 - nbytes &= AES_BLOCK_SIZE - 1; 103 - ret = blkcipher_walk_done(desc, &walk, nbytes); 104 - } 94 + *subreq = *req; 95 + skcipher_request_set_tfm(subreq, ctx->fallback); 96 + return enc ? crypto_skcipher_encrypt(subreq) : 97 + crypto_skcipher_decrypt(subreq); 105 98 } 106 99 100 + ret = skcipher_walk_virt(&walk, req, false); 101 + while ((nbytes = walk.nbytes) != 0) { 102 + preempt_disable(); 103 + pagefault_disable(); 104 + enable_kernel_vsx(); 105 + aes_p8_cbc_encrypt(walk.src.virt.addr, 106 + walk.dst.virt.addr, 107 + round_down(nbytes, AES_BLOCK_SIZE), 108 + enc ? &ctx->enc_key : &ctx->dec_key, 109 + walk.iv, enc); 110 + disable_kernel_vsx(); 111 + pagefault_enable(); 112 + preempt_enable(); 113 + 114 + ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); 115 + } 107 116 return ret; 108 117 } 109 118 110 - static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, 111 - struct scatterlist *dst, 112 - struct scatterlist *src, unsigned int nbytes) 119 + static int p8_aes_cbc_encrypt(struct skcipher_request *req) 113 120 { 114 - int ret; 115 - struct blkcipher_walk walk; 116 - struct p8_aes_cbc_ctx *ctx = 117 - crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 118 - 119 - if (!crypto_simd_usable()) { 120 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 121 - skcipher_request_set_sync_tfm(req, ctx->fallback); 122 - skcipher_request_set_callback(req, desc->flags, NULL, NULL); 123 - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 124 - ret = crypto_skcipher_decrypt(req); 125 - skcipher_request_zero(req); 126 - } else { 127 - blkcipher_walk_init(&walk, dst, src, nbytes); 128 - ret = blkcipher_walk_virt(desc, &walk); 129 - while ((nbytes = walk.nbytes)) { 130 - 
preempt_disable(); 131 - pagefault_disable(); 132 - enable_kernel_vsx(); 133 - aes_p8_cbc_encrypt(walk.src.virt.addr, 134 - walk.dst.virt.addr, 135 - nbytes & AES_BLOCK_MASK, 136 - &ctx->dec_key, walk.iv, 0); 137 - disable_kernel_vsx(); 138 - pagefault_enable(); 139 - preempt_enable(); 140 - 141 - nbytes &= AES_BLOCK_SIZE - 1; 142 - ret = blkcipher_walk_done(desc, &walk, nbytes); 143 - } 144 - } 145 - 146 - return ret; 121 + return p8_aes_cbc_crypt(req, 1); 147 122 } 148 123 124 + static int p8_aes_cbc_decrypt(struct skcipher_request *req) 125 + { 126 + return p8_aes_cbc_crypt(req, 0); 127 + } 149 128 150 - struct crypto_alg p8_aes_cbc_alg = { 151 - .cra_name = "cbc(aes)", 152 - .cra_driver_name = "p8_aes_cbc", 153 - .cra_module = THIS_MODULE, 154 - .cra_priority = 2000, 155 - .cra_type = &crypto_blkcipher_type, 156 - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 157 - .cra_alignmask = 0, 158 - .cra_blocksize = AES_BLOCK_SIZE, 159 - .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), 160 - .cra_init = p8_aes_cbc_init, 161 - .cra_exit = p8_aes_cbc_exit, 162 - .cra_blkcipher = { 163 - .ivsize = AES_BLOCK_SIZE, 164 - .min_keysize = AES_MIN_KEY_SIZE, 165 - .max_keysize = AES_MAX_KEY_SIZE, 166 - .setkey = p8_aes_cbc_setkey, 167 - .encrypt = p8_aes_cbc_encrypt, 168 - .decrypt = p8_aes_cbc_decrypt, 169 - }, 129 + struct skcipher_alg p8_aes_cbc_alg = { 130 + .base.cra_name = "cbc(aes)", 131 + .base.cra_driver_name = "p8_aes_cbc", 132 + .base.cra_module = THIS_MODULE, 133 + .base.cra_priority = 2000, 134 + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, 135 + .base.cra_blocksize = AES_BLOCK_SIZE, 136 + .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), 137 + .setkey = p8_aes_cbc_setkey, 138 + .encrypt = p8_aes_cbc_encrypt, 139 + .decrypt = p8_aes_cbc_decrypt, 140 + .init = p8_aes_cbc_init, 141 + .exit = p8_aes_cbc_exit, 142 + .min_keysize = AES_MIN_KEY_SIZE, 143 + .max_keysize = AES_MAX_KEY_SIZE, 144 + .ivsize = AES_BLOCK_SIZE, 170 145 };
+69 -94
drivers/crypto/vmx/aes_ctr.c
··· 7 7 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> 8 8 */ 9 9 10 - #include <linux/types.h> 11 - #include <linux/err.h> 12 - #include <linux/crypto.h> 13 - #include <linux/delay.h> 14 10 #include <asm/simd.h> 15 11 #include <asm/switch_to.h> 16 12 #include <crypto/aes.h> 17 13 #include <crypto/internal/simd.h> 18 - #include <crypto/scatterwalk.h> 19 - #include <crypto/skcipher.h> 14 + #include <crypto/internal/skcipher.h> 20 15 21 16 #include "aesp8-ppc.h" 22 17 23 18 struct p8_aes_ctr_ctx { 24 - struct crypto_sync_skcipher *fallback; 19 + struct crypto_skcipher *fallback; 25 20 struct aes_key enc_key; 26 21 }; 27 22 28 - static int p8_aes_ctr_init(struct crypto_tfm *tfm) 23 + static int p8_aes_ctr_init(struct crypto_skcipher *tfm) 29 24 { 30 - const char *alg = crypto_tfm_alg_name(tfm); 31 - struct crypto_sync_skcipher *fallback; 32 - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 25 + struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); 26 + struct crypto_skcipher *fallback; 33 27 34 - fallback = crypto_alloc_sync_skcipher(alg, 0, 35 - CRYPTO_ALG_NEED_FALLBACK); 28 + fallback = crypto_alloc_skcipher("ctr(aes)", 0, 29 + CRYPTO_ALG_NEED_FALLBACK | 30 + CRYPTO_ALG_ASYNC); 36 31 if (IS_ERR(fallback)) { 37 - printk(KERN_ERR 38 - "Failed to allocate transformation for '%s': %ld\n", 39 - alg, PTR_ERR(fallback)); 32 + pr_err("Failed to allocate ctr(aes) fallback: %ld\n", 33 + PTR_ERR(fallback)); 40 34 return PTR_ERR(fallback); 41 35 } 42 36 43 - crypto_sync_skcipher_set_flags( 44 - fallback, 45 - crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); 37 + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + 38 + crypto_skcipher_reqsize(fallback)); 46 39 ctx->fallback = fallback; 47 - 48 40 return 0; 49 41 } 50 42 51 - static void p8_aes_ctr_exit(struct crypto_tfm *tfm) 43 + static void p8_aes_ctr_exit(struct crypto_skcipher *tfm) 52 44 { 53 - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 45 + struct p8_aes_ctr_ctx *ctx = 
crypto_skcipher_ctx(tfm); 54 46 55 - if (ctx->fallback) { 56 - crypto_free_sync_skcipher(ctx->fallback); 57 - ctx->fallback = NULL; 58 - } 47 + crypto_free_skcipher(ctx->fallback); 59 48 } 60 49 61 - static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, 50 + static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, 62 51 unsigned int keylen) 63 52 { 53 + struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); 64 54 int ret; 65 - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 66 55 67 56 preempt_disable(); 68 57 pagefault_disable(); ··· 61 72 pagefault_enable(); 62 73 preempt_enable(); 63 74 64 - ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 75 + ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); 65 76 66 77 return ret ? -EINVAL : 0; 67 78 } 68 79 69 - static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, 70 - struct blkcipher_walk *walk) 80 + static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx, 81 + struct skcipher_walk *walk) 71 82 { 72 83 u8 *ctrblk = walk->iv; 73 84 u8 keystream[AES_BLOCK_SIZE]; ··· 87 98 crypto_inc(ctrblk, AES_BLOCK_SIZE); 88 99 } 89 100 90 - static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, 91 - struct scatterlist *dst, 92 - struct scatterlist *src, unsigned int nbytes) 101 + static int p8_aes_ctr_crypt(struct skcipher_request *req) 93 102 { 103 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 104 + const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); 105 + struct skcipher_walk walk; 106 + unsigned int nbytes; 94 107 int ret; 95 - u64 inc; 96 - struct blkcipher_walk walk; 97 - struct p8_aes_ctr_ctx *ctx = 98 - crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 99 108 100 109 if (!crypto_simd_usable()) { 101 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 102 - skcipher_request_set_sync_tfm(req, ctx->fallback); 103 - skcipher_request_set_callback(req, desc->flags, NULL, NULL); 104 - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 
105 - ret = crypto_skcipher_encrypt(req); 106 - skcipher_request_zero(req); 107 - } else { 108 - blkcipher_walk_init(&walk, dst, src, nbytes); 109 - ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); 110 - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { 111 - preempt_disable(); 112 - pagefault_disable(); 113 - enable_kernel_vsx(); 114 - aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, 115 - walk.dst.virt.addr, 116 - (nbytes & 117 - AES_BLOCK_MASK) / 118 - AES_BLOCK_SIZE, 119 - &ctx->enc_key, 120 - walk.iv); 121 - disable_kernel_vsx(); 122 - pagefault_enable(); 123 - preempt_enable(); 110 + struct skcipher_request *subreq = skcipher_request_ctx(req); 124 111 125 - /* We need to update IV mostly for last bytes/round */ 126 - inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE; 127 - if (inc > 0) 128 - while (inc--) 129 - crypto_inc(walk.iv, AES_BLOCK_SIZE); 130 - 131 - nbytes &= AES_BLOCK_SIZE - 1; 132 - ret = blkcipher_walk_done(desc, &walk, nbytes); 133 - } 134 - if (walk.nbytes) { 135 - p8_aes_ctr_final(ctx, &walk); 136 - ret = blkcipher_walk_done(desc, &walk, 0); 137 - } 112 + *subreq = *req; 113 + skcipher_request_set_tfm(subreq, ctx->fallback); 114 + return crypto_skcipher_encrypt(subreq); 138 115 } 139 116 117 + ret = skcipher_walk_virt(&walk, req, false); 118 + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { 119 + preempt_disable(); 120 + pagefault_disable(); 121 + enable_kernel_vsx(); 122 + aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, 123 + walk.dst.virt.addr, 124 + nbytes / AES_BLOCK_SIZE, 125 + &ctx->enc_key, walk.iv); 126 + disable_kernel_vsx(); 127 + pagefault_enable(); 128 + preempt_enable(); 129 + 130 + do { 131 + crypto_inc(walk.iv, AES_BLOCK_SIZE); 132 + } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE); 133 + 134 + ret = skcipher_walk_done(&walk, nbytes); 135 + } 136 + if (nbytes) { 137 + p8_aes_ctr_final(ctx, &walk); 138 + ret = skcipher_walk_done(&walk, 0); 139 + } 140 140 return ret; 141 141 } 142 142 143 - struct crypto_alg 
p8_aes_ctr_alg = { 144 - .cra_name = "ctr(aes)", 145 - .cra_driver_name = "p8_aes_ctr", 146 - .cra_module = THIS_MODULE, 147 - .cra_priority = 2000, 148 - .cra_type = &crypto_blkcipher_type, 149 - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 150 - .cra_alignmask = 0, 151 - .cra_blocksize = 1, 152 - .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), 153 - .cra_init = p8_aes_ctr_init, 154 - .cra_exit = p8_aes_ctr_exit, 155 - .cra_blkcipher = { 156 - .ivsize = AES_BLOCK_SIZE, 157 - .min_keysize = AES_MIN_KEY_SIZE, 158 - .max_keysize = AES_MAX_KEY_SIZE, 159 - .setkey = p8_aes_ctr_setkey, 160 - .encrypt = p8_aes_ctr_crypt, 161 - .decrypt = p8_aes_ctr_crypt, 162 - }, 143 + struct skcipher_alg p8_aes_ctr_alg = { 144 + .base.cra_name = "ctr(aes)", 145 + .base.cra_driver_name = "p8_aes_ctr", 146 + .base.cra_module = THIS_MODULE, 147 + .base.cra_priority = 2000, 148 + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, 149 + .base.cra_blocksize = 1, 150 + .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), 151 + .setkey = p8_aes_ctr_setkey, 152 + .encrypt = p8_aes_ctr_crypt, 153 + .decrypt = p8_aes_ctr_crypt, 154 + .init = p8_aes_ctr_init, 155 + .exit = p8_aes_ctr_exit, 156 + .min_keysize = AES_MIN_KEY_SIZE, 157 + .max_keysize = AES_MAX_KEY_SIZE, 158 + .ivsize = AES_BLOCK_SIZE, 159 + .chunksize = AES_BLOCK_SIZE, 163 160 };
+77 -98
drivers/crypto/vmx/aes_xts.c
··· 7 7 * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> 8 8 */ 9 9 10 - #include <linux/types.h> 11 - #include <linux/err.h> 12 - #include <linux/crypto.h> 13 - #include <linux/delay.h> 14 10 #include <asm/simd.h> 15 11 #include <asm/switch_to.h> 16 12 #include <crypto/aes.h> 17 13 #include <crypto/internal/simd.h> 18 - #include <crypto/scatterwalk.h> 14 + #include <crypto/internal/skcipher.h> 19 15 #include <crypto/xts.h> 20 - #include <crypto/skcipher.h> 21 16 22 17 #include "aesp8-ppc.h" 23 18 24 19 struct p8_aes_xts_ctx { 25 - struct crypto_sync_skcipher *fallback; 20 + struct crypto_skcipher *fallback; 26 21 struct aes_key enc_key; 27 22 struct aes_key dec_key; 28 23 struct aes_key tweak_key; 29 24 }; 30 25 31 - static int p8_aes_xts_init(struct crypto_tfm *tfm) 26 + static int p8_aes_xts_init(struct crypto_skcipher *tfm) 32 27 { 33 - const char *alg = crypto_tfm_alg_name(tfm); 34 - struct crypto_sync_skcipher *fallback; 35 - struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); 28 + struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 29 + struct crypto_skcipher *fallback; 36 30 37 - fallback = crypto_alloc_sync_skcipher(alg, 0, 38 - CRYPTO_ALG_NEED_FALLBACK); 31 + fallback = crypto_alloc_skcipher("xts(aes)", 0, 32 + CRYPTO_ALG_NEED_FALLBACK | 33 + CRYPTO_ALG_ASYNC); 39 34 if (IS_ERR(fallback)) { 40 - printk(KERN_ERR 41 - "Failed to allocate transformation for '%s': %ld\n", 42 - alg, PTR_ERR(fallback)); 35 + pr_err("Failed to allocate xts(aes) fallback: %ld\n", 36 + PTR_ERR(fallback)); 43 37 return PTR_ERR(fallback); 44 38 } 45 39 46 - crypto_sync_skcipher_set_flags( 47 - fallback, 48 - crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); 40 + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + 41 + crypto_skcipher_reqsize(fallback)); 49 42 ctx->fallback = fallback; 50 - 51 43 return 0; 52 44 } 53 45 54 - static void p8_aes_xts_exit(struct crypto_tfm *tfm) 46 + static void p8_aes_xts_exit(struct crypto_skcipher *tfm) 55 47 { 56 
- struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); 48 + struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 57 49 58 - if (ctx->fallback) { 59 - crypto_free_sync_skcipher(ctx->fallback); 60 - ctx->fallback = NULL; 61 - } 50 + crypto_free_skcipher(ctx->fallback); 62 51 } 63 52 64 - static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key, 53 + static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, 65 54 unsigned int keylen) 66 55 { 56 + struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 67 57 int ret; 68 - struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); 69 58 70 - ret = xts_check_key(tfm, key, keylen); 59 + ret = xts_verify_key(tfm, key, keylen); 71 60 if (ret) 72 61 return ret; 73 62 ··· 70 81 pagefault_enable(); 71 82 preempt_enable(); 72 83 73 - ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 84 + ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); 74 85 75 86 return ret ? -EINVAL : 0; 76 87 } 77 88 78 - static int p8_aes_xts_crypt(struct blkcipher_desc *desc, 79 - struct scatterlist *dst, 80 - struct scatterlist *src, 81 - unsigned int nbytes, int enc) 89 + static int p8_aes_xts_crypt(struct skcipher_request *req, int enc) 82 90 { 83 - int ret; 91 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 92 + const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 93 + struct skcipher_walk walk; 94 + unsigned int nbytes; 84 95 u8 tweak[AES_BLOCK_SIZE]; 85 - u8 *iv; 86 - struct blkcipher_walk walk; 87 - struct p8_aes_xts_ctx *ctx = 88 - crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 96 + int ret; 89 97 90 98 if (!crypto_simd_usable()) { 91 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 92 - skcipher_request_set_sync_tfm(req, ctx->fallback); 93 - skcipher_request_set_callback(req, desc->flags, NULL, NULL); 94 - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 95 - ret = enc? 
crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); 96 - skcipher_request_zero(req); 97 - } else { 98 - blkcipher_walk_init(&walk, dst, src, nbytes); 99 + struct skcipher_request *subreq = skcipher_request_ctx(req); 99 100 100 - ret = blkcipher_walk_virt(desc, &walk); 101 + *subreq = *req; 102 + skcipher_request_set_tfm(subreq, ctx->fallback); 103 + return enc ? crypto_skcipher_encrypt(subreq) : 104 + crypto_skcipher_decrypt(subreq); 105 + } 101 106 107 + ret = skcipher_walk_virt(&walk, req, false); 108 + if (ret) 109 + return ret; 110 + 111 + preempt_disable(); 112 + pagefault_disable(); 113 + enable_kernel_vsx(); 114 + 115 + aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key); 116 + 117 + disable_kernel_vsx(); 118 + pagefault_enable(); 119 + preempt_enable(); 120 + 121 + while ((nbytes = walk.nbytes) != 0) { 102 122 preempt_disable(); 103 123 pagefault_disable(); 104 124 enable_kernel_vsx(); 105 - 106 - iv = walk.iv; 107 - memset(tweak, 0, AES_BLOCK_SIZE); 108 - aes_p8_encrypt(iv, tweak, &ctx->tweak_key); 109 - 125 + if (enc) 126 + aes_p8_xts_encrypt(walk.src.virt.addr, 127 + walk.dst.virt.addr, 128 + round_down(nbytes, AES_BLOCK_SIZE), 129 + &ctx->enc_key, NULL, tweak); 130 + else 131 + aes_p8_xts_decrypt(walk.src.virt.addr, 132 + walk.dst.virt.addr, 133 + round_down(nbytes, AES_BLOCK_SIZE), 134 + &ctx->dec_key, NULL, tweak); 110 135 disable_kernel_vsx(); 111 136 pagefault_enable(); 112 137 preempt_enable(); 113 138 114 - while ((nbytes = walk.nbytes)) { 115 - preempt_disable(); 116 - pagefault_disable(); 117 - enable_kernel_vsx(); 118 - if (enc) 119 - aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 120 - nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); 121 - else 122 - aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, 123 - nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); 124 - disable_kernel_vsx(); 125 - pagefault_enable(); 126 - preempt_enable(); 127 - 128 - nbytes &= AES_BLOCK_SIZE - 1; 129 - ret = blkcipher_walk_done(desc, 
&walk, nbytes); 130 - } 139 + ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); 131 140 } 132 141 return ret; 133 142 } 134 143 135 - static int p8_aes_xts_encrypt(struct blkcipher_desc *desc, 136 - struct scatterlist *dst, 137 - struct scatterlist *src, unsigned int nbytes) 144 + static int p8_aes_xts_encrypt(struct skcipher_request *req) 138 145 { 139 - return p8_aes_xts_crypt(desc, dst, src, nbytes, 1); 146 + return p8_aes_xts_crypt(req, 1); 140 147 } 141 148 142 - static int p8_aes_xts_decrypt(struct blkcipher_desc *desc, 143 - struct scatterlist *dst, 144 - struct scatterlist *src, unsigned int nbytes) 149 + static int p8_aes_xts_decrypt(struct skcipher_request *req) 145 150 { 146 - return p8_aes_xts_crypt(desc, dst, src, nbytes, 0); 151 + return p8_aes_xts_crypt(req, 0); 147 152 } 148 153 149 - struct crypto_alg p8_aes_xts_alg = { 150 - .cra_name = "xts(aes)", 151 - .cra_driver_name = "p8_aes_xts", 152 - .cra_module = THIS_MODULE, 153 - .cra_priority = 2000, 154 - .cra_type = &crypto_blkcipher_type, 155 - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 156 - .cra_alignmask = 0, 157 - .cra_blocksize = AES_BLOCK_SIZE, 158 - .cra_ctxsize = sizeof(struct p8_aes_xts_ctx), 159 - .cra_init = p8_aes_xts_init, 160 - .cra_exit = p8_aes_xts_exit, 161 - .cra_blkcipher = { 162 - .ivsize = AES_BLOCK_SIZE, 163 - .min_keysize = 2 * AES_MIN_KEY_SIZE, 164 - .max_keysize = 2 * AES_MAX_KEY_SIZE, 165 - .setkey = p8_aes_xts_setkey, 166 - .encrypt = p8_aes_xts_encrypt, 167 - .decrypt = p8_aes_xts_decrypt, 168 - } 154 + struct skcipher_alg p8_aes_xts_alg = { 155 + .base.cra_name = "xts(aes)", 156 + .base.cra_driver_name = "p8_aes_xts", 157 + .base.cra_module = THIS_MODULE, 158 + .base.cra_priority = 2000, 159 + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, 160 + .base.cra_blocksize = AES_BLOCK_SIZE, 161 + .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx), 162 + .setkey = p8_aes_xts_setkey, 163 + .encrypt = p8_aes_xts_encrypt, 164 + .decrypt = 
p8_aes_xts_decrypt, 165 + .init = p8_aes_xts_init, 166 + .exit = p8_aes_xts_exit, 167 + .min_keysize = 2 * AES_MIN_KEY_SIZE, 168 + .max_keysize = 2 * AES_MAX_KEY_SIZE, 169 + .ivsize = AES_BLOCK_SIZE, 169 170 };
-2
drivers/crypto/vmx/aesp8-ppc.h
··· 2 2 #include <linux/types.h> 3 3 #include <crypto/aes.h> 4 4 5 - #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) 6 - 7 5 struct aes_key { 8 6 u8 key[AES_MAX_KEYLENGTH]; 9 7 int rounds;
+20 -2
drivers/crypto/vmx/aesp8-ppc.pl
··· 1286 1286 1287 1287 ######################################################################### 1288 1288 {{{ # CTR procedure[s] # 1289 + 1290 + ####################### WARNING: Here be dragons! ####################### 1291 + # 1292 + # This code is written as 'ctr32', based on a 32-bit counter used 1293 + # upstream. The kernel does *not* use a 32-bit counter. The kernel uses 1294 + # a 128-bit counter. 1295 + # 1296 + # This leads to subtle changes from the upstream code: the counter 1297 + # is incremented with vaddu_q_m rather than vaddu_w_m. This occurs in 1298 + # both the bulk (8 blocks at a time) path, and in the individual block 1299 + # path. Be aware of this when doing updates. 1300 + # 1301 + # See: 1302 + # 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug") 1303 + # 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword") 1304 + # https://github.com/openssl/openssl/pull/8942 1305 + # 1306 + ######################################################################### 1289 1307 my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10)); 1290 1308 my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); 1291 1309 my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)= ··· 1375 1357 addi $idx,$idx,16 1376 1358 bdnz Loop_ctr32_enc 1377 1359 1378 - vadduqm $ivec,$ivec,$one 1360 + vadduqm $ivec,$ivec,$one # Kernel change for 128-bit 1379 1361 vmr $dat,$inptail 1380 1362 lvx $inptail,0,$inp 1381 1363 addi $inp,$inp,16 ··· 1519 1501 $SHL $len,$len,4 1520 1502 1521 1503 vadduqm $out1,$ivec,$one # counter values ... 1522 - vadduqm $out2,$ivec,$two 1504 + vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit) 1523 1505 vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] 1524 1506 le?li $idx,8 1525 1507 vadduqm $out3,$out1,$two
+39 -35
drivers/crypto/vmx/vmx.c
··· 15 15 #include <linux/crypto.h> 16 16 #include <asm/cputable.h> 17 17 #include <crypto/internal/hash.h> 18 + #include <crypto/internal/skcipher.h> 18 19 19 20 extern struct shash_alg p8_ghash_alg; 20 21 extern struct crypto_alg p8_aes_alg; 21 - extern struct crypto_alg p8_aes_cbc_alg; 22 - extern struct crypto_alg p8_aes_ctr_alg; 23 - extern struct crypto_alg p8_aes_xts_alg; 24 - static struct crypto_alg *algs[] = { 25 - &p8_aes_alg, 26 - &p8_aes_cbc_alg, 27 - &p8_aes_ctr_alg, 28 - &p8_aes_xts_alg, 29 - NULL, 30 - }; 22 + extern struct skcipher_alg p8_aes_cbc_alg; 23 + extern struct skcipher_alg p8_aes_ctr_alg; 24 + extern struct skcipher_alg p8_aes_xts_alg; 31 25 32 26 static int __init p8_init(void) 33 27 { 34 - int ret = 0; 35 - struct crypto_alg **alg_it; 36 - 37 - for (alg_it = algs; *alg_it; alg_it++) { 38 - ret = crypto_register_alg(*alg_it); 39 - printk(KERN_INFO "crypto_register_alg '%s' = %d\n", 40 - (*alg_it)->cra_name, ret); 41 - if (ret) { 42 - for (alg_it--; alg_it >= algs; alg_it--) 43 - crypto_unregister_alg(*alg_it); 44 - break; 45 - } 46 - } 47 - if (ret) 48 - return ret; 28 + int ret; 49 29 50 30 ret = crypto_register_shash(&p8_ghash_alg); 51 - if (ret) { 52 - for (alg_it = algs; *alg_it; alg_it++) 53 - crypto_unregister_alg(*alg_it); 54 - } 31 + if (ret) 32 + goto err; 33 + 34 + ret = crypto_register_alg(&p8_aes_alg); 35 + if (ret) 36 + goto err_unregister_ghash; 37 + 38 + ret = crypto_register_skcipher(&p8_aes_cbc_alg); 39 + if (ret) 40 + goto err_unregister_aes; 41 + 42 + ret = crypto_register_skcipher(&p8_aes_ctr_alg); 43 + if (ret) 44 + goto err_unregister_aes_cbc; 45 + 46 + ret = crypto_register_skcipher(&p8_aes_xts_alg); 47 + if (ret) 48 + goto err_unregister_aes_ctr; 49 + 50 + return 0; 51 + 52 + err_unregister_aes_ctr: 53 + crypto_unregister_skcipher(&p8_aes_ctr_alg); 54 + err_unregister_aes_cbc: 55 + crypto_unregister_skcipher(&p8_aes_cbc_alg); 56 + err_unregister_aes: 57 + crypto_unregister_alg(&p8_aes_alg); 58 + 
err_unregister_ghash: 59 + crypto_unregister_shash(&p8_ghash_alg); 60 + err: 55 61 return ret; 56 62 } 57 63 58 64 static void __exit p8_exit(void) 59 65 { 60 - struct crypto_alg **alg_it; 61 - 62 - for (alg_it = algs; *alg_it; alg_it++) { 63 - printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); 64 - crypto_unregister_alg(*alg_it); 65 - } 66 + crypto_unregister_skcipher(&p8_aes_xts_alg); 67 + crypto_unregister_skcipher(&p8_aes_ctr_alg); 68 + crypto_unregister_skcipher(&p8_aes_cbc_alg); 69 + crypto_unregister_alg(&p8_aes_alg); 66 70 crypto_unregister_shash(&p8_ghash_alg); 67 71 } 68 72
+4 -2
drivers/i2c/i2c-core-acpi.c
··· 111 111 struct list_head resource_list; 112 112 int ret; 113 113 114 - if (acpi_bus_get_status(adev) || !adev->status.present || 115 - acpi_device_enumerated(adev)) 114 + if (acpi_bus_get_status(adev) || !adev->status.present) 116 115 return -EINVAL; 117 116 118 117 if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0) ··· 145 146 memset(&lookup, 0, sizeof(lookup)); 146 147 lookup.info = info; 147 148 lookup.index = -1; 149 + 150 + if (acpi_device_enumerated(adev)) 151 + return -EINVAL; 148 152 149 153 ret = i2c_acpi_do_lookup(adev, &lookup); 150 154 if (ret)
+1 -2
drivers/net/ppp/Kconfig
··· 87 87 depends on PPP 88 88 select CRYPTO 89 89 select CRYPTO_SHA1 90 - select CRYPTO_ARC4 91 - select CRYPTO_ECB 90 + select CRYPTO_LIB_ARC4 92 91 ---help--- 93 92 Support for the MPPE Encryption protocol, as employed by the 94 93 Microsoft Point-to-Point Tunneling Protocol.
+14 -83
drivers/net/ppp/ppp_mppe.c
··· 42 42 * deprecated in 2.6 43 43 */ 44 44 45 + #include <crypto/arc4.h> 45 46 #include <crypto/hash.h> 46 - #include <crypto/skcipher.h> 47 47 #include <linux/err.h> 48 + #include <linux/fips.h> 48 49 #include <linux/module.h> 49 50 #include <linux/kernel.h> 50 51 #include <linux/init.h> ··· 66 65 MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); 67 66 MODULE_SOFTDEP("pre: arc4"); 68 67 MODULE_VERSION("1.0.2"); 69 - 70 - static unsigned int 71 - setup_sg(struct scatterlist *sg, const void *address, unsigned int length) 72 - { 73 - sg_set_buf(sg, address, length); 74 - return length; 75 - } 76 68 77 69 #define SHA1_PAD_SIZE 40 78 70 ··· 90 96 * State for an MPPE (de)compressor. 91 97 */ 92 98 struct ppp_mppe_state { 93 - struct crypto_sync_skcipher *arc4; 99 + struct arc4_ctx arc4; 94 100 struct shash_desc *sha1; 95 101 unsigned char *sha1_digest; 96 102 unsigned char master_key[MPPE_MAX_KEY_LEN]; ··· 149 155 */ 150 156 static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) 151 157 { 152 - struct scatterlist sg_in[1], sg_out[1]; 153 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4); 154 - 155 - skcipher_request_set_sync_tfm(req, state->arc4); 156 - skcipher_request_set_callback(req, 0, NULL, NULL); 157 - 158 158 get_new_key_from_sha(state); 159 159 if (!initial_key) { 160 - crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest, 161 - state->keylen); 162 - sg_init_table(sg_in, 1); 163 - sg_init_table(sg_out, 1); 164 - setup_sg(sg_in, state->sha1_digest, state->keylen); 165 - setup_sg(sg_out, state->session_key, state->keylen); 166 - skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen, 167 - NULL); 168 - if (crypto_skcipher_encrypt(req)) 169 - printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); 160 + arc4_setkey(&state->arc4, state->sha1_digest, state->keylen); 161 + arc4_crypt(&state->arc4, state->session_key, state->sha1_digest, 162 + state->keylen); 170 163 } else { 171 164 memcpy(state->session_key, state->sha1_digest, 
state->keylen); 172 165 } ··· 163 182 state->session_key[1] = 0x26; 164 183 state->session_key[2] = 0x9e; 165 184 } 166 - crypto_sync_skcipher_setkey(state->arc4, state->session_key, 167 - state->keylen); 168 - skcipher_request_zero(req); 185 + arc4_setkey(&state->arc4, state->session_key, state->keylen); 169 186 } 170 187 171 188 /* ··· 176 197 unsigned int digestsize; 177 198 178 199 if (optlen != CILEN_MPPE + sizeof(state->master_key) || 179 - options[0] != CI_MPPE || options[1] != CILEN_MPPE) 200 + options[0] != CI_MPPE || options[1] != CILEN_MPPE || 201 + fips_enabled) 180 202 goto out; 181 203 182 204 state = kzalloc(sizeof(*state), GFP_KERNEL); 183 205 if (state == NULL) 184 206 goto out; 185 207 186 - 187 - state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); 188 - if (IS_ERR(state->arc4)) { 189 - state->arc4 = NULL; 190 - goto out_free; 191 - } 192 208 193 209 shash = crypto_alloc_shash("sha1", 0, 0); 194 210 if (IS_ERR(shash)) ··· 225 251 crypto_free_shash(state->sha1->tfm); 226 252 kzfree(state->sha1); 227 253 } 228 - crypto_free_sync_skcipher(state->arc4); 229 254 kfree(state); 230 255 out: 231 256 return NULL; ··· 240 267 kfree(state->sha1_digest); 241 268 crypto_free_shash(state->sha1->tfm); 242 269 kzfree(state->sha1); 243 - crypto_free_sync_skcipher(state->arc4); 244 - kfree(state); 270 + kzfree(state); 245 271 } 246 272 } 247 273 ··· 339 367 int isize, int osize) 340 368 { 341 369 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 342 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4); 343 370 int proto; 344 - int err; 345 - struct scatterlist sg_in[1], sg_out[1]; 346 371 347 372 /* 348 373 * Check that the protocol is in the range we handle. 
··· 390 421 ibuf += 2; /* skip to proto field */ 391 422 isize -= 2; 392 423 393 - /* Encrypt packet */ 394 - sg_init_table(sg_in, 1); 395 - sg_init_table(sg_out, 1); 396 - setup_sg(sg_in, ibuf, isize); 397 - setup_sg(sg_out, obuf, osize); 398 - 399 - skcipher_request_set_sync_tfm(req, state->arc4); 400 - skcipher_request_set_callback(req, 0, NULL, NULL); 401 - skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL); 402 - err = crypto_skcipher_encrypt(req); 403 - skcipher_request_zero(req); 404 - if (err) { 405 - printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); 406 - return -1; 407 - } 424 + arc4_crypt(&state->arc4, obuf, ibuf, isize); 408 425 409 426 state->stats.unc_bytes += isize; 410 427 state->stats.unc_packets++; ··· 436 481 int osize) 437 482 { 438 483 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 439 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4); 440 484 unsigned ccount; 441 485 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; 442 - struct scatterlist sg_in[1], sg_out[1]; 443 486 444 487 if (isize <= PPP_HDRLEN + MPPE_OVHD) { 445 488 if (state->debug) ··· 564 611 * Decrypt the first byte in order to check if it is 565 612 * a compressed or uncompressed protocol field. 566 613 */ 567 - sg_init_table(sg_in, 1); 568 - sg_init_table(sg_out, 1); 569 - setup_sg(sg_in, ibuf, 1); 570 - setup_sg(sg_out, obuf, 1); 571 - 572 - skcipher_request_set_sync_tfm(req, state->arc4); 573 - skcipher_request_set_callback(req, 0, NULL, NULL); 574 - skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL); 575 - if (crypto_skcipher_decrypt(req)) { 576 - printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); 577 - osize = DECOMP_ERROR; 578 - goto out_zap_req; 579 - } 614 + arc4_crypt(&state->arc4, obuf, ibuf, 1); 580 615 581 616 /* 582 617 * Do PFC decompression. ··· 579 638 } 580 639 581 640 /* And finally, decrypt the rest of the packet. 
*/ 582 - setup_sg(sg_in, ibuf + 1, isize - 1); 583 - setup_sg(sg_out, obuf + 1, osize - 1); 584 - skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL); 585 - if (crypto_skcipher_decrypt(req)) { 586 - printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); 587 - osize = DECOMP_ERROR; 588 - goto out_zap_req; 589 - } 641 + arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1); 590 642 591 643 state->stats.unc_bytes += osize; 592 644 state->stats.unc_packets++; ··· 589 655 /* good packet credit */ 590 656 state->sanity_errors >>= 1; 591 657 592 - out_zap_req: 593 - skcipher_request_zero(req); 594 658 return osize; 595 659 596 660 sanity_error: ··· 661 729 static int __init ppp_mppe_init(void) 662 730 { 663 731 int answer; 664 - if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && 665 - crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))) 732 + if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)) 666 733 return -ENODEV; 667 734 668 735 sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
+1 -1
fs/cifs/Kconfig
··· 10 10 select CRYPTO_SHA512 11 11 select CRYPTO_CMAC 12 12 select CRYPTO_HMAC 13 - select CRYPTO_ARC4 13 + select CRYPTO_LIB_ARC4 14 14 select CRYPTO_AEAD2 15 15 select CRYPTO_CCM 16 16 select CRYPTO_ECB
+16 -46
fs/cifs/cifsencrypt.c
··· 33 33 #include <linux/ctype.h> 34 34 #include <linux/random.h> 35 35 #include <linux/highmem.h> 36 - #include <crypto/skcipher.h> 36 + #include <linux/fips.h> 37 + #include <crypto/arc4.h> 37 38 #include <crypto/aead.h> 38 39 39 40 int __cifs_calc_signature(struct smb_rqst *rqst, ··· 773 772 int 774 773 calc_seckey(struct cifs_ses *ses) 775 774 { 776 - int rc; 777 - struct crypto_skcipher *tfm_arc4; 778 - struct scatterlist sgin, sgout; 779 - struct skcipher_request *req; 780 - unsigned char *sec_key; 775 + unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ 776 + struct arc4_ctx *ctx_arc4; 781 777 782 - sec_key = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL); 783 - if (sec_key == NULL) 784 - return -ENOMEM; 778 + if (fips_enabled) 779 + return -ENODEV; 785 780 786 781 get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); 787 782 788 - tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 789 - if (IS_ERR(tfm_arc4)) { 790 - rc = PTR_ERR(tfm_arc4); 791 - cifs_dbg(VFS, "could not allocate crypto API arc4\n"); 792 - goto out; 783 + ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL); 784 + if (!ctx_arc4) { 785 + cifs_dbg(VFS, "could not allocate arc4 context\n"); 786 + return -ENOMEM; 793 787 } 794 788 795 - rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response, 796 - CIFS_SESS_KEY_SIZE); 797 - if (rc) { 798 - cifs_dbg(VFS, "%s: Could not set response as a key\n", 799 - __func__); 800 - goto out_free_cipher; 801 - } 802 - 803 - req = skcipher_request_alloc(tfm_arc4, GFP_KERNEL); 804 - if (!req) { 805 - rc = -ENOMEM; 806 - cifs_dbg(VFS, "could not allocate crypto API arc4 request\n"); 807 - goto out_free_cipher; 808 - } 809 - 810 - sg_init_one(&sgin, sec_key, CIFS_SESS_KEY_SIZE); 811 - sg_init_one(&sgout, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); 812 - 813 - skcipher_request_set_callback(req, 0, NULL, NULL); 814 - skcipher_request_set_crypt(req, &sgin, &sgout, CIFS_CPHTXT_SIZE, NULL); 815 - 816 - rc = crypto_skcipher_encrypt(req); 817 - 
skcipher_request_free(req); 818 - if (rc) { 819 - cifs_dbg(VFS, "could not encrypt session key rc: %d\n", rc); 820 - goto out_free_cipher; 821 - } 789 + arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE); 790 + arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key, 791 + CIFS_CPHTXT_SIZE); 822 792 823 793 /* make secondary_key/nonce as session key */ 824 794 memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE); 825 795 /* and make len as that of session key only */ 826 796 ses->auth_key.len = CIFS_SESS_KEY_SIZE; 827 797 828 - out_free_cipher: 829 - crypto_free_skcipher(tfm_arc4); 830 - out: 831 - kfree(sec_key); 832 - return rc; 798 + memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE); 799 + kzfree(ctx_arc4); 800 + return 0; 833 801 } 834 802 835 803 void
-1
fs/cifs/cifsfs.c
··· 1591 1591 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and " 1592 1592 "also older servers complying with the SNIA CIFS Specification)"); 1593 1593 MODULE_VERSION(CIFS_VERSION); 1594 - MODULE_SOFTDEP("pre: arc4"); 1595 1594 MODULE_SOFTDEP("pre: des"); 1596 1595 MODULE_SOFTDEP("pre: ecb"); 1597 1596 MODULE_SOFTDEP("pre: hmac");
+2 -32
include/crypto/aead.h
··· 317 317 * 318 318 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 319 319 */ 320 - static inline int crypto_aead_encrypt(struct aead_request *req) 321 - { 322 - struct crypto_aead *aead = crypto_aead_reqtfm(req); 323 - struct crypto_alg *alg = aead->base.__crt_alg; 324 - unsigned int cryptlen = req->cryptlen; 325 - int ret; 326 - 327 - crypto_stats_get(alg); 328 - if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 329 - ret = -ENOKEY; 330 - else 331 - ret = crypto_aead_alg(aead)->encrypt(req); 332 - crypto_stats_aead_encrypt(cryptlen, alg, ret); 333 - return ret; 334 - } 320 + int crypto_aead_encrypt(struct aead_request *req); 335 321 336 322 /** 337 323 * crypto_aead_decrypt() - decrypt ciphertext ··· 341 355 * integrity of the ciphertext or the associated data was violated); 342 356 * < 0 if an error occurred. 343 357 */ 344 - static inline int crypto_aead_decrypt(struct aead_request *req) 345 - { 346 - struct crypto_aead *aead = crypto_aead_reqtfm(req); 347 - struct crypto_alg *alg = aead->base.__crt_alg; 348 - unsigned int cryptlen = req->cryptlen; 349 - int ret; 350 - 351 - crypto_stats_get(alg); 352 - if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 353 - ret = -ENOKEY; 354 - else if (req->cryptlen < crypto_aead_authsize(aead)) 355 - ret = -EINVAL; 356 - else 357 - ret = crypto_aead_alg(aead)->decrypt(req); 358 - crypto_stats_aead_decrypt(cryptlen, alg, ret); 359 - return ret; 360 - } 358 + int crypto_aead_decrypt(struct aead_request *req); 361 359 362 360 /** 363 361 * DOC: Asynchronous AEAD Request Handle
-7
include/crypto/algapi.h
··· 189 189 int crypto_enqueue_request(struct crypto_queue *queue, 190 190 struct crypto_async_request *request); 191 191 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); 192 - int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); 193 192 static inline unsigned int crypto_queue_len(struct crypto_queue *queue) 194 193 { 195 194 return queue->qlen; ··· 368 369 static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) 369 370 { 370 371 return req->__ctx; 371 - } 372 - 373 - static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, 374 - struct crypto_ablkcipher *tfm) 375 - { 376 - return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); 377 372 } 378 373 379 374 static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
+10
include/crypto/arc4.h
··· 6 6 #ifndef _CRYPTO_ARC4_H 7 7 #define _CRYPTO_ARC4_H 8 8 9 + #include <linux/types.h> 10 + 9 11 #define ARC4_MIN_KEY_SIZE 1 10 12 #define ARC4_MAX_KEY_SIZE 256 11 13 #define ARC4_BLOCK_SIZE 1 14 + 15 + struct arc4_ctx { 16 + u32 S[256]; 17 + u32 x, y; 18 + }; 19 + 20 + int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len); 21 + void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len); 12 22 13 23 #endif /* _CRYPTO_ARC4_H */
+1 -1
include/crypto/chacha.h
··· 41 41 } 42 42 void hchacha_block(const u32 *in, u32 *out, int nrounds); 43 43 44 - void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv); 44 + void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv); 45 45 46 46 int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, 47 47 unsigned int keysize);
-8
include/crypto/crypto_wq.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef CRYPTO_WQ_H 3 - #define CRYPTO_WQ_H 4 - 5 - #include <linux/workqueue.h> 6 - 7 - extern struct workqueue_struct *kcrypto_wq; 8 - #endif
+2
include/crypto/drbg.h
··· 129 129 130 130 bool seeded; /* DRBG fully seeded? */ 131 131 bool pr; /* Prediction resistance enabled? */ 132 + bool fips_primed; /* Continuous test primed? */ 133 + unsigned char *prev; /* FIPS 140-2 continuous test value */ 132 134 struct work_struct seed_work; /* asynchronous seeding support */ 133 135 struct crypto_rng *jent; 134 136 const struct drbg_state_ops *d_ops;
-6
include/crypto/internal/hash.h
··· 196 196 return ahash_request_cast(crypto_dequeue_request(queue)); 197 197 } 198 198 199 - static inline int ahash_tfm_in_queue(struct crypto_queue *queue, 200 - struct crypto_ahash *tfm) 201 - { 202 - return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm)); 203 - } 204 - 205 199 static inline void *crypto_shash_ctx(struct crypto_shash *tfm) 206 200 { 207 201 return crypto_tfm_ctx(&tfm->base);
+60
include/crypto/internal/skcipher.h
··· 200 200 return alg->max_keysize; 201 201 } 202 202 203 + static inline unsigned int crypto_skcipher_alg_chunksize( 204 + struct skcipher_alg *alg) 205 + { 206 + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == 207 + CRYPTO_ALG_TYPE_BLKCIPHER) 208 + return alg->base.cra_blocksize; 209 + 210 + if (alg->base.cra_ablkcipher.encrypt) 211 + return alg->base.cra_blocksize; 212 + 213 + return alg->chunksize; 214 + } 215 + 216 + static inline unsigned int crypto_skcipher_alg_walksize( 217 + struct skcipher_alg *alg) 218 + { 219 + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == 220 + CRYPTO_ALG_TYPE_BLKCIPHER) 221 + return alg->base.cra_blocksize; 222 + 223 + if (alg->base.cra_ablkcipher.encrypt) 224 + return alg->base.cra_blocksize; 225 + 226 + return alg->walksize; 227 + } 228 + 229 + /** 230 + * crypto_skcipher_chunksize() - obtain chunk size 231 + * @tfm: cipher handle 232 + * 233 + * The block size is set to one for ciphers such as CTR. However, 234 + * you still need to provide incremental updates in multiples of 235 + * the underlying block size as the IV does not have sub-block 236 + * granularity. This is known in this API as the chunk size. 237 + * 238 + * Return: chunk size in bytes 239 + */ 240 + static inline unsigned int crypto_skcipher_chunksize( 241 + struct crypto_skcipher *tfm) 242 + { 243 + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); 244 + } 245 + 246 + /** 247 + * crypto_skcipher_walksize() - obtain walk size 248 + * @tfm: cipher handle 249 + * 250 + * In some cases, algorithms can only perform optimally when operating on 251 + * multiple blocks in parallel. 
This is reflected by the walksize, which 252 + * must be a multiple of the chunksize (or equal if the concern does not 253 + * apply) 254 + * 255 + * Return: walk size in bytes 256 + */ 257 + static inline unsigned int crypto_skcipher_walksize( 258 + struct crypto_skcipher *tfm) 259 + { 260 + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); 261 + } 262 + 203 263 /* Helpers for simple block cipher modes of operation */ 204 264 struct skcipher_ctx_simple { 205 265 struct crypto_cipher *cipher; /* underlying block cipher */
+2 -90
include/crypto/skcipher.h
··· 288 288 return crypto_skcipher_ivsize(&tfm->base); 289 289 } 290 290 291 - static inline unsigned int crypto_skcipher_alg_chunksize( 292 - struct skcipher_alg *alg) 293 - { 294 - if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == 295 - CRYPTO_ALG_TYPE_BLKCIPHER) 296 - return alg->base.cra_blocksize; 297 - 298 - if (alg->base.cra_ablkcipher.encrypt) 299 - return alg->base.cra_blocksize; 300 - 301 - return alg->chunksize; 302 - } 303 - 304 - static inline unsigned int crypto_skcipher_alg_walksize( 305 - struct skcipher_alg *alg) 306 - { 307 - if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == 308 - CRYPTO_ALG_TYPE_BLKCIPHER) 309 - return alg->base.cra_blocksize; 310 - 311 - if (alg->base.cra_ablkcipher.encrypt) 312 - return alg->base.cra_blocksize; 313 - 314 - return alg->walksize; 315 - } 316 - 317 - /** 318 - * crypto_skcipher_chunksize() - obtain chunk size 319 - * @tfm: cipher handle 320 - * 321 - * The block size is set to one for ciphers such as CTR. However, 322 - * you still need to provide incremental updates in multiples of 323 - * the underlying block size as the IV does not have sub-block 324 - * granularity. This is known in this API as the chunk size. 325 - * 326 - * Return: chunk size in bytes 327 - */ 328 - static inline unsigned int crypto_skcipher_chunksize( 329 - struct crypto_skcipher *tfm) 330 - { 331 - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); 332 - } 333 - 334 - /** 335 - * crypto_skcipher_walksize() - obtain walk size 336 - * @tfm: cipher handle 337 - * 338 - * In some cases, algorithms can only perform optimally when operating on 339 - * multiple blocks in parallel. 
This is reflected by the walksize, which 340 - * must be a multiple of the chunksize (or equal if the concern does not 341 - * apply) 342 - * 343 - * Return: walk size in bytes 344 - */ 345 - static inline unsigned int crypto_skcipher_walksize( 346 - struct crypto_skcipher *tfm) 347 - { 348 - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); 349 - } 350 - 351 291 /** 352 292 * crypto_skcipher_blocksize() - obtain block size of cipher 353 293 * @tfm: cipher handle ··· 419 479 * 420 480 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 421 481 */ 422 - static inline int crypto_skcipher_encrypt(struct skcipher_request *req) 423 - { 424 - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 425 - struct crypto_alg *alg = tfm->base.__crt_alg; 426 - unsigned int cryptlen = req->cryptlen; 427 - int ret; 428 - 429 - crypto_stats_get(alg); 430 - if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 431 - ret = -ENOKEY; 432 - else 433 - ret = tfm->encrypt(req); 434 - crypto_stats_skcipher_encrypt(cryptlen, ret, alg); 435 - return ret; 436 - } 482 + int crypto_skcipher_encrypt(struct skcipher_request *req); 437 483 438 484 /** 439 485 * crypto_skcipher_decrypt() - decrypt ciphertext ··· 432 506 * 433 507 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 434 508 */ 435 - static inline int crypto_skcipher_decrypt(struct skcipher_request *req) 436 - { 437 - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 438 - struct crypto_alg *alg = tfm->base.__crt_alg; 439 - unsigned int cryptlen = req->cryptlen; 440 - int ret; 441 - 442 - crypto_stats_get(alg); 443 - if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 444 - ret = -ENOKEY; 445 - else 446 - ret = tfm->decrypt(req); 447 - crypto_stats_skcipher_decrypt(cryptlen, ret, alg); 448 - return ret; 449 - } 509 + int crypto_skcipher_decrypt(struct skcipher_request *req); 450 510 451 511 /** 452 512 * DOC: Symmetric Key Cipher Request Handle
+11 -1
include/linux/crypto.h
··· 49 49 #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b 50 50 #define CRYPTO_ALG_TYPE_RNG 0x0000000c 51 51 #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d 52 - #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e 53 52 #define CRYPTO_ALG_TYPE_HASH 0x0000000e 54 53 #define CRYPTO_ALG_TYPE_SHASH 0x0000000e 55 54 #define CRYPTO_ALG_TYPE_AHASH 0x0000000f ··· 322 323 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 323 324 }; 324 325 326 + /** 327 + * struct compress_alg - compression/decompression algorithm 328 + * @coa_compress: Compress a buffer of specified length, storing the resulting 329 + * data in the specified buffer. Return the length of the 330 + * compressed data in dlen. 331 + * @coa_decompress: Decompress the source buffer, storing the uncompressed 332 + * data in the specified buffer. The length of the data is 333 + * returned in dlen. 334 + * 335 + * All fields are mandatory. 336 + */ 325 337 struct compress_alg { 326 338 int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, 327 339 unsigned int slen, u8 *dst, unsigned int *dlen);
+1 -1
lib/Makefile
··· 102 102 obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o 103 103 CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) 104 104 105 - obj-y += math/ 105 + obj-y += math/ crypto/ 106 106 107 107 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o 108 108 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
+4
lib/crypto/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o 4 + libarc4-y := arc4.o
+74
lib/crypto/arc4.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Cryptographic API 4 + * 5 + * ARC4 Cipher Algorithm 6 + * 7 + * Jon Oberheide <jon@oberheide.org> 8 + */ 9 + 10 + #include <crypto/arc4.h> 11 + #include <linux/module.h> 12 + 13 + int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len) 14 + { 15 + int i, j = 0, k = 0; 16 + 17 + ctx->x = 1; 18 + ctx->y = 0; 19 + 20 + for (i = 0; i < 256; i++) 21 + ctx->S[i] = i; 22 + 23 + for (i = 0; i < 256; i++) { 24 + u32 a = ctx->S[i]; 25 + 26 + j = (j + in_key[k] + a) & 0xff; 27 + ctx->S[i] = ctx->S[j]; 28 + ctx->S[j] = a; 29 + if (++k >= key_len) 30 + k = 0; 31 + } 32 + 33 + return 0; 34 + } 35 + EXPORT_SYMBOL(arc4_setkey); 36 + 37 + void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len) 38 + { 39 + u32 *const S = ctx->S; 40 + u32 x, y, a, b; 41 + u32 ty, ta, tb; 42 + 43 + if (len == 0) 44 + return; 45 + 46 + x = ctx->x; 47 + y = ctx->y; 48 + 49 + a = S[x]; 50 + y = (y + a) & 0xff; 51 + b = S[y]; 52 + 53 + do { 54 + S[y] = a; 55 + a = (a + b) & 0xff; 56 + S[x] = b; 57 + x = (x + 1) & 0xff; 58 + ta = S[x]; 59 + ty = (y + ta) & 0xff; 60 + tb = S[ty]; 61 + *out++ = *in++ ^ S[a]; 62 + if (--len == 0) 63 + break; 64 + y = ty; 65 + a = ta; 66 + b = tb; 67 + } while (true); 68 + 69 + ctx->x = x; 70 + ctx->y = y; 71 + } 72 + EXPORT_SYMBOL(arc4_crypt); 73 + 74 + MODULE_LICENSE("GPL");
+5 -4
lib/scatterlist.c
··· 676 676 { 677 677 if (!miter->__remaining) { 678 678 struct scatterlist *sg; 679 - unsigned long pgoffset; 680 679 681 680 if (!__sg_page_iter_next(&miter->piter)) 682 681 return false; 683 682 684 683 sg = miter->piter.sg; 685 - pgoffset = miter->piter.sg_pgoffset; 686 684 687 - miter->__offset = pgoffset ? 0 : sg->offset; 685 + miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset; 686 + miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT; 687 + miter->__offset &= PAGE_SIZE - 1; 688 688 miter->__remaining = sg->offset + sg->length - 689 - (pgoffset << PAGE_SHIFT) - miter->__offset; 689 + (miter->piter.sg_pgoffset << PAGE_SHIFT) - 690 + miter->__offset; 690 691 miter->__remaining = min_t(unsigned long, miter->__remaining, 691 692 PAGE_SIZE - miter->__offset); 692 693 }
+1 -1
net/mac80211/Kconfig
··· 3 3 tristate "Generic IEEE 802.11 Networking Stack (mac80211)" 4 4 depends on CFG80211 5 5 select CRYPTO 6 - select CRYPTO_ARC4 6 + select CRYPTO_LIB_ARC4 7 7 select CRYPTO_AES 8 8 select CRYPTO_CCM 9 9 select CRYPTO_GCM
+2 -2
net/mac80211/cfg.c
··· 14 14 #include <linux/slab.h> 15 15 #include <net/net_namespace.h> 16 16 #include <linux/rcupdate.h> 17 + #include <linux/fips.h> 17 18 #include <linux/if_ether.h> 18 19 #include <net/cfg80211.h> 19 20 #include "ieee80211_i.h" ··· 403 402 case WLAN_CIPHER_SUITE_WEP40: 404 403 case WLAN_CIPHER_SUITE_TKIP: 405 404 case WLAN_CIPHER_SUITE_WEP104: 406 - if (IS_ERR(local->wep_tx_tfm)) 405 + if (WARN_ON_ONCE(fips_enabled)) 407 406 return -EINVAL; 408 - break; 409 407 case WLAN_CIPHER_SUITE_CCMP: 410 408 case WLAN_CIPHER_SUITE_CCMP_256: 411 409 case WLAN_CIPHER_SUITE_AES_CMAC:
+2 -2
net/mac80211/ieee80211_i.h
··· 1255 1255 1256 1256 struct rate_control_ref *rate_ctrl; 1257 1257 1258 - struct crypto_cipher *wep_tx_tfm; 1259 - struct crypto_cipher *wep_rx_tfm; 1258 + struct arc4_ctx wep_tx_ctx; 1259 + struct arc4_ctx wep_rx_ctx; 1260 1260 u32 wep_iv; 1261 1261 1262 1262 /* see iface.c */
+1
net/mac80211/key.h
··· 11 11 #include <linux/list.h> 12 12 #include <linux/crypto.h> 13 13 #include <linux/rcupdate.h> 14 + #include <crypto/arc4.h> 14 15 #include <net/mac80211.h> 15 16 16 17 #define NUM_DEFAULT_KEYS 4
+2 -4
net/mac80211/main.c
··· 10 10 11 11 #include <net/mac80211.h> 12 12 #include <linux/module.h> 13 + #include <linux/fips.h> 13 14 #include <linux/init.h> 14 15 #include <linux/netdevice.h> 15 16 #include <linux/types.h> ··· 731 730 732 731 static int ieee80211_init_cipher_suites(struct ieee80211_local *local) 733 732 { 734 - bool have_wep = !(IS_ERR(local->wep_tx_tfm) || 735 - IS_ERR(local->wep_rx_tfm)); 733 + bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */ 736 734 bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE); 737 735 int n_suites = 0, r = 0, w = 0; 738 736 u32 *suites; ··· 1298 1298 fail_rate: 1299 1299 rtnl_unlock(); 1300 1300 ieee80211_led_exit(local); 1301 - ieee80211_wep_free(local); 1302 1301 fail_flows: 1303 1302 destroy_workqueue(local->workqueue); 1304 1303 fail_workqueue: ··· 1354 1355 1355 1356 destroy_workqueue(local->workqueue); 1356 1357 wiphy_unregister(local->hw.wiphy); 1357 - ieee80211_wep_free(local); 1358 1358 ieee80211_led_exit(local); 1359 1359 kfree(local->int_scan_req); 1360 1360 }
+2 -1
net/mac80211/mlme.c
··· 12 12 */ 13 13 14 14 #include <linux/delay.h> 15 + #include <linux/fips.h> 15 16 #include <linux/if_ether.h> 16 17 #include <linux/skbuff.h> 17 18 #include <linux/if_arp.h> ··· 5046 5045 auth_alg = WLAN_AUTH_OPEN; 5047 5046 break; 5048 5047 case NL80211_AUTHTYPE_SHARED_KEY: 5049 - if (IS_ERR(local->wep_tx_tfm)) 5048 + if (fips_enabled) 5050 5049 return -EOPNOTSUPP; 5051 5050 auth_alg = WLAN_AUTH_SHARED_KEY; 5052 5051 break;
+4 -4
net/mac80211/tkip.c
··· 219 219 * @payload_len is the length of payload (_not_ including IV/ICV length). 220 220 * @ta is the transmitter addresses. 221 221 */ 222 - int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, 222 + int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx, 223 223 struct ieee80211_key *key, 224 224 struct sk_buff *skb, 225 225 u8 *payload, size_t payload_len) ··· 228 228 229 229 ieee80211_get_tkip_p2k(&key->conf, skb, rc4key); 230 230 231 - return ieee80211_wep_encrypt_data(tfm, rc4key, 16, 231 + return ieee80211_wep_encrypt_data(ctx, rc4key, 16, 232 232 payload, payload_len); 233 233 } 234 234 ··· 236 236 * beginning of the buffer containing IEEE 802.11 header payload, i.e., 237 237 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the 238 238 * length of payload, including IV, Ext. IV, MIC, ICV. */ 239 - int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, 239 + int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, 240 240 struct ieee80211_key *key, 241 241 u8 *payload, size_t payload_len, u8 *ta, 242 242 u8 *ra, int only_iv, int queue, ··· 294 294 295 295 tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key); 296 296 297 - res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); 297 + res = ieee80211_wep_decrypt_data(ctx, rc4key, 16, pos, payload_len - 12); 298 298 done: 299 299 if (res == TKIP_DECRYPT_OK) { 300 300 /*
+2 -2
net/mac80211/tkip.h
··· 10 10 #include <linux/crypto.h> 11 11 #include "key.h" 12 12 13 - int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, 13 + int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx, 14 14 struct ieee80211_key *key, 15 15 struct sk_buff *skb, 16 16 u8 *payload, size_t payload_len); ··· 21 21 TKIP_DECRYPT_INVALID_KEYIDX = -2, 22 22 TKIP_DECRYPT_REPLAY = -3, 23 23 }; 24 - int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, 24 + int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, 25 25 struct ieee80211_key *key, 26 26 u8 *payload, size_t payload_len, u8 *ta, 27 27 u8 *ra, int only_iv, int queue,
+10 -39
net/mac80211/wep.c
··· 27 27 /* start WEP IV from a random value */ 28 28 get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); 29 29 30 - local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, 0); 31 - if (IS_ERR(local->wep_tx_tfm)) { 32 - local->wep_rx_tfm = ERR_PTR(-EINVAL); 33 - return PTR_ERR(local->wep_tx_tfm); 34 - } 35 - 36 - local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, 0); 37 - if (IS_ERR(local->wep_rx_tfm)) { 38 - crypto_free_cipher(local->wep_tx_tfm); 39 - local->wep_tx_tfm = ERR_PTR(-EINVAL); 40 - return PTR_ERR(local->wep_rx_tfm); 41 - } 42 - 43 30 return 0; 44 - } 45 - 46 - void ieee80211_wep_free(struct ieee80211_local *local) 47 - { 48 - if (!IS_ERR(local->wep_tx_tfm)) 49 - crypto_free_cipher(local->wep_tx_tfm); 50 - if (!IS_ERR(local->wep_rx_tfm)) 51 - crypto_free_cipher(local->wep_rx_tfm); 52 31 } 53 32 54 33 static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) ··· 107 128 /* Perform WEP encryption using given key. data buffer must have tailroom 108 129 * for 4-byte ICV. data_len must not include this ICV. Note: this function 109 130 * does _not_ add IV. 
data = RC4(data | CRC32(data)) */ 110 - int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, 131 + int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key, 111 132 size_t klen, u8 *data, size_t data_len) 112 133 { 113 134 __le32 icv; 114 - int i; 115 - 116 - if (IS_ERR(tfm)) 117 - return -1; 118 135 119 136 icv = cpu_to_le32(~crc32_le(~0, data, data_len)); 120 137 put_unaligned(icv, (__le32 *)(data + data_len)); 121 138 122 - crypto_cipher_setkey(tfm, rc4key, klen); 123 - for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) 124 - crypto_cipher_encrypt_one(tfm, data + i, data + i); 139 + arc4_setkey(ctx, rc4key, klen); 140 + arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); 141 + memzero_explicit(ctx, sizeof(*ctx)); 125 142 126 143 return 0; 127 144 } ··· 156 181 /* Add room for ICV */ 157 182 skb_put(skb, IEEE80211_WEP_ICV_LEN); 158 183 159 - return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, 184 + return ieee80211_wep_encrypt_data(&local->wep_tx_ctx, rc4key, keylen + 3, 160 185 iv + IEEE80211_WEP_IV_LEN, len); 161 186 } 162 187 ··· 164 189 /* Perform WEP decryption using given key. data buffer includes encrypted 165 190 * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. 166 191 * Return 0 on success and -1 on ICV mismatch. 
*/ 167 - int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, 192 + int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key, 168 193 size_t klen, u8 *data, size_t data_len) 169 194 { 170 195 __le32 crc; 171 - int i; 172 196 173 - if (IS_ERR(tfm)) 174 - return -1; 175 - 176 - crypto_cipher_setkey(tfm, rc4key, klen); 177 - for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) 178 - crypto_cipher_decrypt_one(tfm, data + i, data + i); 197 + arc4_setkey(ctx, rc4key, klen); 198 + arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); 199 + memzero_explicit(ctx, sizeof(*ctx)); 179 200 180 201 crc = cpu_to_le32(~crc32_le(~0, data, data_len)); 181 202 if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) ··· 224 253 /* Copy rest of the WEP key (the secret part) */ 225 254 memcpy(rc4key + 3, key->conf.key, key->conf.keylen); 226 255 227 - if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, 256 + if (ieee80211_wep_decrypt_data(&local->wep_rx_ctx, rc4key, klen, 228 257 skb->data + hdrlen + 229 258 IEEE80211_WEP_IV_LEN, len)) 230 259 ret = -1;
+2 -3
net/mac80211/wep.h
··· 14 14 #include "key.h" 15 15 16 16 int ieee80211_wep_init(struct ieee80211_local *local); 17 - void ieee80211_wep_free(struct ieee80211_local *local); 18 - int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, 17 + int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key, 19 18 size_t klen, u8 *data, size_t data_len); 20 19 int ieee80211_wep_encrypt(struct ieee80211_local *local, 21 20 struct sk_buff *skb, 22 21 const u8 *key, int keylen, int keyidx); 23 - int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, 22 + int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key, 24 23 size_t klen, u8 *data, size_t data_len); 25 24 26 25 ieee80211_rx_result
+2 -2
net/mac80211/wpa.c
··· 239 239 /* Add room for ICV */ 240 240 skb_put(skb, IEEE80211_TKIP_ICV_LEN); 241 241 242 - return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, 242 + return ieee80211_tkip_encrypt_data(&tx->local->wep_tx_ctx, 243 243 key, skb, pos, len); 244 244 } 245 245 ··· 290 290 if (status->flag & RX_FLAG_DECRYPTED) 291 291 hwaccel = 1; 292 292 293 - res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, 293 + res = ieee80211_tkip_decrypt_data(&rx->local->wep_rx_ctx, 294 294 key, skb->data + hdrlen, 295 295 skb->len - hdrlen, rx->sta->sta.addr, 296 296 hdr->addr1, hwaccel, rx->security_idx,
+2
net/wireless/Kconfig
··· 213 213 214 214 config LIB80211_CRYPT_WEP 215 215 tristate 216 + select CRYPTO_LIB_ARC4 216 217 217 218 config LIB80211_CRYPT_CCMP 218 219 tristate 219 220 220 221 config LIB80211_CRYPT_TKIP 221 222 tristate 223 + select CRYPTO_LIB_ARC4 222 224 223 225 config LIB80211_DEBUG 224 226 bool "lib80211 debugging messages"
+17 -31
net/wireless/lib80211_crypt_tkip.c
··· 9 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 10 11 11 #include <linux/err.h> 12 + #include <linux/fips.h> 12 13 #include <linux/module.h> 13 14 #include <linux/init.h> 14 15 #include <linux/slab.h> ··· 26 25 #include <linux/ieee80211.h> 27 26 #include <net/iw_handler.h> 28 27 28 + #include <crypto/arc4.h> 29 29 #include <crypto/hash.h> 30 30 #include <linux/crypto.h> 31 31 #include <linux/crc32.h> ··· 62 60 63 61 int key_idx; 64 62 65 - struct crypto_cipher *rx_tfm_arc4; 63 + struct arc4_ctx rx_ctx_arc4; 64 + struct arc4_ctx tx_ctx_arc4; 66 65 struct crypto_shash *rx_tfm_michael; 67 - struct crypto_cipher *tx_tfm_arc4; 68 66 struct crypto_shash *tx_tfm_michael; 69 67 70 68 /* scratch buffers for virt_to_page() (crypto API) */ ··· 91 89 { 92 90 struct lib80211_tkip_data *priv; 93 91 92 + if (fips_enabled) 93 + return NULL; 94 + 94 95 priv = kzalloc(sizeof(*priv), GFP_ATOMIC); 95 96 if (priv == NULL) 96 97 goto fail; 97 98 98 99 priv->key_idx = key_idx; 99 100 100 - priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0); 101 - if (IS_ERR(priv->tx_tfm_arc4)) { 102 - priv->tx_tfm_arc4 = NULL; 103 - goto fail; 104 - } 105 - 106 101 priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); 107 102 if (IS_ERR(priv->tx_tfm_michael)) { 108 103 priv->tx_tfm_michael = NULL; 109 - goto fail; 110 - } 111 - 112 - priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0); 113 - if (IS_ERR(priv->rx_tfm_arc4)) { 114 - priv->rx_tfm_arc4 = NULL; 115 104 goto fail; 116 105 } 117 106 ··· 117 124 fail: 118 125 if (priv) { 119 126 crypto_free_shash(priv->tx_tfm_michael); 120 - crypto_free_cipher(priv->tx_tfm_arc4); 121 127 crypto_free_shash(priv->rx_tfm_michael); 122 - crypto_free_cipher(priv->rx_tfm_arc4); 123 128 kfree(priv); 124 129 } 125 130 ··· 129 138 struct lib80211_tkip_data *_priv = priv; 130 139 if (_priv) { 131 140 crypto_free_shash(_priv->tx_tfm_michael); 132 - crypto_free_cipher(_priv->tx_tfm_arc4); 133 141 crypto_free_shash(_priv->rx_tfm_michael); 134 - 
crypto_free_cipher(_priv->rx_tfm_arc4); 135 142 } 136 - kfree(priv); 143 + kzfree(priv); 137 144 } 138 145 139 146 static inline u16 RotR1(u16 val) ··· 330 341 int len; 331 342 u8 rc4key[16], *pos, *icv; 332 343 u32 crc; 333 - int i; 334 344 335 345 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { 336 346 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ··· 354 366 icv[2] = crc >> 16; 355 367 icv[3] = crc >> 24; 356 368 357 - crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); 358 - for (i = 0; i < len + 4; i++) 359 - crypto_cipher_encrypt_one(tkey->tx_tfm_arc4, pos + i, pos + i); 369 + arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16); 370 + arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4); 371 + 360 372 return 0; 361 373 } 362 374 ··· 384 396 u8 icv[4]; 385 397 u32 crc; 386 398 int plen; 387 - int i; 388 399 389 400 hdr = (struct ieee80211_hdr *)skb->data; 390 401 ··· 436 449 437 450 plen = skb->len - hdr_len - 12; 438 451 439 - crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); 440 - for (i = 0; i < plen + 4; i++) 441 - crypto_cipher_decrypt_one(tkey->rx_tfm_arc4, pos + i, pos + i); 452 + arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16); 453 + arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4); 442 454 443 455 crc = ~crc32_le(~0, pos, plen); 444 456 icv[0] = crc; ··· 622 636 struct lib80211_tkip_data *tkey = priv; 623 637 int keyidx; 624 638 struct crypto_shash *tfm = tkey->tx_tfm_michael; 625 - struct crypto_cipher *tfm2 = tkey->tx_tfm_arc4; 639 + struct arc4_ctx *tfm2 = &tkey->tx_ctx_arc4; 626 640 struct crypto_shash *tfm3 = tkey->rx_tfm_michael; 627 - struct crypto_cipher *tfm4 = tkey->rx_tfm_arc4; 641 + struct arc4_ctx *tfm4 = &tkey->rx_ctx_arc4; 628 642 629 643 keyidx = tkey->key_idx; 630 644 memset(tkey, 0, sizeof(*tkey)); 631 645 tkey->key_idx = keyidx; 632 646 tkey->tx_tfm_michael = tfm; 633 - tkey->tx_tfm_arc4 = tfm2; 647 + tkey->tx_ctx_arc4 = *tfm2; 634 648 tkey->rx_tfm_michael = tfm3; 635 - tkey->rx_tfm_arc4 = tfm4; 649 + 
tkey->rx_ctx_arc4 = *tfm4; 636 650 if (len == TKIP_KEY_LEN) { 637 651 memcpy(tkey->key, key, TKIP_KEY_LEN); 638 652 tkey->key_set = 1;
+13 -38
net/wireless/lib80211_crypt_wep.c
··· 7 7 */ 8 8 9 9 #include <linux/err.h> 10 + #include <linux/fips.h> 10 11 #include <linux/module.h> 11 12 #include <linux/init.h> 12 13 #include <linux/slab.h> ··· 19 18 20 19 #include <net/lib80211.h> 21 20 22 - #include <linux/crypto.h> 21 + #include <crypto/arc4.h> 23 22 #include <linux/crc32.h> 24 23 25 24 MODULE_AUTHOR("Jouni Malinen"); ··· 32 31 u8 key[WEP_KEY_LEN + 1]; 33 32 u8 key_len; 34 33 u8 key_idx; 35 - struct crypto_cipher *tx_tfm; 36 - struct crypto_cipher *rx_tfm; 34 + struct arc4_ctx tx_ctx; 35 + struct arc4_ctx rx_ctx; 37 36 }; 38 37 39 38 static void *lib80211_wep_init(int keyidx) 40 39 { 41 40 struct lib80211_wep_data *priv; 42 41 42 + if (fips_enabled) 43 + return NULL; 44 + 43 45 priv = kzalloc(sizeof(*priv), GFP_ATOMIC); 44 46 if (priv == NULL) 45 - goto fail; 47 + return NULL; 46 48 priv->key_idx = keyidx; 47 49 48 - priv->tx_tfm = crypto_alloc_cipher("arc4", 0, 0); 49 - if (IS_ERR(priv->tx_tfm)) { 50 - priv->tx_tfm = NULL; 51 - goto fail; 52 - } 53 - 54 - priv->rx_tfm = crypto_alloc_cipher("arc4", 0, 0); 55 - if (IS_ERR(priv->rx_tfm)) { 56 - priv->rx_tfm = NULL; 57 - goto fail; 58 - } 59 50 /* start WEP IV from a random value */ 60 51 get_random_bytes(&priv->iv, 4); 61 52 62 53 return priv; 63 - 64 - fail: 65 - if (priv) { 66 - crypto_free_cipher(priv->tx_tfm); 67 - crypto_free_cipher(priv->rx_tfm); 68 - kfree(priv); 69 - } 70 - return NULL; 71 54 } 72 55 73 56 static void lib80211_wep_deinit(void *priv) 74 57 { 75 - struct lib80211_wep_data *_priv = priv; 76 - if (_priv) { 77 - crypto_free_cipher(_priv->tx_tfm); 78 - crypto_free_cipher(_priv->rx_tfm); 79 - } 80 - kfree(priv); 58 + kzfree(priv); 81 59 } 82 60 83 61 /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ ··· 108 128 u32 crc, klen, len; 109 129 u8 *pos, *icv; 110 130 u8 key[WEP_KEY_LEN + 3]; 111 - int i; 112 131 113 132 /* other checks are in lib80211_wep_build_iv */ 114 133 if (skb_tailroom(skb) < 4) ··· 135 156 icv[2] = crc >> 16; 136 157 icv[3] = crc 
>> 24; 137 158 138 - crypto_cipher_setkey(wep->tx_tfm, key, klen); 139 - 140 - for (i = 0; i < len + 4; i++) 141 - crypto_cipher_encrypt_one(wep->tx_tfm, pos + i, pos + i); 159 + arc4_setkey(&wep->tx_ctx, key, klen); 160 + arc4_crypt(&wep->tx_ctx, pos, pos, len + 4); 142 161 143 162 return 0; 144 163 } ··· 154 177 u32 crc, klen, plen; 155 178 u8 key[WEP_KEY_LEN + 3]; 156 179 u8 keyidx, *pos, icv[4]; 157 - int i; 158 180 159 181 if (skb->len < hdr_len + 8) 160 182 return -1; ··· 174 198 /* Apply RC4 to data and compute CRC32 over decrypted data */ 175 199 plen = skb->len - hdr_len - 8; 176 200 177 - crypto_cipher_setkey(wep->rx_tfm, key, klen); 178 - for (i = 0; i < plen + 4; i++) 179 - crypto_cipher_decrypt_one(wep->rx_tfm, pos + i, pos + i); 201 + arc4_setkey(&wep->rx_ctx, key, klen); 202 + arc4_crypt(&wep->rx_ctx, pos, pos, plen + 4); 180 203 181 204 crc = ~crc32_le(~0, pos, plen); 182 205 icv[0] = crc;