Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
"Here is the crypto update for 3.20:

- Added 192/256-bit key support to aesni GCM.
- Added MIPS OCTEON MD5 support.
- Fixed hwrng starvation and race conditions.
- Added note that memzero_explicit is not a substitute for memset.
- Added user-space interface for crypto_rng.
- Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (71 commits)
crypto: tcrypt - do not allocate iv on stack for aead speed tests
crypto: testmgr - limit IV copy length in aead tests
crypto: tcrypt - fix buflen remainder calculation
crypto: testmgr - mark rfc4106(gcm(aes)) as fips_allowed
crypto: caam - fix resource clean-up on error path for caam_jr_init
crypto: caam - pair irq map and dispose in the same function
crypto: ccp - terminate ccp_support array with empty element
crypto: caam - remove unused local variable
crypto: caam - remove dead code
crypto: caam - don't emit ICV check failures to dmesg
hwrng: virtio - drop extra empty line
crypto: replace scatterwalk_sg_next with sg_next
crypto: atmel - Free memory in error path
crypto: doc - remove colons in comments
crypto: seqiv - Ensure that IV size is at least 8 bytes
crypto: cts - Weed out non-CBC algorithms
MAINTAINERS: add linux-crypto to hw random
crypto: cts - Remove bogus use of seqiv
crypto: qat - don't need qat_auth_state struct
crypto: algif_rng - fix sparse non static symbol warning
...

+1788 -712
+1
MAINTAINERS
··· 4434 4434 HARDWARE RANDOM NUMBER GENERATOR CORE 4435 4435 M: Matt Mackall <mpm@selenic.com> 4436 4436 M: Herbert Xu <herbert@gondor.apana.org.au> 4437 + L: linux-crypto@vger.kernel.org 4437 4438 S: Odd fixes 4438 4439 F: Documentation/hw_random.txt 4439 4440 F: drivers/char/hw_random/
+1
arch/mips/cavium-octeon/Makefile
··· 16 16 obj-y += dma-octeon.o 17 17 obj-y += octeon-memcpy.o 18 18 obj-y += executive/ 19 + obj-y += crypto/ 19 20 20 21 obj-$(CONFIG_MTD) += flash_setup.o 21 22 obj-$(CONFIG_SMP) += smp.o
+7
arch/mips/cavium-octeon/crypto/Makefile
··· 1 + # 2 + # OCTEON-specific crypto modules. 3 + # 4 + 5 + obj-y += octeon-crypto.o 6 + 7 + obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o
+66
arch/mips/cavium-octeon/crypto/octeon-crypto.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2004-2012 Cavium Networks 7 + */ 8 + 9 + #include <asm/cop2.h> 10 + #include <linux/module.h> 11 + #include <linux/interrupt.h> 12 + 13 + #include "octeon-crypto.h" 14 + 15 + /** 16 + * Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any 17 + * crypto operations in calls to octeon_crypto_enable/disable in order to make 18 + * sure the state of COP2 isn't corrupted if userspace is also performing 19 + * hardware crypto operations. Allocate the state parameter on the stack. 20 + * Preemption must be disabled to prevent context switches. 21 + * 22 + * @state: Pointer to state structure to store current COP2 state in. 23 + * 24 + * Returns: Flags to be passed to octeon_crypto_disable() 25 + */ 26 + unsigned long octeon_crypto_enable(struct octeon_cop2_state *state) 27 + { 28 + int status; 29 + unsigned long flags; 30 + 31 + local_irq_save(flags); 32 + status = read_c0_status(); 33 + write_c0_status(status | ST0_CU2); 34 + if (KSTK_STATUS(current) & ST0_CU2) { 35 + octeon_cop2_save(&(current->thread.cp2)); 36 + KSTK_STATUS(current) &= ~ST0_CU2; 37 + status &= ~ST0_CU2; 38 + } else if (status & ST0_CU2) { 39 + octeon_cop2_save(state); 40 + } 41 + local_irq_restore(flags); 42 + return status & ST0_CU2; 43 + } 44 + EXPORT_SYMBOL_GPL(octeon_crypto_enable); 45 + 46 + /** 47 + * Disable access to Octeon's COP2 crypto hardware in the kernel. This must be 48 + * called after an octeon_crypto_enable() before any context switch or return to 49 + * userspace. 50 + * 51 + * @state: Pointer to COP2 state to restore 52 + * @flags: Return value from octeon_crypto_enable() 53 + */ 54 + void octeon_crypto_disable(struct octeon_cop2_state *state, 55 + unsigned long crypto_flags) 56 + { 57 + unsigned long flags; 58 + 59 + local_irq_save(flags); 60 + if (crypto_flags & ST0_CU2) 61 + octeon_cop2_restore(state); 62 + else 63 + write_c0_status(read_c0_status() & ~ST0_CU2); 64 + local_irq_restore(flags); 65 + } 66 + EXPORT_SYMBOL_GPL(octeon_crypto_disable);
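The octeon_crypto_enable()/octeon_crypto_disable() pair added above is meant to bracket every COP2 crypto operation, with the saved state on the stack and preemption (and, in the MD5 driver added later in this series, softirqs) disabled for the duration. A minimal sketch of that calling pattern follows; the body of the bracketed region is a placeholder, not part of this patch:

	#include <linux/preempt.h>
	#include <linux/bottom_half.h>
	#include "octeon-crypto.h"

	/* Bracket a COP2 crypto operation; the saved state lives on the stack
	 * and preemption/softirqs stay disabled so a context switch cannot
	 * clobber the hardware state in between. */
	static void example_cop2_operation(void)
	{
		struct octeon_cop2_state state;
		unsigned long flags;

		local_bh_disable();
		preempt_disable();
		flags = octeon_crypto_enable(&state);

		/* ... issue COP2 crypto instructions here (placeholder) ... */

		octeon_crypto_disable(&state, flags);
		preempt_enable();
		local_bh_enable();
	}

The octeon-md5.c driver below uses exactly this sequence in its update and final handlers.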
+75
arch/mips/cavium-octeon/crypto/octeon-crypto.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved. 7 + * 8 + * MD5 instruction definitions added by Aaro Koskinen <aaro.koskinen@iki.fi>. 9 + * 10 + */ 11 + #ifndef __LINUX_OCTEON_CRYPTO_H 12 + #define __LINUX_OCTEON_CRYPTO_H 13 + 14 + #include <linux/sched.h> 15 + #include <asm/mipsregs.h> 16 + 17 + #define OCTEON_CR_OPCODE_PRIORITY 300 18 + 19 + extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state); 20 + extern void octeon_crypto_disable(struct octeon_cop2_state *state, 21 + unsigned long flags); 22 + 23 + /* 24 + * Macros needed to implement MD5: 25 + */ 26 + 27 + /* 28 + * The index can be 0-1. 29 + */ 30 + #define write_octeon_64bit_hash_dword(value, index) \ 31 + do { \ 32 + __asm__ __volatile__ ( \ 33 + "dmtc2 %[rt],0x0048+" STR(index) \ 34 + : \ 35 + : [rt] "d" (value)); \ 36 + } while (0) 37 + 38 + /* 39 + * The index can be 0-1. 40 + */ 41 + #define read_octeon_64bit_hash_dword(index) \ 42 + ({ \ 43 + u64 __value; \ 44 + \ 45 + __asm__ __volatile__ ( \ 46 + "dmfc2 %[rt],0x0048+" STR(index) \ 47 + : [rt] "=d" (__value) \ 48 + : ); \ 49 + \ 50 + __value; \ 51 + }) 52 + 53 + /* 54 + * The index can be 0-6. 55 + */ 56 + #define write_octeon_64bit_block_dword(value, index) \ 57 + do { \ 58 + __asm__ __volatile__ ( \ 59 + "dmtc2 %[rt],0x0040+" STR(index) \ 60 + : \ 61 + : [rt] "d" (value)); \ 62 + } while (0) 63 + 64 + /* 65 + * The value is the final block dword (64-bit). 66 + */ 67 + #define octeon_md5_start(value) \ 68 + do { \ 69 + __asm__ __volatile__ ( \ 70 + "dmtc2 %[rt],0x4047" \ 71 + : \ 72 + : [rt] "d" (value)); \ 73 + } while (0) 74 + 75 + #endif /* __LINUX_OCTEON_CRYPTO_H */
+216
arch/mips/cavium-octeon/crypto/octeon-md5.c
··· 1 + /* 2 + * Cryptographic API. 3 + * 4 + * MD5 Message Digest Algorithm (RFC1321). 5 + * 6 + * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>. 7 + * 8 + * Based on crypto/md5.c, which is: 9 + * 10 + * Derived from cryptoapi implementation, originally based on the 11 + * public domain implementation written by Colin Plumb in 1993. 12 + * 13 + * Copyright (c) Cryptoapi developers. 14 + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 15 + * 16 + * This program is free software; you can redistribute it and/or modify it 17 + * under the terms of the GNU General Public License as published by the Free 18 + * Software Foundation; either version 2 of the License, or (at your option) 19 + * any later version. 20 + */ 21 + 22 + #include <crypto/md5.h> 23 + #include <linux/init.h> 24 + #include <linux/types.h> 25 + #include <linux/module.h> 26 + #include <linux/string.h> 27 + #include <asm/byteorder.h> 28 + #include <linux/cryptohash.h> 29 + #include <asm/octeon/octeon.h> 30 + #include <crypto/internal/hash.h> 31 + 32 + #include "octeon-crypto.h" 33 + 34 + /* 35 + * We pass everything as 64-bit. OCTEON can handle misaligned data. 36 + */ 37 + 38 + static void octeon_md5_store_hash(struct md5_state *ctx) 39 + { 40 + u64 *hash = (u64 *)ctx->hash; 41 + 42 + write_octeon_64bit_hash_dword(hash[0], 0); 43 + write_octeon_64bit_hash_dword(hash[1], 1); 44 + } 45 + 46 + static void octeon_md5_read_hash(struct md5_state *ctx) 47 + { 48 + u64 *hash = (u64 *)ctx->hash; 49 + 50 + hash[0] = read_octeon_64bit_hash_dword(0); 51 + hash[1] = read_octeon_64bit_hash_dword(1); 52 + } 53 + 54 + static void octeon_md5_transform(const void *_block) 55 + { 56 + const u64 *block = _block; 57 + 58 + write_octeon_64bit_block_dword(block[0], 0); 59 + write_octeon_64bit_block_dword(block[1], 1); 60 + write_octeon_64bit_block_dword(block[2], 2); 61 + write_octeon_64bit_block_dword(block[3], 3); 62 + write_octeon_64bit_block_dword(block[4], 4); 63 + write_octeon_64bit_block_dword(block[5], 5); 64 + write_octeon_64bit_block_dword(block[6], 6); 65 + octeon_md5_start(block[7]); 66 + } 67 + 68 + static int octeon_md5_init(struct shash_desc *desc) 69 + { 70 + struct md5_state *mctx = shash_desc_ctx(desc); 71 + 72 + mctx->hash[0] = cpu_to_le32(0x67452301); 73 + mctx->hash[1] = cpu_to_le32(0xefcdab89); 74 + mctx->hash[2] = cpu_to_le32(0x98badcfe); 75 + mctx->hash[3] = cpu_to_le32(0x10325476); 76 + mctx->byte_count = 0; 77 + 78 + return 0; 79 + } 80 + 81 + static int octeon_md5_update(struct shash_desc *desc, const u8 *data, 82 + unsigned int len) 83 + { 84 + struct md5_state *mctx = shash_desc_ctx(desc); 85 + const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); 86 + struct octeon_cop2_state state; 87 + unsigned long flags; 88 + 89 + mctx->byte_count += len; 90 + 91 + if (avail > len) { 92 + memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), 93 + data, len); 94 + return 0; 95 + } 96 + 97 + memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, 98 + avail); 99 + 100 + local_bh_disable(); 101 + preempt_disable(); 102 + flags = octeon_crypto_enable(&state); 103 + octeon_md5_store_hash(mctx); 104 + 105 + octeon_md5_transform(mctx->block); 106 + data += avail; 107 + len -= avail; 108 + 109 + while (len >= sizeof(mctx->block)) { 110 + octeon_md5_transform(data); 111 + data += sizeof(mctx->block); 112 + len -= sizeof(mctx->block); 113 + } 114 + 115 + octeon_md5_read_hash(mctx); 116 + octeon_crypto_disable(&state, flags); 117 + preempt_enable(); 118 + local_bh_enable(); 119 + 120 + 
memcpy(mctx->block, data, len); 121 + 122 + return 0; 123 + } 124 + 125 + static int octeon_md5_final(struct shash_desc *desc, u8 *out) 126 + { 127 + struct md5_state *mctx = shash_desc_ctx(desc); 128 + const unsigned int offset = mctx->byte_count & 0x3f; 129 + char *p = (char *)mctx->block + offset; 130 + int padding = 56 - (offset + 1); 131 + struct octeon_cop2_state state; 132 + unsigned long flags; 133 + 134 + *p++ = 0x80; 135 + 136 + local_bh_disable(); 137 + preempt_disable(); 138 + flags = octeon_crypto_enable(&state); 139 + octeon_md5_store_hash(mctx); 140 + 141 + if (padding < 0) { 142 + memset(p, 0x00, padding + sizeof(u64)); 143 + octeon_md5_transform(mctx->block); 144 + p = (char *)mctx->block; 145 + padding = 56; 146 + } 147 + 148 + memset(p, 0, padding); 149 + mctx->block[14] = cpu_to_le32(mctx->byte_count << 3); 150 + mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29); 151 + octeon_md5_transform(mctx->block); 152 + 153 + octeon_md5_read_hash(mctx); 154 + octeon_crypto_disable(&state, flags); 155 + preempt_enable(); 156 + local_bh_enable(); 157 + 158 + memcpy(out, mctx->hash, sizeof(mctx->hash)); 159 + memset(mctx, 0, sizeof(*mctx)); 160 + 161 + return 0; 162 + } 163 + 164 + static int octeon_md5_export(struct shash_desc *desc, void *out) 165 + { 166 + struct md5_state *ctx = shash_desc_ctx(desc); 167 + 168 + memcpy(out, ctx, sizeof(*ctx)); 169 + return 0; 170 + } 171 + 172 + static int octeon_md5_import(struct shash_desc *desc, const void *in) 173 + { 174 + struct md5_state *ctx = shash_desc_ctx(desc); 175 + 176 + memcpy(ctx, in, sizeof(*ctx)); 177 + return 0; 178 + } 179 + 180 + static struct shash_alg alg = { 181 + .digestsize = MD5_DIGEST_SIZE, 182 + .init = octeon_md5_init, 183 + .update = octeon_md5_update, 184 + .final = octeon_md5_final, 185 + .export = octeon_md5_export, 186 + .import = octeon_md5_import, 187 + .descsize = sizeof(struct md5_state), 188 + .statesize = sizeof(struct md5_state), 189 + .base = { 190 + .cra_name = "md5", 191 + .cra_driver_name= "octeon-md5", 192 + .cra_priority = OCTEON_CR_OPCODE_PRIORITY, 193 + .cra_flags = CRYPTO_ALG_TYPE_SHASH, 194 + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 195 + .cra_module = THIS_MODULE, 196 + } 197 + }; 198 + 199 + static int __init md5_mod_init(void) 200 + { 201 + if (!octeon_has_crypto()) 202 + return -ENOTSUPP; 203 + return crypto_register_shash(&alg); 204 + } 205 + 206 + static void __exit md5_mod_fini(void) 207 + { 208 + crypto_unregister_shash(&alg); 209 + } 210 + 211 + module_init(md5_mod_init); 212 + module_exit(md5_mod_fini); 213 + 214 + MODULE_LICENSE("GPL"); 215 + MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)"); 216 + MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+6
arch/mips/cavium-octeon/executive/octeon-model.c
··· 27 27 28 28 #include <asm/octeon/octeon.h> 29 29 30 + enum octeon_feature_bits __octeon_feature_bits __read_mostly; 31 + EXPORT_SYMBOL_GPL(__octeon_feature_bits); 32 + 30 33 /** 31 34 * Read a byte of fuse data 32 35 * @byte_addr: address to read ··· 105 102 suffix = "EXP"; 106 103 else 107 104 suffix = "NSP"; 105 + 106 + if (!fus_dat2.s.nocrypto) 107 + __octeon_feature_bits |= OCTEON_HAS_CRYPTO; 108 108 109 109 /* 110 110 * Assume pass number is encoded using <5:3><2:0>. Exceptions
+15 -2
arch/mips/include/asm/octeon/octeon-feature.h
··· 46 46 OCTEON_FEATURE_SAAD, 47 47 /* Does this Octeon support the ZIP offload engine? */ 48 48 OCTEON_FEATURE_ZIP, 49 - /* Does this Octeon support crypto acceleration using COP2? */ 50 - OCTEON_FEATURE_CRYPTO, 51 49 OCTEON_FEATURE_DORM_CRYPTO, 52 50 /* Does this Octeon support PCI express? */ 53 51 OCTEON_FEATURE_PCIE, ··· 83 85 OCTEON_FEATURE_CIU2, 84 86 OCTEON_MAX_FEATURE 85 87 }; 88 + 89 + enum octeon_feature_bits { 90 + OCTEON_HAS_CRYPTO = 0x0001, /* Crypto acceleration using COP2 */ 91 + }; 92 + extern enum octeon_feature_bits __octeon_feature_bits; 93 + 94 + /** 95 + * octeon_has_crypto() - Check if this OCTEON has crypto acceleration support. 96 + * 97 + * Returns: Non-zero if the feature exists. Zero if the feature does not exist. 98 + */ 99 + static inline int octeon_has_crypto(void) 100 + { 101 + return __octeon_feature_bits & OCTEON_HAS_CRYPTO; 102 + } 86 103 87 104 /** 88 105 * Determine if the current Octeon supports a specific feature. These
-5
arch/mips/include/asm/octeon/octeon.h
··· 44 44 extern const char *octeon_get_boot_argument(int arg); 45 45 extern void octeon_hal_setup_reserved32(void); 46 46 extern void octeon_user_io_init(void); 47 - struct octeon_cop2_state; 48 - extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state); 49 - extern void octeon_crypto_disable(struct octeon_cop2_state *state, 50 - unsigned long flags); 51 - extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task); 52 47 53 48 extern void octeon_init_cvmcount(void); 54 49 extern void octeon_setup_delays(void);
+1 -1
arch/sparc/crypto/aes_glue.c
··· 497 497 module_exit(aes_sparc64_mod_fini); 498 498 499 499 MODULE_LICENSE("GPL"); 500 - MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); 500 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, sparc64 aes opcode accelerated"); 501 501 502 502 MODULE_ALIAS_CRYPTO("aes"); 503 503
+1 -1
arch/sparc/crypto/camellia_glue.c
··· 322 322 MODULE_LICENSE("GPL"); 323 323 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); 324 324 325 - MODULE_ALIAS_CRYPTO("aes"); 325 + MODULE_ALIAS_CRYPTO("camellia"); 326 326 327 327 #include "crop_devid.c"
+1
arch/sparc/crypto/des_glue.c
··· 533 533 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); 534 534 535 535 MODULE_ALIAS_CRYPTO("des"); 536 + MODULE_ALIAS_CRYPTO("des3_ede"); 536 537 537 538 #include "crop_devid.c"
+1 -1
arch/sparc/crypto/md5_glue.c
··· 183 183 module_exit(md5_sparc64_mod_fini); 184 184 185 185 MODULE_LICENSE("GPL"); 186 - MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); 186 + MODULE_DESCRIPTION("MD5 Message Digest Algorithm, sparc64 md5 opcode accelerated"); 187 187 188 188 MODULE_ALIAS_CRYPTO("md5"); 189 189
+177 -166
arch/x86/crypto/aesni-intel_asm.S
··· 32 32 #include <linux/linkage.h> 33 33 #include <asm/inst.h> 34 34 35 + /* 36 + * The following macros are used to move an (un)aligned 16 byte value to/from 37 + * an XMM register. This can done for either FP or integer values, for FP use 38 + * movaps (move aligned packed single) or integer use movdqa (move double quad 39 + * aligned). It doesn't make a performance difference which instruction is used 40 + * since Nehalem (original Core i7) was released. However, the movaps is a byte 41 + * shorter, so that is the one we'll use for now. (same for unaligned). 42 + */ 43 + #define MOVADQ movaps 44 + #define MOVUDQ movups 45 + 35 46 #ifdef __x86_64__ 47 + 36 48 .data 37 49 .align 16 38 50 .Lgf128mul_x_ble_mask: 39 51 .octa 0x00000000000000010000000000000087 40 - 41 52 POLY: .octa 0xC2000000000000000000000000000001 42 53 TWOONE: .octa 0x00000001000000000000000000000001 43 54 ··· 100 89 #define arg8 STACK_OFFSET+16(%r14) 101 90 #define arg9 STACK_OFFSET+24(%r14) 102 91 #define arg10 STACK_OFFSET+32(%r14) 92 + #define keysize 2*15*16(%arg1) 103 93 #endif 104 94 105 95 ··· 225 213 226 214 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ 227 215 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation 216 + MOVADQ SHUF_MASK(%rip), %xmm14 228 217 mov arg7, %r10 # %r10 = AAD 229 218 mov arg8, %r12 # %r12 = aadLen 230 219 mov %r12, %r11 231 220 pxor %xmm\i, %xmm\i 221 + 232 222 _get_AAD_loop\num_initial_blocks\operation: 233 223 movd (%r10), \TMP1 234 224 pslldq $12, \TMP1 ··· 239 225 add $4, %r10 240 226 sub $4, %r12 241 227 jne _get_AAD_loop\num_initial_blocks\operation 228 + 242 229 cmp $16, %r11 243 230 je _get_AAD_loop2_done\num_initial_blocks\operation 231 + 244 232 mov $16, %r12 245 233 _get_AAD_loop2\num_initial_blocks\operation: 246 234 psrldq $4, %xmm\i 247 235 sub $4, %r12 248 236 cmp %r11, %r12 249 237 jne _get_AAD_loop2\num_initial_blocks\operation 238 + 250 239 _get_AAD_loop2_done\num_initial_blocks\operation: 251 - movdqa SHUF_MASK(%rip), %xmm14 252 240 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data 253 241 254 242 xor %r11, %r11 # initialise the data pointer offset as zero ··· 259 243 260 244 mov %arg5, %rax # %rax = *Y0 261 245 movdqu (%rax), \XMM0 # XMM0 = Y0 262 - movdqa SHUF_MASK(%rip), %xmm14 263 246 PSHUFB_XMM %xmm14, \XMM0 264 247 265 248 .if (\i == 5) || (\i == 6) || (\i == 7) 249 + MOVADQ ONE(%RIP),\TMP1 250 + MOVADQ (%arg1),\TMP2 266 251 .irpc index, \i_seq 267 - paddd ONE(%rip), \XMM0 # INCR Y0 252 + paddd \TMP1, \XMM0 # INCR Y0 268 253 movdqa \XMM0, %xmm\index 269 - movdqa SHUF_MASK(%rip), %xmm14 270 254 PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap 255 + pxor \TMP2, %xmm\index 256 + .endr 257 + lea 0x10(%arg1),%r10 258 + mov keysize,%eax 259 + shr $2,%eax # 128->4, 192->6, 256->8 260 + add $5,%eax # 128->9, 192->11, 256->13 271 261 262 + aes_loop_initial_dec\num_initial_blocks: 263 + MOVADQ (%r10),\TMP1 264 + .irpc index, \i_seq 265 + AESENC \TMP1, %xmm\index 272 266 .endr 267 + add $16,%r10 268 + sub $1,%eax 269 + jnz aes_loop_initial_dec\num_initial_blocks 270 + 271 + MOVADQ (%r10), \TMP1 273 272 .irpc index, \i_seq 274 - pxor 16*0(%arg1), %xmm\index 275 - .endr 276 - .irpc index, \i_seq 277 - movaps 0x10(%rdi), \TMP1 278 - AESENC \TMP1, %xmm\index # Round 1 279 - .endr 280 - .irpc index, \i_seq 281 - movaps 0x20(%arg1), \TMP1 282 - AESENC \TMP1, %xmm\index # Round 2 283 - .endr 284 - .irpc index, \i_seq 285 - movaps 0x30(%arg1), \TMP1 286 - AESENC \TMP1, %xmm\index # Round 2 287 - .endr 288 - .irpc index, \i_seq 289 - 
movaps 0x40(%arg1), \TMP1 290 - AESENC \TMP1, %xmm\index # Round 2 291 - .endr 292 - .irpc index, \i_seq 293 - movaps 0x50(%arg1), \TMP1 294 - AESENC \TMP1, %xmm\index # Round 2 295 - .endr 296 - .irpc index, \i_seq 297 - movaps 0x60(%arg1), \TMP1 298 - AESENC \TMP1, %xmm\index # Round 2 299 - .endr 300 - .irpc index, \i_seq 301 - movaps 0x70(%arg1), \TMP1 302 - AESENC \TMP1, %xmm\index # Round 2 303 - .endr 304 - .irpc index, \i_seq 305 - movaps 0x80(%arg1), \TMP1 306 - AESENC \TMP1, %xmm\index # Round 2 307 - .endr 308 - .irpc index, \i_seq 309 - movaps 0x90(%arg1), \TMP1 310 - AESENC \TMP1, %xmm\index # Round 2 311 - .endr 312 - .irpc index, \i_seq 313 - movaps 0xa0(%arg1), \TMP1 314 - AESENCLAST \TMP1, %xmm\index # Round 10 273 + AESENCLAST \TMP1, %xmm\index # Last Round 315 274 .endr 316 275 .irpc index, \i_seq 317 276 movdqu (%arg3 , %r11, 1), \TMP1 ··· 296 305 add $16, %r11 297 306 298 307 movdqa \TMP1, %xmm\index 299 - movdqa SHUF_MASK(%rip), %xmm14 300 308 PSHUFB_XMM %xmm14, %xmm\index 301 - 302 - # prepare plaintext/ciphertext for GHASH computation 309 + # prepare plaintext/ciphertext for GHASH computation 303 310 .endr 304 311 .endif 305 312 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 ··· 327 338 * Precomputations for HashKey parallel with encryption of first 4 blocks. 328 339 * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i 329 340 */ 330 - paddd ONE(%rip), \XMM0 # INCR Y0 331 - movdqa \XMM0, \XMM1 332 - movdqa SHUF_MASK(%rip), %xmm14 341 + MOVADQ ONE(%rip), \TMP1 342 + paddd \TMP1, \XMM0 # INCR Y0 343 + MOVADQ \XMM0, \XMM1 333 344 PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap 334 345 335 - paddd ONE(%rip), \XMM0 # INCR Y0 336 - movdqa \XMM0, \XMM2 337 - movdqa SHUF_MASK(%rip), %xmm14 346 + paddd \TMP1, \XMM0 # INCR Y0 347 + MOVADQ \XMM0, \XMM2 338 348 PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap 339 349 340 - paddd ONE(%rip), \XMM0 # INCR Y0 341 - movdqa \XMM0, \XMM3 342 - movdqa SHUF_MASK(%rip), %xmm14 350 + paddd \TMP1, \XMM0 # INCR Y0 351 + MOVADQ \XMM0, \XMM3 343 352 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap 344 353 345 - paddd ONE(%rip), \XMM0 # INCR Y0 346 - movdqa \XMM0, \XMM4 347 - movdqa SHUF_MASK(%rip), %xmm14 354 + paddd \TMP1, \XMM0 # INCR Y0 355 + MOVADQ \XMM0, \XMM4 348 356 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap 349 357 350 - pxor 16*0(%arg1), \XMM1 351 - pxor 16*0(%arg1), \XMM2 352 - pxor 16*0(%arg1), \XMM3 353 - pxor 16*0(%arg1), \XMM4 358 + MOVADQ 0(%arg1),\TMP1 359 + pxor \TMP1, \XMM1 360 + pxor \TMP1, \XMM2 361 + pxor \TMP1, \XMM3 362 + pxor \TMP1, \XMM4 354 363 movdqa \TMP3, \TMP5 355 364 pshufd $78, \TMP3, \TMP1 356 365 pxor \TMP3, \TMP1 ··· 386 399 pshufd $78, \TMP5, \TMP1 387 400 pxor \TMP5, \TMP1 388 401 movdqa \TMP1, HashKey_4_k(%rsp) 389 - movaps 0xa0(%arg1), \TMP2 402 + lea 0xa0(%arg1),%r10 403 + mov keysize,%eax 404 + shr $2,%eax # 128->4, 192->6, 256->8 405 + sub $4,%eax # 128->0, 192->2, 256->4 406 + jz aes_loop_pre_dec_done\num_initial_blocks 407 + 408 + aes_loop_pre_dec\num_initial_blocks: 409 + MOVADQ (%r10),\TMP2 410 + .irpc index, 1234 411 + AESENC \TMP2, %xmm\index 412 + .endr 413 + add $16,%r10 414 + sub $1,%eax 415 + jnz aes_loop_pre_dec\num_initial_blocks 416 + 417 + aes_loop_pre_dec_done\num_initial_blocks: 418 + MOVADQ (%r10), \TMP2 390 419 AESENCLAST \TMP2, \XMM1 391 420 AESENCLAST \TMP2, \XMM2 392 421 AESENCLAST \TMP2, \XMM3 ··· 424 421 movdqu \XMM4, 16*3(%arg2 , %r11 , 1) 425 422 movdqa \TMP1, \XMM4 426 423 add $64, %r11 427 - movdqa SHUF_MASK(%rip), %xmm14 428 424 
PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap 429 425 pxor \XMMDst, \XMM1 430 426 # combine GHASHed value with the corresponding ciphertext 431 - movdqa SHUF_MASK(%rip), %xmm14 432 427 PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap 433 - movdqa SHUF_MASK(%rip), %xmm14 434 428 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap 435 - movdqa SHUF_MASK(%rip), %xmm14 436 429 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap 437 430 438 431 _initial_blocks_done\num_initial_blocks\operation: ··· 450 451 451 452 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ 452 453 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation 454 + MOVADQ SHUF_MASK(%rip), %xmm14 453 455 mov arg7, %r10 # %r10 = AAD 454 456 mov arg8, %r12 # %r12 = aadLen 455 457 mov %r12, %r11 ··· 472 472 cmp %r11, %r12 473 473 jne _get_AAD_loop2\num_initial_blocks\operation 474 474 _get_AAD_loop2_done\num_initial_blocks\operation: 475 - movdqa SHUF_MASK(%rip), %xmm14 476 475 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data 477 476 478 477 xor %r11, %r11 # initialise the data pointer offset as zero ··· 480 481 481 482 mov %arg5, %rax # %rax = *Y0 482 483 movdqu (%rax), \XMM0 # XMM0 = Y0 483 - movdqa SHUF_MASK(%rip), %xmm14 484 484 PSHUFB_XMM %xmm14, \XMM0 485 485 486 486 .if (\i == 5) || (\i == 6) || (\i == 7) 487 - .irpc index, \i_seq 488 - paddd ONE(%rip), \XMM0 # INCR Y0 489 - movdqa \XMM0, %xmm\index 490 - movdqa SHUF_MASK(%rip), %xmm14 491 - PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap 492 487 493 - .endr 488 + MOVADQ ONE(%RIP),\TMP1 489 + MOVADQ 0(%arg1),\TMP2 494 490 .irpc index, \i_seq 495 - pxor 16*0(%arg1), %xmm\index 491 + paddd \TMP1, \XMM0 # INCR Y0 492 + MOVADQ \XMM0, %xmm\index 493 + PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap 494 + pxor \TMP2, %xmm\index 496 495 .endr 497 - .irpc index, \i_seq 498 - movaps 0x10(%rdi), \TMP1 499 - AESENC \TMP1, %xmm\index # Round 1 496 + lea 0x10(%arg1),%r10 497 + mov keysize,%eax 498 + shr $2,%eax # 128->4, 192->6, 256->8 499 + add $5,%eax # 128->9, 192->11, 256->13 500 + 501 + aes_loop_initial_enc\num_initial_blocks: 502 + MOVADQ (%r10),\TMP1 503 + .irpc index, \i_seq 504 + AESENC \TMP1, %xmm\index 500 505 .endr 506 + add $16,%r10 507 + sub $1,%eax 508 + jnz aes_loop_initial_enc\num_initial_blocks 509 + 510 + MOVADQ (%r10), \TMP1 501 511 .irpc index, \i_seq 502 - movaps 0x20(%arg1), \TMP1 503 - AESENC \TMP1, %xmm\index # Round 2 504 - .endr 505 - .irpc index, \i_seq 506 - movaps 0x30(%arg1), \TMP1 507 - AESENC \TMP1, %xmm\index # Round 2 508 - .endr 509 - .irpc index, \i_seq 510 - movaps 0x40(%arg1), \TMP1 511 - AESENC \TMP1, %xmm\index # Round 2 512 - .endr 513 - .irpc index, \i_seq 514 - movaps 0x50(%arg1), \TMP1 515 - AESENC \TMP1, %xmm\index # Round 2 516 - .endr 517 - .irpc index, \i_seq 518 - movaps 0x60(%arg1), \TMP1 519 - AESENC \TMP1, %xmm\index # Round 2 520 - .endr 521 - .irpc index, \i_seq 522 - movaps 0x70(%arg1), \TMP1 523 - AESENC \TMP1, %xmm\index # Round 2 524 - .endr 525 - .irpc index, \i_seq 526 - movaps 0x80(%arg1), \TMP1 527 - AESENC \TMP1, %xmm\index # Round 2 528 - .endr 529 - .irpc index, \i_seq 530 - movaps 0x90(%arg1), \TMP1 531 - AESENC \TMP1, %xmm\index # Round 2 532 - .endr 533 - .irpc index, \i_seq 534 - movaps 0xa0(%arg1), \TMP1 535 - AESENCLAST \TMP1, %xmm\index # Round 10 512 + AESENCLAST \TMP1, %xmm\index # Last Round 536 513 .endr 537 514 .irpc index, \i_seq 538 515 movdqu (%arg3 , %r11, 1), \TMP1 ··· 516 541 movdqu %xmm\index, (%arg2 , %r11, 1) 517 542 # write back plaintext/ciphertext for 
num_initial_blocks 518 543 add $16, %r11 519 - 520 - movdqa SHUF_MASK(%rip), %xmm14 521 544 PSHUFB_XMM %xmm14, %xmm\index 522 545 523 546 # prepare plaintext/ciphertext for GHASH computation ··· 548 575 * Precomputations for HashKey parallel with encryption of first 4 blocks. 549 576 * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i 550 577 */ 551 - paddd ONE(%rip), \XMM0 # INCR Y0 552 - movdqa \XMM0, \XMM1 553 - movdqa SHUF_MASK(%rip), %xmm14 578 + MOVADQ ONE(%RIP),\TMP1 579 + paddd \TMP1, \XMM0 # INCR Y0 580 + MOVADQ \XMM0, \XMM1 554 581 PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap 555 582 556 - paddd ONE(%rip), \XMM0 # INCR Y0 557 - movdqa \XMM0, \XMM2 558 - movdqa SHUF_MASK(%rip), %xmm14 583 + paddd \TMP1, \XMM0 # INCR Y0 584 + MOVADQ \XMM0, \XMM2 559 585 PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap 560 586 561 - paddd ONE(%rip), \XMM0 # INCR Y0 562 - movdqa \XMM0, \XMM3 563 - movdqa SHUF_MASK(%rip), %xmm14 587 + paddd \TMP1, \XMM0 # INCR Y0 588 + MOVADQ \XMM0, \XMM3 564 589 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap 565 590 566 - paddd ONE(%rip), \XMM0 # INCR Y0 567 - movdqa \XMM0, \XMM4 568 - movdqa SHUF_MASK(%rip), %xmm14 591 + paddd \TMP1, \XMM0 # INCR Y0 592 + MOVADQ \XMM0, \XMM4 569 593 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap 570 594 571 - pxor 16*0(%arg1), \XMM1 572 - pxor 16*0(%arg1), \XMM2 573 - pxor 16*0(%arg1), \XMM3 574 - pxor 16*0(%arg1), \XMM4 595 + MOVADQ 0(%arg1),\TMP1 596 + pxor \TMP1, \XMM1 597 + pxor \TMP1, \XMM2 598 + pxor \TMP1, \XMM3 599 + pxor \TMP1, \XMM4 575 600 movdqa \TMP3, \TMP5 576 601 pshufd $78, \TMP3, \TMP1 577 602 pxor \TMP3, \TMP1 ··· 607 636 pshufd $78, \TMP5, \TMP1 608 637 pxor \TMP5, \TMP1 609 638 movdqa \TMP1, HashKey_4_k(%rsp) 610 - movaps 0xa0(%arg1), \TMP2 639 + lea 0xa0(%arg1),%r10 640 + mov keysize,%eax 641 + shr $2,%eax # 128->4, 192->6, 256->8 642 + sub $4,%eax # 128->0, 192->2, 256->4 643 + jz aes_loop_pre_enc_done\num_initial_blocks 644 + 645 + aes_loop_pre_enc\num_initial_blocks: 646 + MOVADQ (%r10),\TMP2 647 + .irpc index, 1234 648 + AESENC \TMP2, %xmm\index 649 + .endr 650 + add $16,%r10 651 + sub $1,%eax 652 + jnz aes_loop_pre_enc\num_initial_blocks 653 + 654 + aes_loop_pre_enc_done\num_initial_blocks: 655 + MOVADQ (%r10), \TMP2 611 656 AESENCLAST \TMP2, \XMM1 612 657 AESENCLAST \TMP2, \XMM2 613 658 AESENCLAST \TMP2, \XMM3 ··· 642 655 movdqu \XMM4, 16*3(%arg2 , %r11 , 1) 643 656 644 657 add $64, %r11 645 - movdqa SHUF_MASK(%rip), %xmm14 646 658 PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap 647 659 pxor \XMMDst, \XMM1 648 660 # combine GHASHed value with the corresponding ciphertext 649 - movdqa SHUF_MASK(%rip), %xmm14 650 661 PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap 651 - movdqa SHUF_MASK(%rip), %xmm14 652 662 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap 653 - movdqa SHUF_MASK(%rip), %xmm14 654 663 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap 655 664 656 665 _initial_blocks_done\num_initial_blocks\operation: ··· 777 794 AESENC \TMP3, \XMM3 778 795 AESENC \TMP3, \XMM4 779 796 PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 780 - movaps 0xa0(%arg1), \TMP3 797 + lea 0xa0(%arg1),%r10 798 + mov keysize,%eax 799 + shr $2,%eax # 128->4, 192->6, 256->8 800 + sub $4,%eax # 128->0, 192->2, 256->4 801 + jz aes_loop_par_enc_done 802 + 803 + aes_loop_par_enc: 804 + MOVADQ (%r10),\TMP3 805 + .irpc index, 1234 806 + AESENC \TMP3, %xmm\index 807 + .endr 808 + add $16,%r10 809 + sub $1,%eax 810 + jnz aes_loop_par_enc 811 + 812 + aes_loop_par_enc_done: 813 + MOVADQ (%r10), \TMP3 781 
814 AESENCLAST \TMP3, \XMM1 # Round 10 782 815 AESENCLAST \TMP3, \XMM2 783 816 AESENCLAST \TMP3, \XMM3 ··· 985 986 AESENC \TMP3, \XMM3 986 987 AESENC \TMP3, \XMM4 987 988 PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 988 - movaps 0xa0(%arg1), \TMP3 989 - AESENCLAST \TMP3, \XMM1 # Round 10 989 + lea 0xa0(%arg1),%r10 990 + mov keysize,%eax 991 + shr $2,%eax # 128->4, 192->6, 256->8 992 + sub $4,%eax # 128->0, 192->2, 256->4 993 + jz aes_loop_par_dec_done 994 + 995 + aes_loop_par_dec: 996 + MOVADQ (%r10),\TMP3 997 + .irpc index, 1234 998 + AESENC \TMP3, %xmm\index 999 + .endr 1000 + add $16,%r10 1001 + sub $1,%eax 1002 + jnz aes_loop_par_dec 1003 + 1004 + aes_loop_par_dec_done: 1005 + MOVADQ (%r10), \TMP3 1006 + AESENCLAST \TMP3, \XMM1 # last round 990 1007 AESENCLAST \TMP3, \XMM2 991 1008 AESENCLAST \TMP3, \XMM3 992 1009 AESENCLAST \TMP3, \XMM4 ··· 1170 1155 pxor \TMP6, \XMMDst # reduced result is in XMMDst 1171 1156 .endm 1172 1157 1173 - /* Encryption of a single block done*/ 1158 + 1159 + /* Encryption of a single block 1160 + * uses eax & r10 1161 + */ 1162 + 1174 1163 .macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1 1175 1164 1176 - pxor (%arg1), \XMM0 1177 - movaps 16(%arg1), \TMP1 1178 - AESENC \TMP1, \XMM0 1179 - movaps 32(%arg1), \TMP1 1180 - AESENC \TMP1, \XMM0 1181 - movaps 48(%arg1), \TMP1 1182 - AESENC \TMP1, \XMM0 1183 - movaps 64(%arg1), \TMP1 1184 - AESENC \TMP1, \XMM0 1185 - movaps 80(%arg1), \TMP1 1186 - AESENC \TMP1, \XMM0 1187 - movaps 96(%arg1), \TMP1 1188 - AESENC \TMP1, \XMM0 1189 - movaps 112(%arg1), \TMP1 1190 - AESENC \TMP1, \XMM0 1191 - movaps 128(%arg1), \TMP1 1192 - AESENC \TMP1, \XMM0 1193 - movaps 144(%arg1), \TMP1 1194 - AESENC \TMP1, \XMM0 1195 - movaps 160(%arg1), \TMP1 1196 - AESENCLAST \TMP1, \XMM0 1165 + pxor (%arg1), \XMM0 1166 + mov keysize,%eax 1167 + shr $2,%eax # 128->4, 192->6, 256->8 1168 + add $5,%eax # 128->9, 192->11, 256->13 1169 + lea 16(%arg1), %r10 # get first expanded key address 1170 + 1171 + _esb_loop_\@: 1172 + MOVADQ (%r10),\TMP1 1173 + AESENC \TMP1,\XMM0 1174 + add $16,%r10 1175 + sub $1,%eax 1176 + jnz _esb_loop_\@ 1177 + 1178 + MOVADQ (%r10),\TMP1 1179 + AESENCLAST \TMP1,\XMM0 1197 1180 .endm 1198 - 1199 - 1200 1181 /***************************************************************************** 1201 1182 * void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. 1202 1183 * u8 *out, // Plaintext output. Encrypt in-place is allowed.
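The new aes_loop_* blocks above size their round loops from the key length stored in the expanded key schedule (the keysize field defined earlier): shr $2 converts the byte length 16/24/32 to 4/6/8, and add $5 (or sub $4 on the partially unrolled paths) turns that into the number of AESENC rounds before the closing AESENCLAST. A purely illustrative helper expressing the same arithmetic, not part of the patch:

	/* key_length is in bytes (16/24/32): key_length/4 + 5 = 9/11/13
	 * AESENC rounds, with the final AESENCLAST handled separately. */
	static inline unsigned int aes_enc_rounds(unsigned int key_length)
	{
		return (key_length >> 2) + 5;
	}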
+28 -6
arch/x86/crypto/aesni-intel_glue.c
··· 43 43 #include <asm/crypto/glue_helper.h> 44 44 #endif 45 45 46 + 46 47 /* This data is stored at the end of the crypto_tfm struct. 47 48 * It's a type of per "session" data storage location. 48 49 * This needs to be 16 byte aligned. ··· 183 182 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 184 183 u8 *auth_tag, unsigned long auth_tag_len) 185 184 { 186 - if (plaintext_len < AVX_GEN2_OPTSIZE) { 185 + struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 186 + if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){ 187 187 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, 188 188 aad_len, auth_tag, auth_tag_len); 189 189 } else { ··· 199 197 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 200 198 u8 *auth_tag, unsigned long auth_tag_len) 201 199 { 202 - if (ciphertext_len < AVX_GEN2_OPTSIZE) { 200 + struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 201 + if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { 203 202 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, 204 203 aad_len, auth_tag, auth_tag_len); 205 204 } else { ··· 234 231 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 235 232 u8 *auth_tag, unsigned long auth_tag_len) 236 233 { 237 - if (plaintext_len < AVX_GEN2_OPTSIZE) { 234 + struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 235 + if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { 238 236 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, 239 237 aad_len, auth_tag, auth_tag_len); 240 238 } else if (plaintext_len < AVX_GEN4_OPTSIZE) { ··· 254 250 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 255 251 u8 *auth_tag, unsigned long auth_tag_len) 256 252 { 257 - if (ciphertext_len < AVX_GEN2_OPTSIZE) { 253 + struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 254 + if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { 258 255 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, 259 256 aad, aad_len, auth_tag, auth_tag_len); 260 257 } else if (ciphertext_len < AVX_GEN4_OPTSIZE) { ··· 516 511 kernel_fpu_begin(); 517 512 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { 518 513 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, 519 - nbytes & AES_BLOCK_MASK, walk.iv); 514 + nbytes & AES_BLOCK_MASK, walk.iv); 520 515 nbytes &= AES_BLOCK_SIZE - 1; 521 516 err = blkcipher_walk_done(desc, &walk, nbytes); 522 517 } ··· 907 902 } 908 903 /*Account for 4 byte nonce at the end.*/ 909 904 key_len -= 4; 910 - if (key_len != AES_KEYSIZE_128) { 905 + if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && 906 + key_len != AES_KEYSIZE_256) { 911 907 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 912 908 return -EINVAL; 913 909 } ··· 1019 1013 __be32 counter = cpu_to_be32(1); 1020 1014 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1021 1015 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); 1016 + u32 key_len = ctx->aes_key_expanded.key_length; 1022 1017 void *aes_ctx = &(ctx->aes_key_expanded); 1023 1018 unsigned long auth_tag_len = crypto_aead_authsize(tfm); 1024 1019 u8 iv_tab[16+AESNI_ALIGN]; ··· 1034 1027 /* to 8 or 12 bytes */ 1035 1028 if (unlikely(req->assoclen != 8 && req->assoclen != 12)) 1036 1029 return -EINVAL; 1030 + if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)) 1031 + return -EINVAL; 1032 + if (unlikely(key_len != AES_KEYSIZE_128 && 1033 + 
key_len != AES_KEYSIZE_192 && 1034 + key_len != AES_KEYSIZE_256)) 1035 + return -EINVAL; 1036 + 1037 1037 /* IV below built */ 1038 1038 for (i = 0; i < 4; i++) 1039 1039 *(iv+i) = ctx->nonce[i]; ··· 1105 1091 int retval = 0; 1106 1092 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1107 1093 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); 1094 + u32 key_len = ctx->aes_key_expanded.key_length; 1108 1095 void *aes_ctx = &(ctx->aes_key_expanded); 1109 1096 unsigned long auth_tag_len = crypto_aead_authsize(tfm); 1110 1097 u8 iv_and_authTag[32+AESNI_ALIGN]; ··· 1119 1104 if (unlikely((req->cryptlen < auth_tag_len) || 1120 1105 (req->assoclen != 8 && req->assoclen != 12))) 1121 1106 return -EINVAL; 1107 + if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)) 1108 + return -EINVAL; 1109 + if (unlikely(key_len != AES_KEYSIZE_128 && 1110 + key_len != AES_KEYSIZE_192 && 1111 + key_len != AES_KEYSIZE_256)) 1112 + return -EINVAL; 1113 + 1122 1114 /* Assuming we are supporting rfc4106 64-bit extended */ 1123 1115 /* sequence numbers We need to have the AAD length */ 1124 1116 /* equal to 8 or 12 bytes */
-2
arch/x86/crypto/des3_ede_glue.c
··· 504 504 MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); 505 505 MODULE_ALIAS_CRYPTO("des3_ede"); 506 506 MODULE_ALIAS_CRYPTO("des3_ede-asm"); 507 - MODULE_ALIAS_CRYPTO("des"); 508 - MODULE_ALIAS_CRYPTO("des-asm"); 509 507 MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
+18
crypto/Kconfig
··· 427 427 help 428 428 MD5 message digest algorithm (RFC1321). 429 429 430 + config CRYPTO_MD5_OCTEON 431 + tristate "MD5 digest algorithm (OCTEON)" 432 + depends on CPU_CAVIUM_OCTEON 433 + select CRYPTO_MD5 434 + select CRYPTO_HASH 435 + help 436 + MD5 message digest algorithm (RFC1321) implemented 437 + using OCTEON crypto instructions, when available. 438 + 430 439 config CRYPTO_MD5_SPARC64 431 440 tristate "MD5 digest algorithm (SPARC64)" 432 441 depends on SPARC64 ··· 1513 1504 help 1514 1505 This option enables the user-spaces interface for symmetric 1515 1506 key cipher algorithms. 1507 + 1508 + config CRYPTO_USER_API_RNG 1509 + tristate "User-space interface for random number generator algorithms" 1510 + depends on NET 1511 + select CRYPTO_RNG 1512 + select CRYPTO_USER_API 1513 + help 1514 + This option enables the user-spaces interface for random 1515 + number generator algorithms. 1516 1516 1517 1517 config CRYPTO_HASH_INFO 1518 1518 bool
+1
crypto/Makefile
··· 99 99 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o 100 100 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o 101 101 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o 102 + obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o 102 103 103 104 # 104 105 # generic algorithms and the async_tx api
+5 -2
crypto/ablkcipher.c
··· 69 69 static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) 70 70 { 71 71 u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); 72 + 72 73 return max(start, end_page); 73 74 } 74 75 ··· 87 86 if (n == len_this_page) 88 87 break; 89 88 n -= len_this_page; 90 - scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg)); 89 + scatterwalk_start(&walk->out, sg_next(walk->out.sg)); 91 90 } 92 91 93 92 return bsize; ··· 285 284 walk->iv = req->info; 286 285 if (unlikely(((unsigned long)walk->iv & alignmask))) { 287 286 int err = ablkcipher_copy_iv(walk, tfm, alignmask); 287 + 288 288 if (err) 289 289 return err; 290 290 } ··· 591 589 if (IS_ERR(inst)) 592 590 goto put_tmpl; 593 591 594 - if ((err = crypto_register_instance(tmpl, inst))) { 592 + err = crypto_register_instance(tmpl, inst); 593 + if (err) { 595 594 tmpl->free(inst); 596 595 goto put_tmpl; 597 596 }
+2 -1
crypto/aead.c
··· 448 448 if (IS_ERR(inst)) 449 449 goto put_tmpl; 450 450 451 - if ((err = crypto_register_instance(tmpl, inst))) { 451 + err = crypto_register_instance(tmpl, inst); 452 + if (err) { 452 453 tmpl->free(inst); 453 454 goto put_tmpl; 454 455 }
+9 -2
crypto/af_alg.c
··· 188 188 err = type->setkey(ask->private, key, keylen); 189 189 190 190 out: 191 - sock_kfree_s(sk, key, keylen); 191 + sock_kzfree_s(sk, key, keylen); 192 192 193 193 return err; 194 194 } ··· 215 215 goto unlock; 216 216 217 217 err = alg_setkey(sk, optval, optlen); 218 + break; 219 + case ALG_SET_AEAD_AUTHSIZE: 220 + if (sock->state == SS_CONNECTED) 221 + goto unlock; 222 + if (!type->setauthsize) 223 + goto unlock; 224 + err = type->setauthsize(ask->private, optlen); 218 225 } 219 226 220 227 unlock: ··· 394 387 if (cmsg->cmsg_level != SOL_ALG) 395 388 continue; 396 389 397 - switch(cmsg->cmsg_type) { 390 + switch (cmsg->cmsg_type) { 398 391 case ALG_SET_IV: 399 392 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) 400 393 return -EINVAL;
+2 -1
crypto/ahash.c
··· 55 55 56 56 if (offset & alignmask) { 57 57 unsigned int unaligned = alignmask + 1 - (offset & alignmask); 58 + 58 59 if (nbytes > unaligned) 59 60 nbytes = unaligned; 60 61 } ··· 121 120 if (!walk->total) 122 121 return 0; 123 122 124 - walk->sg = scatterwalk_sg_next(walk->sg); 123 + walk->sg = sg_next(walk->sg); 125 124 126 125 return hash_walk_new_entry(walk); 127 126 }
+1
crypto/algapi.c
··· 473 473 list = &tmpl->instances; 474 474 hlist_for_each_entry(inst, list, list) { 475 475 int err = crypto_remove_alg(&inst->alg, &users); 476 + 476 477 BUG_ON(err); 477 478 } 478 479
+192
crypto/algif_rng.c
··· 1 + /* 2 + * algif_rng: User-space interface for random number generators 3 + * 4 + * This file provides the user-space API for random number generators. 5 + * 6 + * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de> 7 + * 8 + * Redistribution and use in source and binary forms, with or without 9 + * modification, are permitted provided that the following conditions 10 + * are met: 11 + * 1. Redistributions of source code must retain the above copyright 12 + * notice, and the entire permission notice in its entirety, 13 + * including the disclaimer of warranties. 14 + * 2. Redistributions in binary form must reproduce the above copyright 15 + * notice, this list of conditions and the following disclaimer in the 16 + * documentation and/or other materials provided with the distribution. 17 + * 3. The name of the author may not be used to endorse or promote 18 + * products derived from this software without specific prior 19 + * written permission. 20 + * 21 + * ALTERNATIVELY, this product may be distributed under the terms of 22 + * the GNU General Public License, in which case the provisions of the GPL2 23 + * are required INSTEAD OF the above restrictions. (This clause is 24 + * necessary due to a potential bad interaction between the GPL and 25 + * the restrictions contained in a BSD-style copyright.) 26 + * 27 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 28 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF 30 + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE 31 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 33 + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 34 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 35 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 37 + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH 38 + * DAMAGE. 39 + */ 40 + 41 + #include <linux/module.h> 42 + #include <crypto/rng.h> 43 + #include <linux/random.h> 44 + #include <crypto/if_alg.h> 45 + #include <linux/net.h> 46 + #include <net/sock.h> 47 + 48 + MODULE_LICENSE("GPL"); 49 + MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); 50 + MODULE_DESCRIPTION("User-space interface for random number generators"); 51 + 52 + struct rng_ctx { 53 + #define MAXSIZE 128 54 + unsigned int len; 55 + struct crypto_rng *drng; 56 + }; 57 + 58 + static int rng_recvmsg(struct kiocb *unused, struct socket *sock, 59 + struct msghdr *msg, size_t len, int flags) 60 + { 61 + struct sock *sk = sock->sk; 62 + struct alg_sock *ask = alg_sk(sk); 63 + struct rng_ctx *ctx = ask->private; 64 + int err = -EFAULT; 65 + int genlen = 0; 66 + u8 result[MAXSIZE]; 67 + 68 + if (len == 0) 69 + return 0; 70 + if (len > MAXSIZE) 71 + len = MAXSIZE; 72 + 73 + /* 74 + * although not strictly needed, this is a precaution against coding 75 + * errors 76 + */ 77 + memset(result, 0, len); 78 + 79 + /* 80 + * The enforcement of a proper seeding of an RNG is done within an 81 + * RNG implementation. Some RNGs (DRBG, krng) do not need specific 82 + * seeding as they automatically seed. The X9.31 DRNG will return 83 + * an error if it was not seeded properly. 
84 + */ 85 + genlen = crypto_rng_get_bytes(ctx->drng, result, len); 86 + if (genlen < 0) 87 + return genlen; 88 + 89 + err = memcpy_to_msg(msg, result, len); 90 + memzero_explicit(result, genlen); 91 + 92 + return err ? err : len; 93 + } 94 + 95 + static struct proto_ops algif_rng_ops = { 96 + .family = PF_ALG, 97 + 98 + .connect = sock_no_connect, 99 + .socketpair = sock_no_socketpair, 100 + .getname = sock_no_getname, 101 + .ioctl = sock_no_ioctl, 102 + .listen = sock_no_listen, 103 + .shutdown = sock_no_shutdown, 104 + .getsockopt = sock_no_getsockopt, 105 + .mmap = sock_no_mmap, 106 + .bind = sock_no_bind, 107 + .accept = sock_no_accept, 108 + .setsockopt = sock_no_setsockopt, 109 + .poll = sock_no_poll, 110 + .sendmsg = sock_no_sendmsg, 111 + .sendpage = sock_no_sendpage, 112 + 113 + .release = af_alg_release, 114 + .recvmsg = rng_recvmsg, 115 + }; 116 + 117 + static void *rng_bind(const char *name, u32 type, u32 mask) 118 + { 119 + return crypto_alloc_rng(name, type, mask); 120 + } 121 + 122 + static void rng_release(void *private) 123 + { 124 + crypto_free_rng(private); 125 + } 126 + 127 + static void rng_sock_destruct(struct sock *sk) 128 + { 129 + struct alg_sock *ask = alg_sk(sk); 130 + struct rng_ctx *ctx = ask->private; 131 + 132 + sock_kfree_s(sk, ctx, ctx->len); 133 + af_alg_release_parent(sk); 134 + } 135 + 136 + static int rng_accept_parent(void *private, struct sock *sk) 137 + { 138 + struct rng_ctx *ctx; 139 + struct alg_sock *ask = alg_sk(sk); 140 + unsigned int len = sizeof(*ctx); 141 + 142 + ctx = sock_kmalloc(sk, len, GFP_KERNEL); 143 + if (!ctx) 144 + return -ENOMEM; 145 + 146 + ctx->len = len; 147 + 148 + /* 149 + * No seeding done at that point -- if multiple accepts are 150 + * done on one RNG instance, each resulting FD points to the same 151 + * state of the RNG. 152 + */ 153 + 154 + ctx->drng = private; 155 + ask->private = ctx; 156 + sk->sk_destruct = rng_sock_destruct; 157 + 158 + return 0; 159 + } 160 + 161 + static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen) 162 + { 163 + /* 164 + * Check whether seedlen is of sufficient size is done in RNG 165 + * implementations. 166 + */ 167 + return crypto_rng_reset(private, (u8 *)seed, seedlen); 168 + } 169 + 170 + static const struct af_alg_type algif_type_rng = { 171 + .bind = rng_bind, 172 + .release = rng_release, 173 + .accept = rng_accept_parent, 174 + .setkey = rng_setkey, 175 + .ops = &algif_rng_ops, 176 + .name = "rng", 177 + .owner = THIS_MODULE 178 + }; 179 + 180 + static int __init rng_init(void) 181 + { 182 + return af_alg_register_type(&algif_type_rng); 183 + } 184 + 185 + static void __exit rng_exit(void) 186 + { 187 + int err = af_alg_unregister_type(&algif_type_rng); 188 + BUG_ON(err); 189 + } 190 + 191 + module_init(rng_init); 192 + module_exit(rng_exit);
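algif_rng plugs crypto_rng instances into the existing AF_ALG socket family: userspace binds an "rng"-type socket, optionally seeds it via setsockopt(ALG_SET_KEY), accepts an operation fd, and reads up to MAXSIZE (128) bytes of generated data per call. A hedged userspace sketch of that flow (error handling omitted; "stdrng" is an assumed algorithm name, and explicit seeding is only needed for RNGs that do not seed themselves):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "rng",		/* type registered by algif_rng */
			.salg_name   = "stdrng",	/* assumed DRBG name */
		};
		unsigned char buf[64];
		int tfmfd, opfd;
		ssize_t n;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);		/* per-request fd */

		n = read(opfd, buf, sizeof(buf));	/* capped at 128 bytes per read */
		printf("read %zd random bytes\n", n);

		close(opfd);
		close(tfmfd);
		return 0;
	}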
+8
crypto/algif_skcipher.c
··· 330 330 331 331 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); 332 332 sg = sgl->sg; 333 + sg_unmark_end(sg + sgl->cur); 333 334 do { 334 335 i = sgl->cur; 335 336 plen = min_t(int, len, PAGE_SIZE); ··· 355 354 size -= plen; 356 355 sgl->cur++; 357 356 } while (len && sgl->cur < MAX_SGL_ENTS); 357 + 358 + if (!size) 359 + sg_mark_end(sg + sgl->cur - 1); 358 360 359 361 ctx->merge = plen & (PAGE_SIZE - 1); 360 362 } ··· 405 401 ctx->merge = 0; 406 402 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); 407 403 404 + if (sgl->cur) 405 + sg_unmark_end(sgl->sg + sgl->cur - 1); 406 + 407 + sg_mark_end(sgl->sg + sgl->cur); 408 408 get_page(page); 409 409 sg_set_page(sgl->sg + sgl->cur, page, size, offset); 410 410 sgl->cur++;
+3 -2
crypto/cts.c
··· 290 290 if (!is_power_of_2(alg->cra_blocksize)) 291 291 goto out_put_alg; 292 292 293 + if (strncmp(alg->cra_name, "cbc(", 4)) 294 + goto out_put_alg; 295 + 293 296 inst = crypto_alloc_instance("cts", alg); 294 297 if (IS_ERR(inst)) 295 298 goto out_put_alg; ··· 309 306 inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; 310 307 inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; 311 308 inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; 312 - 313 - inst->alg.cra_blkcipher.geniv = "seqiv"; 314 309 315 310 inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx); 316 311
+13 -21
crypto/drbg.c
··· 98 98 */ 99 99 100 100 #include <crypto/drbg.h> 101 - #include <linux/string.h> 102 101 103 102 /*************************************************************** 104 103 * Backend cipher definitions available to DRBG ··· 222 223 * function. Thus, the function implicitly knows the size of the 223 224 * buffer. 224 225 * 225 - * The FIPS test can be called in an endless loop until it returns 226 - * true. Although the code looks like a potential for a deadlock, it 227 - * is not the case, because returning a false cannot mathematically 228 - * occur (except once when a reseed took place and the updated state 229 - * would is now set up such that the generation of new value returns 230 - * an identical one -- this is most unlikely and would happen only once). 231 - * Thus, if this function repeatedly returns false and thus would cause 232 - * a deadlock, the integrity of the entire kernel is lost. 233 - * 234 226 * @drbg DRBG handle 235 227 * @buf output buffer of random data to be checked 236 228 * ··· 248 258 return false; 249 259 } 250 260 ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg)); 261 + if (!ret) 262 + panic("DRBG continuous self test failed\n"); 251 263 memcpy(drbg->prev, buf, drbg_blocklen(drbg)); 252 264 /* the test shall pass when the two compared values are not equal */ 253 265 return ret != 0; ··· 490 498 ret = 0; 491 499 492 500 out: 493 - memzero_explicit(iv, drbg_blocklen(drbg)); 494 - memzero_explicit(temp, drbg_statelen(drbg)); 495 - memzero_explicit(pad, drbg_blocklen(drbg)); 501 + memset(iv, 0, drbg_blocklen(drbg)); 502 + memset(temp, 0, drbg_statelen(drbg)); 503 + memset(pad, 0, drbg_blocklen(drbg)); 496 504 return ret; 497 505 } 498 506 ··· 566 574 ret = 0; 567 575 568 576 out: 569 - memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg)); 577 + memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); 570 578 if (2 != reseed) 571 - memzero_explicit(df_data, drbg_statelen(drbg)); 579 + memset(df_data, 0, drbg_statelen(drbg)); 572 580 return ret; 573 581 } 574 582 ··· 626 634 len = ret; 627 635 628 636 out: 629 - memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg)); 637 + memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); 630 638 return len; 631 639 } 632 640 ··· 864 872 } 865 873 866 874 out: 867 - memzero_explicit(tmp, drbg_blocklen(drbg)); 875 + memset(tmp, 0, drbg_blocklen(drbg)); 868 876 return ret; 869 877 } 870 878 ··· 908 916 ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2); 909 917 910 918 out: 911 - memzero_explicit(drbg->scratchpad, drbg_statelen(drbg)); 919 + memset(drbg->scratchpad, 0, drbg_statelen(drbg)); 912 920 return ret; 913 921 } 914 922 ··· 943 951 drbg->scratchpad, drbg_blocklen(drbg)); 944 952 945 953 out: 946 - memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg)); 954 + memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); 947 955 return ret; 948 956 } 949 957 ··· 990 998 } 991 999 992 1000 out: 993 - memzero_explicit(drbg->scratchpad, 1001 + memset(drbg->scratchpad, 0, 994 1002 (drbg_statelen(drbg) + drbg_blocklen(drbg))); 995 1003 return len; 996 1004 } ··· 1039 1047 drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8); 1040 1048 1041 1049 out: 1042 - memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg)); 1050 + memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); 1043 1051 return len; 1044 1052 } 1045 1053
+3 -3
crypto/scatterwalk.c
··· 62 62 walk->offset += PAGE_SIZE - 1; 63 63 walk->offset &= PAGE_MASK; 64 64 if (walk->offset >= walk->sg->offset + walk->sg->length) 65 - scatterwalk_start(walk, scatterwalk_sg_next(walk->sg)); 65 + scatterwalk_start(walk, sg_next(walk->sg)); 66 66 } 67 67 } 68 68 ··· 116 116 break; 117 117 118 118 offset += sg->length; 119 - sg = scatterwalk_sg_next(sg); 119 + sg = sg_next(sg); 120 120 } 121 121 122 122 scatterwalk_advance(&walk, start - offset); ··· 136 136 do { 137 137 offset += sg->length; 138 138 n++; 139 - sg = scatterwalk_sg_next(sg); 139 + sg = sg_next(sg); 140 140 141 141 /* num_bytes is too large */ 142 142 if (unlikely(!sg && (num_bytes < offset)))
+12
crypto/seqiv.c
··· 267 267 if (IS_ERR(inst)) 268 268 goto out; 269 269 270 + if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) { 271 + skcipher_geniv_free(inst); 272 + inst = ERR_PTR(-EINVAL); 273 + goto out; 274 + } 275 + 270 276 inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first; 271 277 272 278 inst->alg.cra_init = seqiv_init; ··· 292 286 293 287 if (IS_ERR(inst)) 294 288 goto out; 289 + 290 + if (inst->alg.cra_aead.ivsize < sizeof(u64)) { 291 + aead_geniv_free(inst); 292 + inst = ERR_PTR(-EINVAL); 293 + goto out; 294 + } 295 295 296 296 inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; 297 297
+20 -17
crypto/tcrypt.c
··· 250 250 int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; 251 251 int k, rem; 252 252 253 - np = (np > XBUFSIZE) ? XBUFSIZE : np; 254 - rem = buflen % PAGE_SIZE; 255 253 if (np > XBUFSIZE) { 256 254 rem = PAGE_SIZE; 257 255 np = XBUFSIZE; 256 + } else { 257 + rem = buflen % PAGE_SIZE; 258 258 } 259 + 259 260 sg_init_table(sg, np); 260 - for (k = 0; k < np; ++k) { 261 - if (k == (np-1)) 262 - sg_set_buf(&sg[k], xbuf[k], rem); 263 - else 264 - sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE); 265 - } 261 + np--; 262 + for (k = 0; k < np; k++) 263 + sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE); 264 + 265 + sg_set_buf(&sg[k], xbuf[k], rem); 266 266 } 267 267 268 268 static void test_aead_speed(const char *algo, int enc, unsigned int secs, ··· 280 280 struct scatterlist *sgout; 281 281 const char *e; 282 282 void *assoc; 283 - char iv[MAX_IVLEN]; 283 + char *iv; 284 284 char *xbuf[XBUFSIZE]; 285 285 char *xoutbuf[XBUFSIZE]; 286 286 char *axbuf[XBUFSIZE]; 287 287 unsigned int *b_size; 288 288 unsigned int iv_len; 289 289 290 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL); 291 + if (!iv) 292 + return; 293 + 290 294 if (aad_size >= PAGE_SIZE) { 291 295 pr_err("associate data length (%u) too big\n", aad_size); 292 - return; 296 + goto out_noxbuf; 293 297 } 294 298 295 299 if (enc == ENCRYPT) ··· 359 355 360 356 iv_len = crypto_aead_ivsize(tfm); 361 357 if (iv_len) 362 - memset(&iv, 0xff, iv_len); 358 + memset(iv, 0xff, iv_len); 363 359 364 360 crypto_aead_clear_flags(tfm, ~0); 365 361 printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ", ··· 412 408 out_noaxbuf: 413 409 testmgr_free_buf(xbuf); 414 410 out_noxbuf: 411 + kfree(iv); 415 412 return; 416 413 } 417 414 ··· 769 764 if (ret == -EINPROGRESS || ret == -EBUSY) { 770 765 struct tcrypt_result *tr = req->base.data; 771 766 772 - ret = wait_for_completion_interruptible(&tr->completion); 773 - if (!ret) 774 - ret = tr->err; 767 + wait_for_completion(&tr->completion); 775 768 reinit_completion(&tr->completion); 769 + ret = tr->err; 776 770 } 777 771 return ret; 778 772 } ··· 997 993 if (ret == -EINPROGRESS || ret == -EBUSY) { 998 994 struct tcrypt_result *tr = req->base.data; 999 995 1000 - ret = wait_for_completion_interruptible(&tr->completion); 1001 - if (!ret) 1002 - ret = tr->err; 996 + wait_for_completion(&tr->completion); 1003 997 reinit_completion(&tr->completion); 998 + ret = tr->err; 1004 999 } 1005 1000 1006 1001 return ret;
+27 -31
crypto/testmgr.c
··· 181 181 static int wait_async_op(struct tcrypt_result *tr, int ret) 182 182 { 183 183 if (ret == -EINPROGRESS || ret == -EBUSY) { 184 - ret = wait_for_completion_interruptible(&tr->completion); 185 - if (!ret) 186 - ret = tr->err; 184 + wait_for_completion(&tr->completion); 187 185 reinit_completion(&tr->completion); 186 + ret = tr->err; 188 187 } 189 188 return ret; 190 189 } ··· 352 353 break; 353 354 case -EINPROGRESS: 354 355 case -EBUSY: 355 - ret = wait_for_completion_interruptible( 356 - &tresult.completion); 357 - if (!ret && !(ret = tresult.err)) { 358 - reinit_completion(&tresult.completion); 356 + wait_for_completion(&tresult.completion); 357 + reinit_completion(&tresult.completion); 358 + ret = tresult.err; 359 + if (!ret) 359 360 break; 360 - } 361 361 /* fall through */ 362 362 default: 363 363 printk(KERN_ERR "alg: hash: digest failed " ··· 429 431 struct scatterlist *sgout; 430 432 const char *e, *d; 431 433 struct tcrypt_result result; 432 - unsigned int authsize; 434 + unsigned int authsize, iv_len; 433 435 void *input; 434 436 void *output; 435 437 void *assoc; ··· 500 502 501 503 memcpy(input, template[i].input, template[i].ilen); 502 504 memcpy(assoc, template[i].assoc, template[i].alen); 505 + iv_len = crypto_aead_ivsize(tfm); 503 506 if (template[i].iv) 504 - memcpy(iv, template[i].iv, MAX_IVLEN); 507 + memcpy(iv, template[i].iv, iv_len); 505 508 else 506 - memset(iv, 0, MAX_IVLEN); 509 + memset(iv, 0, iv_len); 507 510 508 511 crypto_aead_clear_flags(tfm, ~0); 509 512 if (template[i].wk) ··· 568 569 break; 569 570 case -EINPROGRESS: 570 571 case -EBUSY: 571 - ret = wait_for_completion_interruptible( 572 - &result.completion); 573 - if (!ret && !(ret = result.err)) { 574 - reinit_completion(&result.completion); 572 + wait_for_completion(&result.completion); 573 + reinit_completion(&result.completion); 574 + ret = result.err; 575 + if (!ret) 575 576 break; 576 - } 577 577 case -EBADMSG: 578 578 if (template[i].novrfy) 579 579 /* verification failure was expected */ ··· 718 720 break; 719 721 case -EINPROGRESS: 720 722 case -EBUSY: 721 - ret = wait_for_completion_interruptible( 722 - &result.completion); 723 - if (!ret && !(ret = result.err)) { 724 - reinit_completion(&result.completion); 723 + wait_for_completion(&result.completion); 724 + reinit_completion(&result.completion); 725 + ret = result.err; 726 + if (!ret) 725 727 break; 726 - } 727 728 case -EBADMSG: 728 729 if (template[i].novrfy) 729 730 /* verification failure was expected */ ··· 999 1002 break; 1000 1003 case -EINPROGRESS: 1001 1004 case -EBUSY: 1002 - ret = wait_for_completion_interruptible( 1003 - &result.completion); 1004 - if (!ret && !((ret = result.err))) { 1005 - reinit_completion(&result.completion); 1005 + wait_for_completion(&result.completion); 1006 + reinit_completion(&result.completion); 1007 + ret = result.err; 1008 + if (!ret) 1006 1009 break; 1007 - } 1008 1010 /* fall through */ 1009 1011 default: 1010 1012 pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", ··· 1093 1097 break; 1094 1098 case -EINPROGRESS: 1095 1099 case -EBUSY: 1096 - ret = wait_for_completion_interruptible( 1097 - &result.completion); 1098 - if (!ret && !((ret = result.err))) { 1099 - reinit_completion(&result.completion); 1100 + wait_for_completion(&result.completion); 1101 + reinit_completion(&result.completion); 1102 + ret = result.err; 1103 + if (!ret) 1100 1104 break; 1101 - } 1102 1105 /* fall through */ 1103 1106 default: 1104 1107 pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: 
ret=%d\n", ··· 3294 3299 }, { 3295 3300 .alg = "rfc4106(gcm(aes))", 3296 3301 .test = alg_test_aead, 3302 + .fips_allowed = 1, 3297 3303 .suite = { 3298 3304 .aead = { 3299 3305 .enc = {
+149 -70
drivers/char/hw_random/core.c
··· 42 42 #include <linux/delay.h> 43 43 #include <linux/slab.h> 44 44 #include <linux/random.h> 45 + #include <linux/err.h> 45 46 #include <asm/uaccess.h> 46 47 47 48 ··· 54 53 static struct hwrng *current_rng; 55 54 static struct task_struct *hwrng_fill; 56 55 static LIST_HEAD(rng_list); 56 + /* Protects rng_list and current_rng */ 57 57 static DEFINE_MUTEX(rng_mutex); 58 + /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */ 59 + static DEFINE_MUTEX(reading_mutex); 58 60 static int data_avail; 59 61 static u8 *rng_buffer, *rng_fillbuf; 60 62 static unsigned short current_quality; ··· 70 66 MODULE_PARM_DESC(default_quality, 71 67 "default entropy content of hwrng per mill"); 72 68 69 + static void drop_current_rng(void); 70 + static int hwrng_init(struct hwrng *rng); 73 71 static void start_khwrngd(void); 74 72 75 73 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, ··· 87 81 unsigned char bytes[16]; 88 82 int bytes_read; 89 83 84 + mutex_lock(&reading_mutex); 90 85 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 86 + mutex_unlock(&reading_mutex); 91 87 if (bytes_read > 0) 92 88 add_device_randomness(bytes, bytes_read); 93 89 } 94 90 95 - static inline int hwrng_init(struct hwrng *rng) 91 + static inline void cleanup_rng(struct kref *kref) 96 92 { 93 + struct hwrng *rng = container_of(kref, struct hwrng, ref); 94 + 95 + if (rng->cleanup) 96 + rng->cleanup(rng); 97 + 98 + complete(&rng->cleanup_done); 99 + } 100 + 101 + static int set_current_rng(struct hwrng *rng) 102 + { 103 + int err; 104 + 105 + BUG_ON(!mutex_is_locked(&rng_mutex)); 106 + 107 + err = hwrng_init(rng); 108 + if (err) 109 + return err; 110 + 111 + drop_current_rng(); 112 + current_rng = rng; 113 + 114 + return 0; 115 + } 116 + 117 + static void drop_current_rng(void) 118 + { 119 + BUG_ON(!mutex_is_locked(&rng_mutex)); 120 + if (!current_rng) 121 + return; 122 + 123 + /* decrease last reference for triggering the cleanup */ 124 + kref_put(&current_rng->ref, cleanup_rng); 125 + current_rng = NULL; 126 + } 127 + 128 + /* Returns ERR_PTR(), NULL or refcounted hwrng */ 129 + static struct hwrng *get_current_rng(void) 130 + { 131 + struct hwrng *rng; 132 + 133 + if (mutex_lock_interruptible(&rng_mutex)) 134 + return ERR_PTR(-ERESTARTSYS); 135 + 136 + rng = current_rng; 137 + if (rng) 138 + kref_get(&rng->ref); 139 + 140 + mutex_unlock(&rng_mutex); 141 + return rng; 142 + } 143 + 144 + static void put_rng(struct hwrng *rng) 145 + { 146 + /* 147 + * Hold rng_mutex here so we serialize in case they set_current_rng 148 + * on rng again immediately. 149 + */ 150 + mutex_lock(&rng_mutex); 151 + if (rng) 152 + kref_put(&rng->ref, cleanup_rng); 153 + mutex_unlock(&rng_mutex); 154 + } 155 + 156 + static int hwrng_init(struct hwrng *rng) 157 + { 158 + if (kref_get_unless_zero(&rng->ref)) 159 + goto skip_init; 160 + 97 161 if (rng->init) { 98 162 int ret; 99 163 ··· 171 95 if (ret) 172 96 return ret; 173 97 } 98 + 99 + kref_init(&rng->ref); 100 + reinit_completion(&rng->cleanup_done); 101 + 102 + skip_init: 174 103 add_early_randomness(rng); 175 104 176 105 current_quality = rng->quality ? 
: default_quality; ··· 187 106 start_khwrngd(); 188 107 189 108 return 0; 190 - } 191 - 192 - static inline void hwrng_cleanup(struct hwrng *rng) 193 - { 194 - if (rng && rng->cleanup) 195 - rng->cleanup(rng); 196 109 } 197 110 198 111 static int rng_dev_open(struct inode *inode, struct file *filp) ··· 203 128 int wait) { 204 129 int present; 205 130 131 + BUG_ON(!mutex_is_locked(&reading_mutex)); 206 132 if (rng->read) 207 133 return rng->read(rng, (void *)buffer, size, wait); 208 134 ··· 224 148 ssize_t ret = 0; 225 149 int err = 0; 226 150 int bytes_read, len; 151 + struct hwrng *rng; 227 152 228 153 while (size) { 229 - if (mutex_lock_interruptible(&rng_mutex)) { 230 - err = -ERESTARTSYS; 154 + rng = get_current_rng(); 155 + if (IS_ERR(rng)) { 156 + err = PTR_ERR(rng); 157 + goto out; 158 + } 159 + if (!rng) { 160 + err = -ENODEV; 231 161 goto out; 232 162 } 233 163 234 - if (!current_rng) { 235 - err = -ENODEV; 236 - goto out_unlock; 237 - } 238 - 164 + mutex_lock(&reading_mutex); 239 165 if (!data_avail) { 240 - bytes_read = rng_get_data(current_rng, rng_buffer, 166 + bytes_read = rng_get_data(rng, rng_buffer, 241 167 rng_buffer_size(), 242 168 !(filp->f_flags & O_NONBLOCK)); 243 169 if (bytes_read < 0) { 244 170 err = bytes_read; 245 - goto out_unlock; 171 + goto out_unlock_reading; 246 172 } 247 173 data_avail = bytes_read; 248 174 } ··· 252 174 if (!data_avail) { 253 175 if (filp->f_flags & O_NONBLOCK) { 254 176 err = -EAGAIN; 255 - goto out_unlock; 177 + goto out_unlock_reading; 256 178 } 257 179 } else { 258 180 len = data_avail; ··· 264 186 if (copy_to_user(buf + ret, rng_buffer + data_avail, 265 187 len)) { 266 188 err = -EFAULT; 267 - goto out_unlock; 189 + goto out_unlock_reading; 268 190 } 269 191 270 192 size -= len; 271 193 ret += len; 272 194 } 273 195 274 - mutex_unlock(&rng_mutex); 196 + mutex_unlock(&reading_mutex); 197 + put_rng(rng); 275 198 276 199 if (need_resched()) 277 200 schedule_timeout_interruptible(1); ··· 284 205 } 285 206 out: 286 207 return ret ? : err; 287 - out_unlock: 288 - mutex_unlock(&rng_mutex); 208 + 209 + out_unlock_reading: 210 + mutex_unlock(&reading_mutex); 211 + put_rng(rng); 289 212 goto out; 290 213 } 291 214 ··· 320 239 err = -ENODEV; 321 240 list_for_each_entry(rng, &rng_list, list) { 322 241 if (strcmp(rng->name, buf) == 0) { 323 - if (rng == current_rng) { 324 - err = 0; 325 - break; 326 - } 327 - err = hwrng_init(rng); 328 - if (err) 329 - break; 330 - hwrng_cleanup(current_rng); 331 - current_rng = rng; 332 242 err = 0; 243 + if (rng != current_rng) 244 + err = set_current_rng(rng); 333 245 break; 334 246 } 335 247 } ··· 335 261 struct device_attribute *attr, 336 262 char *buf) 337 263 { 338 - int err; 339 264 ssize_t ret; 340 - const char *name = "none"; 265 + struct hwrng *rng; 341 266 342 - err = mutex_lock_interruptible(&rng_mutex); 343 - if (err) 344 - return -ERESTARTSYS; 345 - if (current_rng) 346 - name = current_rng->name; 347 - ret = snprintf(buf, PAGE_SIZE, "%s\n", name); 348 - mutex_unlock(&rng_mutex); 267 + rng = get_current_rng(); 268 + if (IS_ERR(rng)) 269 + return PTR_ERR(rng); 270 + 271 + ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? 
rng->name : "none"); 272 + put_rng(rng); 349 273 350 274 return ret; 351 275 } ··· 377 305 NULL); 378 306 379 307 380 - static void unregister_miscdev(void) 308 + static void __exit unregister_miscdev(void) 381 309 { 382 310 device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available); 383 311 device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current); 384 312 misc_deregister(&rng_miscdev); 385 313 } 386 314 387 - static int register_miscdev(void) 315 + static int __init register_miscdev(void) 388 316 { 389 317 int err; 390 318 ··· 414 342 long rc; 415 343 416 344 while (!kthread_should_stop()) { 417 - if (!current_rng) 345 + struct hwrng *rng; 346 + 347 + rng = get_current_rng(); 348 + if (IS_ERR(rng) || !rng) 418 349 break; 419 - rc = rng_get_data(current_rng, rng_fillbuf, 350 + mutex_lock(&reading_mutex); 351 + rc = rng_get_data(rng, rng_fillbuf, 420 352 rng_buffer_size(), 1); 353 + mutex_unlock(&reading_mutex); 354 + put_rng(rng); 421 355 if (rc <= 0) { 422 356 pr_warn("hwrng: no data available\n"); 423 357 msleep_interruptible(10000); 424 358 continue; 425 359 } 360 + /* Outside lock, sure, but y'know: randomness. */ 426 361 add_hwgenerator_randomness((void *)rng_fillbuf, rc, 427 362 rc * current_quality * 8 >> 10); 428 363 } ··· 479 400 goto out_unlock; 480 401 } 481 402 403 + init_completion(&rng->cleanup_done); 404 + complete(&rng->cleanup_done); 405 + 482 406 old_rng = current_rng; 483 - if (!old_rng) { 484 - err = hwrng_init(rng); 485 - if (err) 486 - goto out_unlock; 487 - current_rng = rng; 488 - } 489 407 err = 0; 490 408 if (!old_rng) { 491 - err = register_miscdev(); 492 - if (err) { 493 - hwrng_cleanup(rng); 494 - current_rng = NULL; 409 + err = set_current_rng(rng); 410 + if (err) 495 411 goto out_unlock; 496 - } 497 412 } 498 - INIT_LIST_HEAD(&rng->list); 499 413 list_add_tail(&rng->list, &rng_list); 500 414 501 415 if (old_rng && !rng->init) { ··· 511 439 512 440 void hwrng_unregister(struct hwrng *rng) 513 441 { 514 - int err; 515 - 516 442 mutex_lock(&rng_mutex); 517 443 518 444 list_del(&rng->list); 519 445 if (current_rng == rng) { 520 - hwrng_cleanup(rng); 521 - if (list_empty(&rng_list)) { 522 - current_rng = NULL; 523 - } else { 524 - current_rng = list_entry(rng_list.prev, struct hwrng, list); 525 - err = hwrng_init(current_rng); 526 - if (err) 527 - current_rng = NULL; 446 + drop_current_rng(); 447 + if (!list_empty(&rng_list)) { 448 + struct hwrng *tail; 449 + 450 + tail = list_entry(rng_list.prev, struct hwrng, list); 451 + 452 + set_current_rng(tail); 528 453 } 529 454 } 455 + 530 456 if (list_empty(&rng_list)) { 531 - unregister_miscdev(); 457 + mutex_unlock(&rng_mutex); 532 458 if (hwrng_fill) 533 459 kthread_stop(hwrng_fill); 534 - } 460 + } else 461 + mutex_unlock(&rng_mutex); 535 462 536 - mutex_unlock(&rng_mutex); 463 + wait_for_completion(&rng->cleanup_done); 537 464 } 538 465 EXPORT_SYMBOL_GPL(hwrng_unregister); 539 466 540 - static void __exit hwrng_exit(void) 467 + static int __init hwrng_modinit(void) 468 + { 469 + return register_miscdev(); 470 + } 471 + 472 + static void __exit hwrng_modexit(void) 541 473 { 542 474 mutex_lock(&rng_mutex); 543 475 BUG_ON(current_rng); 544 476 kfree(rng_buffer); 545 477 kfree(rng_fillbuf); 546 478 mutex_unlock(&rng_mutex); 479 + 480 + unregister_miscdev(); 547 481 } 548 482 549 - module_exit(hwrng_exit); 483 + module_init(hwrng_modinit); 484 + module_exit(hwrng_modexit); 550 485 551 486 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); 552 487 MODULE_LICENSE("GPL");
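Note: the hw_random core now pins the active device with a kref instead of holding rng_mutex across reads. A minimal sketch of the consumer pattern, written as if it were another helper inside core.c (get_current_rng(), put_rng(), rng_get_data() and reading_mutex are all file-local there); it is illustrative, not part of the diff:

/* Pin the active RNG, serialize the read, then drop the reference so a
 * concurrent hwrng_unregister() can finish its cleanup. */
static ssize_t read_some_entropy(u8 *buf, size_t len)
{
	struct hwrng *rng;
	ssize_t n;

	rng = get_current_rng();	/* takes rng_mutex, kref_get()s current_rng */
	if (IS_ERR(rng))
		return PTR_ERR(rng);
	if (!rng)
		return -ENODEV;

	mutex_lock(&reading_mutex);	/* protects rng_buffer/data_avail/read fns */
	n = rng_get_data(rng, buf, len, 1);
	mutex_unlock(&reading_mutex);

	put_rng(rng);			/* last reference triggers cleanup_rng() */
	return n;
}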
-1
drivers/char/hw_random/virtio-rng.c
··· 39 39 bool hwrng_removed; 40 40 }; 41 41 42 - 43 42 static void random_recv_done(struct virtqueue *vq) 44 43 { 45 44 struct virtrng_info *vi = vq->vdev->priv;
-23
drivers/crypto/amcc/crypto4xx_sa.c
··· 34 34 #include "crypto4xx_sa.h" 35 35 #include "crypto4xx_core.h" 36 36 37 - u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx) 38 - { 39 - u32 offset; 40 - union dynamic_sa_contents cts; 41 - 42 - if (ctx->direction == DIR_INBOUND) 43 - cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents; 44 - else 45 - cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents; 46 - offset = cts.bf.key_size 47 - + cts.bf.inner_size 48 - + cts.bf.outer_size 49 - + cts.bf.spi 50 - + cts.bf.seq_num0 51 - + cts.bf.seq_num1 52 - + cts.bf.seq_num_mask0 53 - + cts.bf.seq_num_mask1 54 - + cts.bf.seq_num_mask2 55 - + cts.bf.seq_num_mask3; 56 - 57 - return sizeof(struct dynamic_sa_ctl) + offset * 4; 58 - } 59 - 60 37 u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx) 61 38 { 62 39 u32 offset;
+1 -1
drivers/crypto/atmel-aes.c
··· 673 673 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, 674 674 DMA_TO_DEVICE); 675 675 err_map_in: 676 + err_alloc: 676 677 free_page((unsigned long)dd->buf_out); 677 678 free_page((unsigned long)dd->buf_in); 678 - err_alloc: 679 679 if (err) 680 680 pr_err("error: %d\n", err); 681 681 return err;
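Note: the fix above moves the err_alloc label above the free_page() calls so the allocation-failure path releases whatever was allocated (free_page() on a zero address is a no-op). A generic sketch of the unwind idiom, with hypothetical names rather than the driver's own:

#include <linux/gfp.h>
#include <linux/dma-mapping.h>

struct bounce {
	void *buf_in, *buf_out;
	dma_addr_t dma_in, dma_out;
};

/* Unwind labels in reverse order of setup: every failure point jumps past
 * only the steps that have not happened yet.  free_page(0) is a no-op, so
 * landing on err_alloc with one page missing is fine. */
static int setup_bounce_buffers(struct device *dev, struct bounce *b)
{
	int err = -ENOMEM;

	b->buf_in = (void *)__get_free_page(GFP_KERNEL);
	b->buf_out = (void *)__get_free_page(GFP_KERNEL);
	if (!b->buf_in || !b->buf_out)
		goto err_alloc;

	b->dma_in = dma_map_single(dev, b->buf_in, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, b->dma_in))
		goto err_alloc;

	b->dma_out = dma_map_single(dev, b->buf_out, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, b->dma_out))
		goto err_map_out;

	return 0;

err_map_out:
	dma_unmap_single(dev, b->dma_in, PAGE_SIZE, DMA_TO_DEVICE);
err_alloc:
	free_page((unsigned long)b->buf_out);
	free_page((unsigned long)b->buf_in);
	return err;
}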
+6 -44
drivers/crypto/atmel-sha.c
··· 102 102 struct atmel_sha_dev *dd; 103 103 104 104 unsigned long flags; 105 - 106 - /* fallback stuff */ 107 - struct crypto_shash *fallback; 108 - 109 105 }; 110 106 111 107 #define ATMEL_SHA_QUEUE_LENGTH 50 ··· 970 974 return atmel_sha_init(req) ?: atmel_sha_finup(req); 971 975 } 972 976 973 - static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) 977 + static int atmel_sha_cra_init(struct crypto_tfm *tfm) 974 978 { 975 - struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); 976 - const char *alg_name = crypto_tfm_alg_name(tfm); 977 - 978 - /* Allocate a fallback and abort if it failed. */ 979 - tctx->fallback = crypto_alloc_shash(alg_name, 0, 980 - CRYPTO_ALG_NEED_FALLBACK); 981 - if (IS_ERR(tctx->fallback)) { 982 - pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n", 983 - alg_name); 984 - return PTR_ERR(tctx->fallback); 985 - } 986 979 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 987 980 sizeof(struct atmel_sha_reqctx) + 988 981 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE); 989 982 990 983 return 0; 991 - } 992 - 993 - static int atmel_sha_cra_init(struct crypto_tfm *tfm) 994 - { 995 - return atmel_sha_cra_init_alg(tfm, NULL); 996 - } 997 - 998 - static void atmel_sha_cra_exit(struct crypto_tfm *tfm) 999 - { 1000 - struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); 1001 - 1002 - crypto_free_shash(tctx->fallback); 1003 - tctx->fallback = NULL; 1004 984 } 1005 985 1006 986 static struct ahash_alg sha_1_256_algs[] = { ··· 992 1020 .cra_name = "sha1", 993 1021 .cra_driver_name = "atmel-sha1", 994 1022 .cra_priority = 100, 995 - .cra_flags = CRYPTO_ALG_ASYNC | 996 - CRYPTO_ALG_NEED_FALLBACK, 1023 + .cra_flags = CRYPTO_ALG_ASYNC, 997 1024 .cra_blocksize = SHA1_BLOCK_SIZE, 998 1025 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 999 1026 .cra_alignmask = 0, 1000 1027 .cra_module = THIS_MODULE, 1001 1028 .cra_init = atmel_sha_cra_init, 1002 - .cra_exit = atmel_sha_cra_exit, 1003 1029 } 1004 1030 } 1005 1031 }, ··· 1013 1043 .cra_name = "sha256", 1014 1044 .cra_driver_name = "atmel-sha256", 1015 1045 .cra_priority = 100, 1016 - .cra_flags = CRYPTO_ALG_ASYNC | 1017 - CRYPTO_ALG_NEED_FALLBACK, 1046 + .cra_flags = CRYPTO_ALG_ASYNC, 1018 1047 .cra_blocksize = SHA256_BLOCK_SIZE, 1019 1048 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1020 1049 .cra_alignmask = 0, 1021 1050 .cra_module = THIS_MODULE, 1022 1051 .cra_init = atmel_sha_cra_init, 1023 - .cra_exit = atmel_sha_cra_exit, 1024 1052 } 1025 1053 } 1026 1054 }, ··· 1036 1068 .cra_name = "sha224", 1037 1069 .cra_driver_name = "atmel-sha224", 1038 1070 .cra_priority = 100, 1039 - .cra_flags = CRYPTO_ALG_ASYNC | 1040 - CRYPTO_ALG_NEED_FALLBACK, 1071 + .cra_flags = CRYPTO_ALG_ASYNC, 1041 1072 .cra_blocksize = SHA224_BLOCK_SIZE, 1042 1073 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1043 1074 .cra_alignmask = 0, 1044 1075 .cra_module = THIS_MODULE, 1045 1076 .cra_init = atmel_sha_cra_init, 1046 - .cra_exit = atmel_sha_cra_exit, 1047 1077 } 1048 1078 } 1049 1079 }; ··· 1059 1093 .cra_name = "sha384", 1060 1094 .cra_driver_name = "atmel-sha384", 1061 1095 .cra_priority = 100, 1062 - .cra_flags = CRYPTO_ALG_ASYNC | 1063 - CRYPTO_ALG_NEED_FALLBACK, 1096 + .cra_flags = CRYPTO_ALG_ASYNC, 1064 1097 .cra_blocksize = SHA384_BLOCK_SIZE, 1065 1098 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1066 1099 .cra_alignmask = 0x3, 1067 1100 .cra_module = THIS_MODULE, 1068 1101 .cra_init = atmel_sha_cra_init, 1069 - .cra_exit = atmel_sha_cra_exit, 1070 1102 } 1071 1103 } 1072 1104 }, ··· 1080 1116 .cra_name = "sha512", 1081 1117 
.cra_driver_name = "atmel-sha512", 1082 1118 .cra_priority = 100, 1083 - .cra_flags = CRYPTO_ALG_ASYNC | 1084 - CRYPTO_ALG_NEED_FALLBACK, 1119 + .cra_flags = CRYPTO_ALG_ASYNC, 1085 1120 .cra_blocksize = SHA512_BLOCK_SIZE, 1086 1121 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1087 1122 .cra_alignmask = 0x3, 1088 1123 .cra_module = THIS_MODULE, 1089 1124 .cra_init = atmel_sha_cra_init, 1090 - .cra_exit = atmel_sha_cra_exit, 1091 1125 } 1092 1126 } 1093 1127 },
+1 -1
drivers/crypto/atmel-tdes.c
··· 376 376 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, 377 377 DMA_TO_DEVICE); 378 378 err_map_in: 379 + err_alloc: 379 380 free_page((unsigned long)dd->buf_out); 380 381 free_page((unsigned long)dd->buf_in); 381 - err_alloc: 382 382 if (err) 383 383 pr_err("error: %d\n", err); 384 384 return err;
+2 -2
drivers/crypto/bfin_crc.c
··· 110 110 111 111 while (!sg_is_last(sg)) { 112 112 sg_nents++; 113 - sg = scatterwalk_sg_next(sg); 113 + sg = sg_next(sg); 114 114 } 115 115 116 116 return sg_nents; ··· 744 744 745 745 ret = platform_driver_register(&bfin_crypto_crc_driver); 746 746 if (ret) { 747 - pr_info(KERN_ERR "unable to register driver\n"); 747 + pr_err("unable to register driver\n"); 748 748 return ret; 749 749 } 750 750
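Note: several drivers in this series replace scatterwalk_sg_next() with plain sg_next(), which already follows chained scatterlists and returns NULL at the end of the list. A short sketch of the entry-counting loop this enables (essentially what bfin_crc's helper now does); the function name is illustrative:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Count the entries of a (possibly chained) scatterlist that cover @nbytes.
 * sg_next() follows chain links transparently and returns NULL after the
 * terminating entry, so no crypto-specific walker is needed. */
static int count_sg_entries(struct scatterlist *sg, unsigned int nbytes)
{
	int nents = 0;

	while (sg && nbytes) {
		nents++;
		nbytes -= min(nbytes, sg->length);
		sg = sg_next(sg);
	}
	return nents;
}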
+7 -7
drivers/crypto/caam/caamalg.c
··· 2532 2532 in_options = 0; 2533 2533 } else { 2534 2534 src_dma = edesc->sec4_sg_dma; 2535 - sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents; 2535 + sec4_sg_index += edesc->src_nents + 1; 2536 2536 in_options = LDST_SGF; 2537 2537 } 2538 2538 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); ··· 2714 2714 if (!all_contig) { 2715 2715 if (!is_gcm) { 2716 2716 sg_to_sec4_sg(req->assoc, 2717 - (assoc_nents ? : 1), 2717 + assoc_nents, 2718 2718 edesc->sec4_sg + 2719 2719 sec4_sg_index, 0); 2720 - sec4_sg_index += assoc_nents ? : 1; 2720 + sec4_sg_index += assoc_nents; 2721 2721 } 2722 2722 2723 2723 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, ··· 2726 2726 2727 2727 if (is_gcm) { 2728 2728 sg_to_sec4_sg(req->assoc, 2729 - (assoc_nents ? : 1), 2729 + assoc_nents, 2730 2730 edesc->sec4_sg + 2731 2731 sec4_sg_index, 0); 2732 - sec4_sg_index += assoc_nents ? : 1; 2732 + sec4_sg_index += assoc_nents; 2733 2733 } 2734 2734 2735 2735 sg_to_sec4_sg_last(req->src, 2736 - (src_nents ? : 1), 2736 + src_nents, 2737 2737 edesc->sec4_sg + 2738 2738 sec4_sg_index, 0); 2739 - sec4_sg_index += src_nents ? : 1; 2739 + sec4_sg_index += src_nents; 2740 2740 } 2741 2741 if (dst_nents) { 2742 2742 sg_to_sec4_sg_last(req->dst, dst_nents,
+1 -5
drivers/crypto/caam/ctrl.c
··· 175 175 { 176 176 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 177 177 struct caam_ctrl __iomem *ctrl; 178 - struct rng4tst __iomem *r4tst; 179 178 u32 *desc, status, rdsta_val; 180 179 int ret = 0, sh_idx; 181 180 182 181 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; 183 - r4tst = &ctrl->r4tst[0]; 184 - 185 182 desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); 186 183 if (!desc) 187 184 return -ENOMEM; ··· 206 209 * without any error (HW optimizations for later 207 210 * CAAM eras), then try again. 208 211 */ 209 - rdsta_val = 210 - rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; 212 + rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; 211 213 if (status || !(rdsta_val & (1 << sh_idx))) 212 214 ret = -EAGAIN; 213 215 if (ret)
+9 -4
drivers/crypto/caam/error.c
··· 151 151 else 152 152 snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); 153 153 154 - dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", 155 - status, error, idx_str, idx, 156 - cha_str, cha_err_code, 157 - err_str, err_err_code); 154 + /* 155 + * CCB ICV check failures are part of normal operation life; 156 + * we leave the upper layers to do what they want with them. 157 + */ 158 + if (err_id != JRSTA_CCBERR_ERRID_ICVCHK) 159 + dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", 160 + status, error, idx_str, idx, 161 + cha_str, cha_err_code, 162 + err_str, err_err_code); 158 163 } 159 164 160 165 static void report_jump_status(struct device *jrdev, const u32 status,
+25 -12
drivers/crypto/caam/jr.c
··· 384 384 if (error) { 385 385 dev_err(dev, "can't connect JobR %d interrupt (%d)\n", 386 386 jrp->ridx, jrp->irq); 387 - irq_dispose_mapping(jrp->irq); 388 - jrp->irq = 0; 389 - return -EINVAL; 387 + goto out_kill_deq; 390 388 } 391 389 392 390 error = caam_reset_hw_jr(dev); 393 391 if (error) 394 - return error; 392 + goto out_free_irq; 395 393 394 + error = -ENOMEM; 396 395 jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, 397 396 &inpbusaddr, GFP_KERNEL); 397 + if (!jrp->inpring) 398 + goto out_free_irq; 398 399 399 400 jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * 400 401 JOBR_DEPTH, &outbusaddr, GFP_KERNEL); 402 + if (!jrp->outring) 403 + goto out_free_inpring; 401 404 402 405 jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, 403 406 GFP_KERNEL); 404 - 405 - if ((jrp->inpring == NULL) || (jrp->outring == NULL) || 406 - (jrp->entinfo == NULL)) { 407 - dev_err(dev, "can't allocate job rings for %d\n", 408 - jrp->ridx); 409 - return -ENOMEM; 410 - } 407 + if (!jrp->entinfo) 408 + goto out_free_outring; 411 409 412 410 for (i = 0; i < JOBR_DEPTH; i++) 413 411 jrp->entinfo[i].desc_addr_dma = !0; ··· 432 434 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); 433 435 434 436 return 0; 437 + 438 + out_free_outring: 439 + dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, 440 + jrp->outring, outbusaddr); 441 + out_free_inpring: 442 + dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, 443 + jrp->inpring, inpbusaddr); 444 + dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx); 445 + out_free_irq: 446 + free_irq(jrp->irq, dev); 447 + out_kill_deq: 448 + tasklet_kill(&jrp->irqtask); 449 + return error; 435 450 } 436 451 437 452 ··· 495 484 496 485 /* Now do the platform independent part */ 497 486 error = caam_jr_init(jrdev); /* now turn on hardware */ 498 - if (error) 487 + if (error) { 488 + irq_dispose_mapping(jrpriv->irq); 499 489 return error; 490 + } 500 491 501 492 jrpriv->dev = jrdev; 502 493 spin_lock(&driver_data.jr_alloc_lock);
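Note: caam_jr_init() now unwinds its tasklet, IRQ and ring allocations on failure, and the probe path disposes of the IRQ mapping it created itself, so irq_of_parse_and_map() and irq_dispose_mapping() end up paired in the same function. A sketch of that pairing with placeholder names (example_probe/example_hw_init are not from the driver):

#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>

int example_hw_init(struct platform_device *pdev, unsigned int irq); /* placeholder */

/* Whoever calls irq_of_parse_and_map() owns the mapping and is the one
 * that must irq_dispose_mapping() it on failure, not a deeper layer. */
static int example_probe(struct platform_device *pdev)
{
	unsigned int irq;
	int err;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	err = example_hw_init(pdev, irq);	/* requests/uses the IRQ itself */
	if (err)
		irq_dispose_mapping(irq);	/* undo only our own mapping */

	return err;
}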
+4 -4
drivers/crypto/caam/sg_sw_sec4.h
··· 37 37 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), 38 38 sg_dma_len(sg), offset); 39 39 sec4_sg_ptr++; 40 - sg = scatterwalk_sg_next(sg); 40 + sg = sg_next(sg); 41 41 sg_count--; 42 42 } 43 43 return sec4_sg_ptr - 1; ··· 67 67 nbytes -= sg->length; 68 68 if (!sg_is_last(sg) && (sg + 1)->length == 0) 69 69 *chained = true; 70 - sg = scatterwalk_sg_next(sg); 70 + sg = sg_next(sg); 71 71 } 72 72 73 73 return sg_nents; ··· 93 93 int i; 94 94 for (i = 0; i < nents; i++) { 95 95 dma_map_sg(dev, sg, 1, dir); 96 - sg = scatterwalk_sg_next(sg); 96 + sg = sg_next(sg); 97 97 } 98 98 } else { 99 99 dma_map_sg(dev, sg, nents, dir); ··· 109 109 int i; 110 110 for (i = 0; i < nents; i++) { 111 111 dma_unmap_sg(dev, sg, 1, dir); 112 - sg = scatterwalk_sg_next(sg); 112 + sg = sg_next(sg); 113 113 } 114 114 } else { 115 115 dma_unmap_sg(dev, sg, nents, dir);
+1
drivers/crypto/ccp/ccp-dev.c
··· 583 583 #ifdef CONFIG_X86 584 584 static const struct x86_cpu_id ccp_support[] = { 585 585 { X86_VENDOR_AMD, 22, }, 586 + { }, 586 587 }; 587 588 #endif 588 589
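Note: x86_match_cpu() walks the table until it reaches an all-zero entry, so a table without the empty terminator reads past the end. A minimal sketch of a terminated table and its lookup (ccp_ids/ccp_cpu_supported are illustrative names, not the driver's):

#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/processor.h>

static const struct x86_cpu_id ccp_ids[] = {
	{ X86_VENDOR_AMD, 22, },	/* family 16h */
	{ },	/* terminator: x86_match_cpu() stops at the all-zero entry */
};

static bool ccp_cpu_supported(void)
{
	return x86_match_cpu(ccp_ids) != NULL;
}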
+2 -2
drivers/crypto/ixp4xx_crypto.c
··· 784 784 struct buffer_desc *buf, gfp_t flags, 785 785 enum dma_data_direction dir) 786 786 { 787 - for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) { 787 + for (; nbytes > 0; sg = sg_next(sg)) { 788 788 unsigned len = min(nbytes, sg->length); 789 789 struct buffer_desc *next_buf; 790 790 u32 next_buf_phys; ··· 982 982 break; 983 983 984 984 offset += sg->length; 985 - sg = scatterwalk_sg_next(sg); 985 + sg = sg_next(sg); 986 986 } 987 987 return (start + nbytes > offset + sg->length); 988 988 }
+3 -3
drivers/crypto/nx/nx.c
··· 177 177 break; 178 178 179 179 offset += sg_src->length; 180 - sg_src = scatterwalk_sg_next(sg_src); 180 + sg_src = sg_next(sg_src); 181 181 } 182 182 183 183 /* start - offset is the number of bytes to advance in the scatterlist ··· 187 187 while (len && (nx_sg - nx_dst) < sglen) { 188 188 n = scatterwalk_clamp(&walk, len); 189 189 if (!n) { 190 - /* In cases where we have scatterlist chain scatterwalk_sg_next 190 + /* In cases where we have scatterlist chain sg_next 191 191 * handles with it properly */ 192 - scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg)); 192 + scatterwalk_start(&walk, sg_next(walk.sg)); 193 193 n = scatterwalk_clamp(&walk, len); 194 194 } 195 195 dst = scatterwalk_map(&walk);
+2 -2
drivers/crypto/omap-aes.c
··· 994 994 995 995 scatterwalk_advance(&dd->in_walk, 4); 996 996 if (dd->in_sg->length == _calc_walked(in)) { 997 - dd->in_sg = scatterwalk_sg_next(dd->in_sg); 997 + dd->in_sg = sg_next(dd->in_sg); 998 998 if (dd->in_sg) { 999 999 scatterwalk_start(&dd->in_walk, 1000 1000 dd->in_sg); ··· 1026 1026 *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i)); 1027 1027 scatterwalk_advance(&dd->out_walk, 4); 1028 1028 if (dd->out_sg->length == _calc_walked(out)) { 1029 - dd->out_sg = scatterwalk_sg_next(dd->out_sg); 1029 + dd->out_sg = sg_next(dd->out_sg); 1030 1030 if (dd->out_sg) { 1031 1031 scatterwalk_start(&dd->out_walk, 1032 1032 dd->out_sg);
+4 -4
drivers/crypto/omap-des.c
··· 921 921 922 922 scatterwalk_advance(&dd->in_walk, 4); 923 923 if (dd->in_sg->length == _calc_walked(in)) { 924 - dd->in_sg = scatterwalk_sg_next(dd->in_sg); 924 + dd->in_sg = sg_next(dd->in_sg); 925 925 if (dd->in_sg) { 926 926 scatterwalk_start(&dd->in_walk, 927 927 dd->in_sg); ··· 953 953 *dst = omap_des_read(dd, DES_REG_DATA_N(dd, i)); 954 954 scatterwalk_advance(&dd->out_walk, 4); 955 955 if (dd->out_sg->length == _calc_walked(out)) { 956 - dd->out_sg = scatterwalk_sg_next(dd->out_sg); 956 + dd->out_sg = sg_next(dd->out_sg); 957 957 if (dd->out_sg) { 958 958 scatterwalk_start(&dd->out_walk, 959 959 dd->out_sg); ··· 965 965 } 966 966 } 967 967 968 - dd->total -= DES_BLOCK_SIZE; 968 + BUG_ON(dd->total < DES_BLOCK_SIZE); 969 969 970 - BUG_ON(dd->total < 0); 970 + dd->total -= DES_BLOCK_SIZE; 971 971 972 972 /* Clear IRQ status */ 973 973 status &= ~DES_REG_IRQ_DATA_OUT;
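Note: dd->total in these drivers is an unsigned size, so comparing it against zero after the subtraction can never fire; the fix asserts that a full block remains before subtracting. A small sketch of the distinction, assuming an unsigned counter as in the driver:

#include <linux/bug.h>
#include <linux/types.h>

/* With an unsigned counter, "total -= block; BUG_ON(total < 0);" can never
 * trigger: an underflow wraps to a huge value instead.  Assert before the
 * subtraction. */
static size_t consume_block(size_t total, size_t block)
{
	BUG_ON(total < block);
	return total - block;
}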
+5 -1
drivers/crypto/qat/qat_common/adf_accel_devices.h
··· 47 47 #ifndef ADF_ACCEL_DEVICES_H_ 48 48 #define ADF_ACCEL_DEVICES_H_ 49 49 #include <linux/module.h> 50 - #include <linux/atomic.h> 51 50 #include <linux/list.h> 52 51 #include <linux/proc_fs.h> 53 52 #include <linux/io.h> ··· 147 148 int (*alloc_irq)(struct adf_accel_dev *accel_dev); 148 149 void (*free_irq)(struct adf_accel_dev *accel_dev); 149 150 void (*enable_error_correction)(struct adf_accel_dev *accel_dev); 151 + int (*init_admin_comms)(struct adf_accel_dev *accel_dev); 152 + void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); 153 + int (*init_arb)(struct adf_accel_dev *accel_dev); 154 + void (*exit_arb)(struct adf_accel_dev *accel_dev); 155 + void (*enable_ints)(struct adf_accel_dev *accel_dev); 150 156 const char *fw_name; 151 157 uint32_t pci_dev_id; 152 158 uint32_t fuses;
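Note: the new hooks let the common code drive device-specific setup and teardown (admin comms, arbiter, interrupt enable) without knowing the hardware. A hedged sketch of a device-specific module populating them; the my_* names are placeholders, not the actual dh895xcc callbacks:

#include "adf_accel_devices.h"

/* Placeholder device-specific callbacks (prototypes only). */
int my_init_admin_comms(struct adf_accel_dev *accel_dev);
void my_exit_admin_comms(struct adf_accel_dev *accel_dev);
int my_init_arb(struct adf_accel_dev *accel_dev);
void my_exit_arb(struct adf_accel_dev *accel_dev);
void my_enable_ints(struct adf_accel_dev *accel_dev);

void my_init_hw_data(struct adf_hw_device_data *hw_data)
{
	hw_data->init_admin_comms = my_init_admin_comms;
	hw_data->exit_admin_comms = my_exit_admin_comms;
	hw_data->init_arb = my_init_arb;
	hw_data->exit_arb = my_exit_arb;
	hw_data->enable_ints = my_enable_ints;
}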
+6 -18
drivers/crypto/qat/qat_common/adf_aer.c
··· 82 82 struct work_struct reset_work; 83 83 }; 84 84 85 - #define PPDSTAT_OFFSET 0x7E 86 85 static void adf_dev_restore(struct adf_accel_dev *accel_dev) 87 86 { 88 87 struct pci_dev *pdev = accel_to_pci_dev(accel_dev); 89 88 struct pci_dev *parent = pdev->bus->self; 90 - uint16_t ppdstat = 0, bridge_ctl = 0; 91 - int pending = 0; 89 + uint16_t bridge_ctl = 0; 92 90 93 91 pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id); 94 - pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); 95 - pending = ppdstat & PCI_EXP_DEVSTA_TRPND; 96 - if (pending) { 97 - int ctr = 0; 98 92 99 - do { 100 - msleep(100); 101 - pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); 102 - pending = ppdstat & PCI_EXP_DEVSTA_TRPND; 103 - } while (pending && ctr++ < 10); 104 - } 105 - 106 - if (pending) 93 + if (!pci_wait_for_pending_transaction(pdev)) 107 94 pr_info("QAT: Transaction still in progress. Proceeding\n"); 108 95 109 96 pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); ··· 112 125 113 126 adf_dev_restarting_notify(accel_dev); 114 127 adf_dev_stop(accel_dev); 128 + adf_dev_shutdown(accel_dev); 115 129 adf_dev_restore(accel_dev); 116 - if (adf_dev_start(accel_dev)) { 130 + if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { 117 131 /* The device hanged and we can't restart it so stop here */ 118 132 dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); 119 133 kfree(reset_data); ··· 136 148 { 137 149 struct adf_reset_dev_data *reset_data; 138 150 139 - if (adf_dev_started(accel_dev) && 140 - !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) 151 + if (!adf_dev_started(accel_dev) || 152 + test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) 141 153 return 0; 142 154 143 155 set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
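Note: the hand-rolled loop that polled PCI_EXP_DEVSTA_TRPND is replaced by pci_wait_for_pending_transaction(), which returns false if transactions have not drained within its own timeout. A sketch of the secondary-bus-reset sequence built on it, roughly what adf_dev_restore() does minus the config-space save/restore; bus_reset_device() is an illustrative name:

#include <linux/pci.h>
#include <linux/delay.h>

/* Drain outstanding transactions before a secondary bus reset; the helper
 * returns even if they never drain, so only warn and carry on. */
static void bus_reset_device(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pdev->bus->self;
	u16 ctl;

	if (!pci_wait_for_pending_transaction(pdev))
		dev_warn(&pdev->dev, "transactions still pending, resetting anyway\n");

	pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &ctl);
	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
			      ctl | PCI_BRIDGE_CTL_BUS_RESET);
	msleep(100);
	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, ctl);
	msleep(100);
}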
+2
drivers/crypto/qat/qat_common/adf_cfg.c
··· 50 50 #include <linux/seq_file.h> 51 51 #include "adf_accel_devices.h" 52 52 #include "adf_cfg.h" 53 + #include "adf_common_drv.h" 53 54 54 55 static DEFINE_MUTEX(qat_cfg_read_lock); 55 56 ··· 160 159 down_write(&dev_cfg_data->lock); 161 160 adf_cfg_section_del_all(&dev_cfg_data->sec_list); 162 161 up_write(&dev_cfg_data->lock); 162 + clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); 163 163 } 164 164 165 165 /**
+1 -1
drivers/crypto/qat/qat_common/adf_common_drv.h
··· 93 93 int adf_dev_init(struct adf_accel_dev *accel_dev); 94 94 int adf_dev_start(struct adf_accel_dev *accel_dev); 95 95 int adf_dev_stop(struct adf_accel_dev *accel_dev); 96 - int adf_dev_shutdown(struct adf_accel_dev *accel_dev); 96 + void adf_dev_shutdown(struct adf_accel_dev *accel_dev); 97 97 98 98 int adf_ctl_dev_register(void); 99 99 void adf_ctl_dev_unregister(void);
+6 -1
drivers/crypto/qat/qat_common/adf_ctl_drv.c
··· 282 282 if (adf_dev_stop(accel_dev)) { 283 283 pr_err("QAT: Failed to stop qat_dev%d\n", id); 284 284 ret = -EFAULT; 285 + } else { 286 + adf_dev_shutdown(accel_dev); 285 287 } 286 288 } 287 289 } ··· 345 343 if (!adf_dev_started(accel_dev)) { 346 344 pr_info("QAT: Starting acceleration device qat_dev%d.\n", 347 345 ctl_data->device_id); 348 - ret = adf_dev_start(accel_dev); 346 + ret = adf_dev_init(accel_dev); 347 + if (!ret) 348 + ret = adf_dev_start(accel_dev); 349 349 } else { 350 350 pr_info("QAT: Acceleration device qat_dev%d already started.\n", 351 351 ctl_data->device_id); ··· 355 351 if (ret) { 356 352 pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id); 357 353 adf_dev_stop(accel_dev); 354 + adf_dev_shutdown(accel_dev); 358 355 } 359 356 out: 360 357 kfree(ctl_data);
+84 -14
drivers/crypto/qat/qat_common/adf_init.c
··· 108 108 EXPORT_SYMBOL_GPL(adf_service_unregister); 109 109 110 110 /** 111 - * adf_dev_start() - Start acceleration service for the given accel device 112 - * @accel_dev: Pointer to acceleration device. 111 + * adf_dev_init() - Init data structures and services for the given accel device 112 + * @accel_dev: Pointer to acceleration device. 113 113 * 114 - * Function notifies all the registered services that the acceleration device 115 - * is ready to be used. 116 - * To be used by QAT device specific drivers. 114 + * Initialize the ring data structures and the admin comms and arbitration 115 + * services. 117 116 * 118 117 * Return: 0 on success, error code othewise. 119 118 */ 120 - int adf_dev_start(struct adf_accel_dev *accel_dev) 119 + int adf_dev_init(struct adf_accel_dev *accel_dev) 121 120 { 122 121 struct service_hndl *service; 123 122 struct list_head *list_itr; 124 123 struct adf_hw_device_data *hw_data = accel_dev->hw_device; 125 124 125 + if (!hw_data) { 126 + dev_err(&GET_DEV(accel_dev), 127 + "QAT: Failed to init device - hw_data not set\n"); 128 + return -EFAULT; 129 + } 130 + 126 131 if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) { 127 132 pr_info("QAT: Device not configured\n"); 128 133 return -EFAULT; 129 134 } 130 - set_bit(ADF_STATUS_STARTING, &accel_dev->status); 135 + 136 + if (adf_init_etr_data(accel_dev)) { 137 + dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n"); 138 + return -EFAULT; 139 + } 140 + 141 + if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { 142 + dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n"); 143 + return -EFAULT; 144 + } 145 + 146 + if (hw_data->init_arb && hw_data->init_arb(accel_dev)) { 147 + dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n"); 148 + return -EFAULT; 149 + } 150 + 151 + hw_data->enable_ints(accel_dev); 131 152 132 153 if (adf_ae_init(accel_dev)) { 133 154 pr_err("QAT: Failed to initialise Acceleration Engine\n"); ··· 198 177 } 199 178 200 179 hw_data->enable_error_correction(accel_dev); 180 + 181 + return 0; 182 + } 183 + EXPORT_SYMBOL_GPL(adf_dev_init); 184 + 185 + /** 186 + * adf_dev_start() - Start acceleration service for the given accel device 187 + * @accel_dev: Pointer to acceleration device. 188 + * 189 + * Function notifies all the registered services that the acceleration device 190 + * is ready to be used. 191 + * To be used by QAT device specific drivers. 192 + * 193 + * Return: 0 on success, error code othewise. 
194 + */ 195 + int adf_dev_start(struct adf_accel_dev *accel_dev) 196 + { 197 + struct service_hndl *service; 198 + struct list_head *list_itr; 199 + 200 + set_bit(ADF_STATUS_STARTING, &accel_dev->status); 201 201 202 202 if (adf_ae_start(accel_dev)) { 203 203 pr_err("QAT: AE Start Failed\n"); ··· 274 232 */ 275 233 int adf_dev_stop(struct adf_accel_dev *accel_dev) 276 234 { 277 - struct adf_hw_device_data *hw_data = accel_dev->hw_device; 278 235 struct service_hndl *service; 279 236 struct list_head *list_itr; 280 - int ret, wait = 0; 237 + bool wait = false; 238 + int ret; 281 239 282 240 if (!adf_dev_started(accel_dev) && 283 241 !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { 284 242 return 0; 285 243 } 286 - clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); 287 244 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); 288 245 clear_bit(ADF_STATUS_STARTED, &accel_dev->status); 289 246 ··· 299 258 if (!ret) { 300 259 clear_bit(accel_dev->accel_id, &service->start_status); 301 260 } else if (ret == -EAGAIN) { 302 - wait = 1; 261 + wait = true; 303 262 clear_bit(accel_dev->accel_id, &service->start_status); 304 263 } 305 264 } ··· 319 278 if (wait) 320 279 msleep(100); 321 280 322 - if (adf_dev_started(accel_dev)) { 281 + if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) { 323 282 if (adf_ae_stop(accel_dev)) 324 283 pr_err("QAT: failed to stop AE\n"); 325 284 else 326 285 clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); 286 + } 287 + 288 + return 0; 289 + } 290 + EXPORT_SYMBOL_GPL(adf_dev_stop); 291 + 292 + /** 293 + * adf_dev_shutdown() - shutdown acceleration services and data strucutures 294 + * @accel_dev: Pointer to acceleration device 295 + * 296 + * Cleanup the ring data structures and the admin comms and arbitration 297 + * services. 298 + */ 299 + void adf_dev_shutdown(struct adf_accel_dev *accel_dev) 300 + { 301 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 302 + struct service_hndl *service; 303 + struct list_head *list_itr; 304 + 305 + if (!hw_data) { 306 + dev_err(&GET_DEV(accel_dev), 307 + "QAT: Failed to shutdown device - hw_data not set\n"); 308 + return; 327 309 } 328 310 329 311 if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { ··· 399 335 if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) 400 336 adf_cfg_del_all(accel_dev); 401 337 402 - return 0; 338 + if (hw_data->exit_arb) 339 + hw_data->exit_arb(accel_dev); 340 + 341 + if (hw_data->exit_admin_comms) 342 + hw_data->exit_admin_comms(accel_dev); 343 + 344 + adf_cleanup_etr_data(accel_dev); 403 345 } 404 - EXPORT_SYMBOL_GPL(adf_dev_stop); 346 + EXPORT_SYMBOL_GPL(adf_dev_shutdown); 405 347 406 348 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) 407 349 {
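Note: bring-up is now split into adf_dev_init() plus adf_dev_start(), and tear-down into adf_dev_stop() plus adf_dev_shutdown() (which now returns void). A sketch of how a caller is expected to drive the lifecycle, mirroring the ctl-driver changes above; the wrapper names are illustrative:

#include "adf_accel_devices.h"
#include "adf_common_drv.h"

/* Bring-up: init the rings/admin comms/arbiter, then start AEs, firmware
 * and registered services.  On any failure, both tear-down helpers cope
 * with a partially brought-up device. */
static int accel_bring_up(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_init(accel_dev);
	if (!ret)
		ret = adf_dev_start(accel_dev);

	if (ret) {
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}
	return ret;
}

static void accel_bring_down(struct adf_accel_dev *accel_dev)
{
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
}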
-1
drivers/crypto/qat/qat_common/adf_transport_internal.h
··· 48 48 #define ADF_TRANSPORT_INTRN_H 49 49 50 50 #include <linux/interrupt.h> 51 - #include <linux/atomic.h> 52 51 #include <linux/spinlock_types.h> 53 52 #include "adf_transport.h" 54 53
+1 -1
drivers/crypto/qat/qat_common/icp_qat_hw.h
··· 301 301 302 302 struct icp_qat_hw_cipher_algo_blk { 303 303 struct icp_qat_hw_cipher_aes256_f8 aes; 304 - }; 304 + } __aligned(64); 305 305 #endif
+478 -166
drivers/crypto/qat/qat_common/qat_algs.c
··· 63 63 #include "icp_qat_fw.h" 64 64 #include "icp_qat_fw_la.h" 65 65 66 - #define QAT_AES_HW_CONFIG_ENC(alg) \ 66 + #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ 67 67 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ 68 - ICP_QAT_HW_CIPHER_NO_CONVERT, \ 69 - ICP_QAT_HW_CIPHER_ENCRYPT) 68 + ICP_QAT_HW_CIPHER_NO_CONVERT, \ 69 + ICP_QAT_HW_CIPHER_ENCRYPT) 70 70 71 - #define QAT_AES_HW_CONFIG_DEC(alg) \ 71 + #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ 72 72 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ 73 - ICP_QAT_HW_CIPHER_KEY_CONVERT, \ 74 - ICP_QAT_HW_CIPHER_DECRYPT) 73 + ICP_QAT_HW_CIPHER_KEY_CONVERT, \ 74 + ICP_QAT_HW_CIPHER_DECRYPT) 75 75 76 76 static atomic_t active_dev; 77 77 ··· 102 102 }; 103 103 } __aligned(64); 104 104 105 - #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk) 106 - 107 - struct qat_auth_state { 108 - uint8_t data[MAX_AUTH_STATE_SIZE + 64]; 109 - } __aligned(64); 110 - 111 - struct qat_alg_session_ctx { 105 + struct qat_alg_aead_ctx { 112 106 struct qat_alg_cd *enc_cd; 113 - dma_addr_t enc_cd_paddr; 114 107 struct qat_alg_cd *dec_cd; 108 + dma_addr_t enc_cd_paddr; 115 109 dma_addr_t dec_cd_paddr; 116 - struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl; 117 - struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl; 118 - struct qat_crypto_instance *inst; 119 - struct crypto_tfm *tfm; 110 + struct icp_qat_fw_la_bulk_req enc_fw_req; 111 + struct icp_qat_fw_la_bulk_req dec_fw_req; 120 112 struct crypto_shash *hash_tfm; 121 113 enum icp_qat_hw_auth_algo qat_hash_alg; 114 + struct qat_crypto_instance *inst; 115 + struct crypto_tfm *tfm; 122 116 uint8_t salt[AES_BLOCK_SIZE]; 123 - spinlock_t lock; /* protects qat_alg_session_ctx struct */ 117 + spinlock_t lock; /* protects qat_alg_aead_ctx struct */ 118 + }; 119 + 120 + struct qat_alg_ablkcipher_ctx { 121 + struct icp_qat_hw_cipher_algo_blk *enc_cd; 122 + struct icp_qat_hw_cipher_algo_blk *dec_cd; 123 + dma_addr_t enc_cd_paddr; 124 + dma_addr_t dec_cd_paddr; 125 + struct icp_qat_fw_la_bulk_req enc_fw_req; 126 + struct icp_qat_fw_la_bulk_req dec_fw_req; 127 + struct qat_crypto_instance *inst; 128 + struct crypto_tfm *tfm; 129 + spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ 124 130 }; 125 131 126 132 static int get_current_node(void) ··· 150 144 } 151 145 152 146 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, 153 - struct qat_alg_session_ctx *ctx, 147 + struct qat_alg_aead_ctx *ctx, 154 148 const uint8_t *auth_key, 155 149 unsigned int auth_keylen) 156 150 { 157 - struct qat_auth_state auth_state; 158 151 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); 159 152 struct sha1_state sha1; 160 153 struct sha256_state sha256; 161 154 struct sha512_state sha512; 162 155 int block_size = crypto_shash_blocksize(ctx->hash_tfm); 163 156 int digest_size = crypto_shash_digestsize(ctx->hash_tfm); 164 - uint8_t *ipad = auth_state.data; 165 - uint8_t *opad = ipad + block_size; 157 + char ipad[block_size]; 158 + char opad[block_size]; 166 159 __be32 *hash_state_out; 167 160 __be64 *hash512_state_out; 168 161 int i, offset; 169 162 170 - memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64); 163 + memset(ipad, 0, block_size); 164 + memset(opad, 0, block_size); 171 165 shash->tfm = ctx->hash_tfm; 172 166 shash->flags = 0x0; 173 167 174 168 if (auth_keylen > block_size) { 175 - char buff[SHA512_BLOCK_SIZE]; 176 169 int ret = crypto_shash_digest(shash, auth_key, 177 - auth_keylen, buff); 170 + auth_keylen, ipad); 178 171 if (ret) 179 172 return ret; 180 173 181 - 
memcpy(ipad, buff, digest_size); 182 - memcpy(opad, buff, digest_size); 183 - memzero_explicit(ipad + digest_size, block_size - digest_size); 184 - memzero_explicit(opad + digest_size, block_size - digest_size); 174 + memcpy(opad, ipad, digest_size); 185 175 } else { 186 176 memcpy(ipad, auth_key, auth_keylen); 187 177 memcpy(opad, auth_key, auth_keylen); 188 - memzero_explicit(ipad + auth_keylen, block_size - auth_keylen); 189 - memzero_explicit(opad + auth_keylen, block_size - auth_keylen); 190 178 } 191 179 192 180 for (i = 0; i < block_size; i++) { ··· 267 267 header->comn_req_flags = 268 268 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, 269 269 QAT_COMN_PTR_TYPE_SGL); 270 - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, 271 - ICP_QAT_FW_LA_DIGEST_IN_BUFFER); 272 270 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, 273 271 ICP_QAT_FW_LA_PARTIAL_NONE); 274 272 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, ··· 277 279 ICP_QAT_FW_LA_NO_UPDATE_STATE); 278 280 } 279 281 280 - static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, 281 - int alg, struct crypto_authenc_keys *keys) 282 + static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, 283 + int alg, 284 + struct crypto_authenc_keys *keys) 282 285 { 283 286 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); 284 287 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; ··· 288 289 struct icp_qat_hw_auth_algo_blk *hash = 289 290 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx + 290 291 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen); 291 - struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl; 292 + struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req; 292 293 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; 293 294 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; 294 295 void *ptr = &req_tmpl->cd_ctrl; ··· 296 297 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; 297 298 298 299 /* CD setup */ 299 - cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg); 300 + cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); 300 301 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); 301 302 hash->sha.inner_setup.auth_config.config = 302 303 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, ··· 310 311 /* Request setup */ 311 312 qat_alg_init_common_hdr(header); 312 313 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; 314 + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, 315 + ICP_QAT_FW_LA_DIGEST_IN_BUFFER); 313 316 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, 314 317 ICP_QAT_FW_LA_RET_AUTH_RES); 315 318 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, ··· 357 356 return 0; 358 357 } 359 358 360 - static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, 361 - int alg, struct crypto_authenc_keys *keys) 359 + static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx, 360 + int alg, 361 + struct crypto_authenc_keys *keys) 362 362 { 363 363 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); 364 364 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; ··· 369 367 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx + 370 368 sizeof(struct icp_qat_hw_auth_setup) + 371 369 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); 372 - struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl; 370 + struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req; 373 371 struct 
icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; 374 372 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; 375 373 void *ptr = &req_tmpl->cd_ctrl; ··· 381 379 sizeof(struct icp_qat_fw_la_cipher_req_params)); 382 380 383 381 /* CD setup */ 384 - cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg); 382 + cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); 385 383 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); 386 384 hash->sha.inner_setup.auth_config.config = 387 385 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, ··· 396 394 /* Request setup */ 397 395 qat_alg_init_common_hdr(header); 398 396 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; 397 + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, 398 + ICP_QAT_FW_LA_DIGEST_IN_BUFFER); 399 399 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, 400 400 ICP_QAT_FW_LA_NO_RET_AUTH_RES); 401 401 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, ··· 448 444 return 0; 449 445 } 450 446 451 - static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx, 452 - const uint8_t *key, unsigned int keylen) 447 + static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, 448 + struct icp_qat_fw_la_bulk_req *req, 449 + struct icp_qat_hw_cipher_algo_blk *cd, 450 + const uint8_t *key, unsigned int keylen) 451 + { 452 + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; 453 + struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; 454 + struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; 455 + 456 + memcpy(cd->aes.key, key, keylen); 457 + qat_alg_init_common_hdr(header); 458 + header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; 459 + cd_pars->u.s.content_desc_params_sz = 460 + sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; 461 + /* Cipher CD config setup */ 462 + cd_ctrl->cipher_key_sz = keylen >> 3; 463 + cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; 464 + cd_ctrl->cipher_cfg_offset = 0; 465 + ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); 466 + ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); 467 + } 468 + 469 + static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, 470 + int alg, const uint8_t *key, 471 + unsigned int keylen) 472 + { 473 + struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; 474 + struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req; 475 + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; 476 + 477 + qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen); 478 + cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; 479 + enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); 480 + } 481 + 482 + static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, 483 + int alg, const uint8_t *key, 484 + unsigned int keylen) 485 + { 486 + struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; 487 + struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req; 488 + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; 489 + 490 + qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen); 491 + cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; 492 + dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); 493 + } 494 + 495 + static int qat_alg_validate_key(int key_len, int *alg) 496 + { 497 + switch (key_len) { 498 + case AES_KEYSIZE_128: 499 + *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; 500 + break; 501 + case AES_KEYSIZE_192: 502 + *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; 503 + break; 504 + case 
AES_KEYSIZE_256: 505 + *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; 506 + break; 507 + default: 508 + return -EINVAL; 509 + } 510 + return 0; 511 + } 512 + 513 + static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx, 514 + const uint8_t *key, unsigned int keylen) 453 515 { 454 516 struct crypto_authenc_keys keys; 455 517 int alg; ··· 526 456 if (crypto_authenc_extractkeys(&keys, key, keylen)) 527 457 goto bad_key; 528 458 529 - switch (keys.enckeylen) { 530 - case AES_KEYSIZE_128: 531 - alg = ICP_QAT_HW_CIPHER_ALGO_AES128; 532 - break; 533 - case AES_KEYSIZE_192: 534 - alg = ICP_QAT_HW_CIPHER_ALGO_AES192; 535 - break; 536 - case AES_KEYSIZE_256: 537 - alg = ICP_QAT_HW_CIPHER_ALGO_AES256; 538 - break; 539 - default: 459 + if (qat_alg_validate_key(keys.enckeylen, &alg)) 540 460 goto bad_key; 541 - } 542 461 543 - if (qat_alg_init_enc_session(ctx, alg, &keys)) 462 + if (qat_alg_aead_init_enc_session(ctx, alg, &keys)) 544 463 goto error; 545 464 546 - if (qat_alg_init_dec_session(ctx, alg, &keys)) 465 + if (qat_alg_aead_init_dec_session(ctx, alg, &keys)) 547 466 goto error; 548 467 549 468 return 0; ··· 543 484 return -EFAULT; 544 485 } 545 486 546 - static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key, 547 - unsigned int keylen) 487 + static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, 488 + const uint8_t *key, 489 + unsigned int keylen) 548 490 { 549 - struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm); 491 + int alg; 492 + 493 + if (qat_alg_validate_key(keylen, &alg)) 494 + goto bad_key; 495 + 496 + qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen); 497 + qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen); 498 + return 0; 499 + bad_key: 500 + crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 501 + return -EINVAL; 502 + } 503 + 504 + static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, 505 + unsigned int keylen) 506 + { 507 + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); 550 508 struct device *dev; 551 509 552 510 spin_lock(&ctx->lock); 553 511 if (ctx->enc_cd) { 554 512 /* rekeying */ 555 513 dev = &GET_DEV(ctx->inst->accel_dev); 556 - memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd)); 557 - memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd)); 558 - memzero_explicit(&ctx->enc_fw_req_tmpl, 559 - sizeof(struct icp_qat_fw_la_bulk_req)); 560 - memzero_explicit(&ctx->dec_fw_req_tmpl, 561 - sizeof(struct icp_qat_fw_la_bulk_req)); 514 + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 515 + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 516 + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 517 + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 562 518 } else { 563 519 /* new key */ 564 520 int node = get_current_node(); ··· 586 512 587 513 dev = &GET_DEV(inst->accel_dev); 588 514 ctx->inst = inst; 589 - ctx->enc_cd = dma_zalloc_coherent(dev, 590 - sizeof(struct qat_alg_cd), 515 + ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 591 516 &ctx->enc_cd_paddr, 592 517 GFP_ATOMIC); 593 518 if (!ctx->enc_cd) { 594 519 spin_unlock(&ctx->lock); 595 520 return -ENOMEM; 596 521 } 597 - ctx->dec_cd = dma_zalloc_coherent(dev, 598 - sizeof(struct qat_alg_cd), 522 + ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 599 523 &ctx->dec_cd_paddr, 600 524 GFP_ATOMIC); 601 525 if (!ctx->dec_cd) { ··· 602 530 } 603 531 } 604 532 spin_unlock(&ctx->lock); 605 - if (qat_alg_init_sessions(ctx, key, keylen)) 533 + if (qat_alg_aead_init_sessions(ctx, key, keylen)) 606 534 goto 
out_free_all; 607 535 608 536 return 0; 609 537 610 538 out_free_all: 611 - memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd)); 539 + memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); 612 540 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 613 541 ctx->dec_cd, ctx->dec_cd_paddr); 614 542 ctx->dec_cd = NULL; 615 543 out_free_enc: 616 - memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd)); 544 + memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); 617 545 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 618 546 ctx->enc_cd, ctx->enc_cd_paddr); 619 547 ctx->enc_cd = NULL; ··· 629 557 dma_addr_t blp = qat_req->buf.blp; 630 558 dma_addr_t blpout = qat_req->buf.bloutp; 631 559 size_t sz = qat_req->buf.sz; 632 - int i, bufs = bl->num_bufs; 560 + size_t sz_out = qat_req->buf.sz_out; 561 + int i; 633 562 634 563 for (i = 0; i < bl->num_bufs; i++) 635 564 dma_unmap_single(dev, bl->bufers[i].addr, ··· 640 567 kfree(bl); 641 568 if (blp != blpout) { 642 569 /* If out of place operation dma unmap only data */ 643 - int bufless = bufs - blout->num_mapped_bufs; 570 + int bufless = blout->num_bufs - blout->num_mapped_bufs; 644 571 645 - for (i = bufless; i < bufs; i++) { 572 + for (i = bufless; i < blout->num_bufs; i++) { 646 573 dma_unmap_single(dev, blout->bufers[i].addr, 647 574 blout->bufers[i].len, 648 575 DMA_BIDIRECTIONAL); 649 576 } 650 - dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE); 577 + dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); 651 578 kfree(blout); 652 579 } 653 580 } ··· 660 587 struct qat_crypto_request *qat_req) 661 588 { 662 589 struct device *dev = &GET_DEV(inst->accel_dev); 663 - int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc); 590 + int i, bufs = 0, sg_nctr = 0; 591 + int n = sg_nents(sgl), assoc_n = sg_nents(assoc); 664 592 struct qat_alg_buf_list *bufl; 665 593 struct qat_alg_buf_list *buflout = NULL; 666 594 dma_addr_t blp; 667 595 dma_addr_t bloutp = 0; 668 596 struct scatterlist *sg; 669 - size_t sz = sizeof(struct qat_alg_buf_list) + 597 + size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + 670 598 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); 671 599 672 600 if (unlikely(!n)) 673 601 return -EINVAL; 674 602 675 - bufl = kmalloc_node(sz, GFP_ATOMIC, 603 + bufl = kzalloc_node(sz, GFP_ATOMIC, 676 604 dev_to_node(&GET_DEV(inst->accel_dev))); 677 605 if (unlikely(!bufl)) 678 606 return -ENOMEM; ··· 694 620 goto err; 695 621 bufs++; 696 622 } 697 - bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, 698 - DMA_BIDIRECTIONAL); 699 - bufl->bufers[bufs].len = ivlen; 700 - if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) 701 - goto err; 702 - bufs++; 623 + if (ivlen) { 624 + bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, 625 + DMA_BIDIRECTIONAL); 626 + bufl->bufers[bufs].len = ivlen; 627 + if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) 628 + goto err; 629 + bufs++; 630 + } 703 631 704 632 for_each_sg(sgl, sg, n, i) { 705 - int y = i + bufs; 633 + int y = sg_nctr + bufs; 634 + 635 + if (!sg->length) 636 + continue; 706 637 707 638 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), 708 639 sg->length, ··· 715 636 bufl->bufers[y].len = sg->length; 716 637 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) 717 638 goto err; 639 + sg_nctr++; 718 640 } 719 - bufl->num_bufs = n + bufs; 641 + bufl->num_bufs = sg_nctr + bufs; 720 642 qat_req->buf.bl = bufl; 721 643 qat_req->buf.blp = blp; 722 644 qat_req->buf.sz = sz; ··· 725 645 if (sgl != sglout) { 726 646 struct qat_alg_buf *bufers; 727 647 
728 - buflout = kmalloc_node(sz, GFP_ATOMIC, 648 + n = sg_nents(sglout); 649 + sz_out = sizeof(struct qat_alg_buf_list) + 650 + ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); 651 + sg_nctr = 0; 652 + buflout = kzalloc_node(sz_out, GFP_ATOMIC, 729 653 dev_to_node(&GET_DEV(inst->accel_dev))); 730 654 if (unlikely(!buflout)) 731 655 goto err; 732 - bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); 656 + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); 733 657 if (unlikely(dma_mapping_error(dev, bloutp))) 734 658 goto err; 735 659 bufers = buflout->bufers; ··· 744 660 bufers[i].addr = bufl->bufers[i].addr; 745 661 } 746 662 for_each_sg(sglout, sg, n, i) { 747 - int y = i + bufs; 663 + int y = sg_nctr + bufs; 664 + 665 + if (!sg->length) 666 + continue; 748 667 749 668 bufers[y].addr = dma_map_single(dev, sg_virt(sg), 750 669 sg->length, 751 670 DMA_BIDIRECTIONAL); 752 - buflout->bufers[y].len = sg->length; 753 671 if (unlikely(dma_mapping_error(dev, bufers[y].addr))) 754 672 goto err; 673 + bufers[y].len = sg->length; 674 + sg_nctr++; 755 675 } 756 - buflout->num_bufs = n + bufs; 757 - buflout->num_mapped_bufs = n; 676 + buflout->num_bufs = sg_nctr + bufs; 677 + buflout->num_mapped_bufs = sg_nctr; 758 678 qat_req->buf.blout = buflout; 759 679 qat_req->buf.bloutp = bloutp; 680 + qat_req->buf.sz_out = sz_out; 760 681 } else { 761 682 /* Otherwise set the src and dst to the same address */ 762 683 qat_req->buf.bloutp = qat_req->buf.blp; 684 + qat_req->buf.sz_out = 0; 763 685 } 764 686 return 0; 765 687 err: 766 688 dev_err(dev, "Failed to map buf for dma\n"); 767 - for_each_sg(sgl, sg, n + bufs, i) { 768 - if (!dma_mapping_error(dev, bufl->bufers[i].addr)) { 689 + sg_nctr = 0; 690 + for (i = 0; i < n + bufs; i++) 691 + if (!dma_mapping_error(dev, bufl->bufers[i].addr)) 769 692 dma_unmap_single(dev, bufl->bufers[i].addr, 770 693 bufl->bufers[i].len, 771 694 DMA_BIDIRECTIONAL); 772 - } 773 - } 695 + 774 696 if (!dma_mapping_error(dev, blp)) 775 697 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); 776 698 kfree(bufl); 777 699 if (sgl != sglout && buflout) { 778 - for_each_sg(sglout, sg, n, i) { 779 - int y = i + bufs; 780 - 781 - if (!dma_mapping_error(dev, buflout->bufers[y].addr)) 782 - dma_unmap_single(dev, buflout->bufers[y].addr, 783 - buflout->bufers[y].len, 700 + n = sg_nents(sglout); 701 + for (i = bufs; i < n + bufs; i++) 702 + if (!dma_mapping_error(dev, buflout->bufers[i].addr)) 703 + dma_unmap_single(dev, buflout->bufers[i].addr, 704 + buflout->bufers[i].len, 784 705 DMA_BIDIRECTIONAL); 785 - } 786 706 if (!dma_mapping_error(dev, bloutp)) 787 - dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE); 707 + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); 788 708 kfree(buflout); 789 709 } 790 710 return -ENOMEM; 791 711 } 792 712 793 - void qat_alg_callback(void *resp) 713 + static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, 714 + struct qat_crypto_request *qat_req) 794 715 { 795 - struct icp_qat_fw_la_resp *qat_resp = resp; 796 - struct qat_crypto_request *qat_req = 797 - (void *)(__force long)qat_resp->opaque_data; 798 - struct qat_alg_session_ctx *ctx = qat_req->ctx; 716 + struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; 799 717 struct qat_crypto_instance *inst = ctx->inst; 800 - struct aead_request *areq = qat_req->areq; 718 + struct aead_request *areq = qat_req->aead_req; 801 719 uint8_t stat_filed = qat_resp->comn_resp.comn_status; 802 720 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); 803 721 ··· 809 723 
areq->base.complete(&areq->base, res); 810 724 } 811 725 812 - static int qat_alg_dec(struct aead_request *areq) 726 + static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, 727 + struct qat_crypto_request *qat_req) 728 + { 729 + struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx; 730 + struct qat_crypto_instance *inst = ctx->inst; 731 + struct ablkcipher_request *areq = qat_req->ablkcipher_req; 732 + uint8_t stat_filed = qat_resp->comn_resp.comn_status; 733 + int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); 734 + 735 + qat_alg_free_bufl(inst, qat_req); 736 + if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) 737 + res = -EINVAL; 738 + areq->base.complete(&areq->base, res); 739 + } 740 + 741 + void qat_alg_callback(void *resp) 742 + { 743 + struct icp_qat_fw_la_resp *qat_resp = resp; 744 + struct qat_crypto_request *qat_req = 745 + (void *)(__force long)qat_resp->opaque_data; 746 + 747 + qat_req->cb(qat_resp, qat_req); 748 + } 749 + 750 + static int qat_alg_aead_dec(struct aead_request *areq) 813 751 { 814 752 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); 815 753 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); 816 - struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); 754 + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); 817 755 struct qat_crypto_request *qat_req = aead_request_ctx(areq); 818 756 struct icp_qat_fw_la_cipher_req_params *cipher_param; 819 757 struct icp_qat_fw_la_auth_req_params *auth_param; ··· 851 741 return ret; 852 742 853 743 msg = &qat_req->req; 854 - *msg = ctx->dec_fw_req_tmpl; 855 - qat_req->ctx = ctx; 856 - qat_req->areq = areq; 744 + *msg = ctx->dec_fw_req; 745 + qat_req->aead_ctx = ctx; 746 + qat_req->aead_req = areq; 747 + qat_req->cb = qat_aead_alg_callback; 857 748 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; 858 749 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; 859 750 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; ··· 877 766 return -EINPROGRESS; 878 767 } 879 768 880 - static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv, 881 - int enc_iv) 769 + static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, 770 + int enc_iv) 882 771 { 883 772 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); 884 773 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); 885 - struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); 774 + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); 886 775 struct qat_crypto_request *qat_req = aead_request_ctx(areq); 887 776 struct icp_qat_fw_la_cipher_req_params *cipher_param; 888 777 struct icp_qat_fw_la_auth_req_params *auth_param; ··· 895 784 return ret; 896 785 897 786 msg = &qat_req->req; 898 - *msg = ctx->enc_fw_req_tmpl; 899 - qat_req->ctx = ctx; 900 - qat_req->areq = areq; 787 + *msg = ctx->enc_fw_req; 788 + qat_req->aead_ctx = ctx; 789 + qat_req->aead_req = areq; 790 + qat_req->cb = qat_aead_alg_callback; 901 791 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; 902 792 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; 903 793 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; ··· 927 815 return -EINPROGRESS; 928 816 } 929 817 930 - static int qat_alg_enc(struct aead_request *areq) 818 + static int qat_alg_aead_enc(struct aead_request *areq) 931 819 { 932 - return qat_alg_enc_internal(areq, areq->iv, 0); 820 + return qat_alg_aead_enc_internal(areq, areq->iv, 0); 933 821 } 934 822 935 - static int qat_alg_genivenc(struct 
aead_givcrypt_request *req) 823 + static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req) 936 824 { 937 825 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); 938 826 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); 939 - struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); 827 + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); 940 828 __be64 seq; 941 829 942 830 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); 943 831 seq = cpu_to_be64(req->seq); 944 832 memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), 945 833 &seq, sizeof(uint64_t)); 946 - return qat_alg_enc_internal(&req->areq, req->giv, 1); 834 + return qat_alg_aead_enc_internal(&req->areq, req->giv, 1); 947 835 } 948 836 949 - static int qat_alg_init(struct crypto_tfm *tfm, 950 - enum icp_qat_hw_auth_algo hash, const char *hash_name) 837 + static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 838 + const uint8_t *key, 839 + unsigned int keylen) 951 840 { 952 - struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); 841 + struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); 842 + struct device *dev; 953 843 954 - memzero_explicit(ctx, sizeof(*ctx)); 844 + spin_lock(&ctx->lock); 845 + if (ctx->enc_cd) { 846 + /* rekeying */ 847 + dev = &GET_DEV(ctx->inst->accel_dev); 848 + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 849 + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 850 + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 851 + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 852 + } else { 853 + /* new key */ 854 + int node = get_current_node(); 855 + struct qat_crypto_instance *inst = 856 + qat_crypto_get_instance_node(node); 857 + if (!inst) { 858 + spin_unlock(&ctx->lock); 859 + return -EINVAL; 860 + } 861 + 862 + dev = &GET_DEV(inst->accel_dev); 863 + ctx->inst = inst; 864 + ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 865 + &ctx->enc_cd_paddr, 866 + GFP_ATOMIC); 867 + if (!ctx->enc_cd) { 868 + spin_unlock(&ctx->lock); 869 + return -ENOMEM; 870 + } 871 + ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 872 + &ctx->dec_cd_paddr, 873 + GFP_ATOMIC); 874 + if (!ctx->dec_cd) { 875 + spin_unlock(&ctx->lock); 876 + goto out_free_enc; 877 + } 878 + } 879 + spin_unlock(&ctx->lock); 880 + if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen)) 881 + goto out_free_all; 882 + 883 + return 0; 884 + 885 + out_free_all: 886 + memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd)); 887 + dma_free_coherent(dev, sizeof(*ctx->enc_cd), 888 + ctx->dec_cd, ctx->dec_cd_paddr); 889 + ctx->dec_cd = NULL; 890 + out_free_enc: 891 + memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd)); 892 + dma_free_coherent(dev, sizeof(*ctx->dec_cd), 893 + ctx->enc_cd, ctx->enc_cd_paddr); 894 + ctx->enc_cd = NULL; 895 + return -ENOMEM; 896 + } 897 + 898 + static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) 899 + { 900 + struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); 901 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); 902 + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 903 + struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); 904 + struct icp_qat_fw_la_cipher_req_params *cipher_param; 905 + struct icp_qat_fw_la_bulk_req *msg; 906 + int ret, ctr = 0; 907 + 908 + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, 909 + NULL, 0, qat_req); 910 + if (unlikely(ret)) 911 + return ret; 912 + 913 + msg = &qat_req->req; 914 + *msg = ctx->enc_fw_req; 915 + qat_req->ablkcipher_ctx = ctx; 916 + qat_req->ablkcipher_req = req; 917 + 
qat_req->cb = qat_ablkcipher_alg_callback; 918 + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; 919 + qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; 920 + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; 921 + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; 922 + cipher_param->cipher_length = req->nbytes; 923 + cipher_param->cipher_offset = 0; 924 + memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); 925 + do { 926 + ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); 927 + } while (ret == -EAGAIN && ctr++ < 10); 928 + 929 + if (ret == -EAGAIN) { 930 + qat_alg_free_bufl(ctx->inst, qat_req); 931 + return -EBUSY; 932 + } 933 + return -EINPROGRESS; 934 + } 935 + 936 + static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) 937 + { 938 + struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); 939 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); 940 + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 941 + struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); 942 + struct icp_qat_fw_la_cipher_req_params *cipher_param; 943 + struct icp_qat_fw_la_bulk_req *msg; 944 + int ret, ctr = 0; 945 + 946 + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, 947 + NULL, 0, qat_req); 948 + if (unlikely(ret)) 949 + return ret; 950 + 951 + msg = &qat_req->req; 952 + *msg = ctx->dec_fw_req; 953 + qat_req->ablkcipher_ctx = ctx; 954 + qat_req->ablkcipher_req = req; 955 + qat_req->cb = qat_ablkcipher_alg_callback; 956 + qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; 957 + qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; 958 + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; 959 + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; 960 + cipher_param->cipher_length = req->nbytes; 961 + cipher_param->cipher_offset = 0; 962 + memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); 963 + do { 964 + ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); 965 + } while (ret == -EAGAIN && ctr++ < 10); 966 + 967 + if (ret == -EAGAIN) { 968 + qat_alg_free_bufl(ctx->inst, qat_req); 969 + return -EBUSY; 970 + } 971 + return -EINPROGRESS; 972 + } 973 + 974 + static int qat_alg_aead_init(struct crypto_tfm *tfm, 975 + enum icp_qat_hw_auth_algo hash, 976 + const char *hash_name) 977 + { 978 + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); 979 + 955 980 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); 956 981 if (IS_ERR(ctx->hash_tfm)) 957 982 return -EFAULT; ··· 1100 851 return 0; 1101 852 } 1102 853 1103 - static int qat_alg_sha1_init(struct crypto_tfm *tfm) 854 + static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm) 1104 855 { 1105 - return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); 856 + return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); 1106 857 } 1107 858 1108 - static int qat_alg_sha256_init(struct crypto_tfm *tfm) 859 + static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm) 1109 860 { 1110 - return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); 861 + return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); 1111 862 } 1112 863 1113 - static int qat_alg_sha512_init(struct crypto_tfm *tfm) 864 + static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm) 1114 865 { 1115 - return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); 866 + return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); 1116 867 } 1117 868 1118 - static void 
qat_alg_exit(struct crypto_tfm *tfm) 869 + static void qat_alg_aead_exit(struct crypto_tfm *tfm) 1119 870 { 1120 - struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); 871 + struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); 1121 872 struct qat_crypto_instance *inst = ctx->inst; 1122 873 struct device *dev; 1123 874 ··· 1129 880 1130 881 dev = &GET_DEV(inst->accel_dev); 1131 882 if (ctx->enc_cd) { 1132 - memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd)); 883 + memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); 1133 884 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 1134 885 ctx->enc_cd, ctx->enc_cd_paddr); 1135 886 } 1136 887 if (ctx->dec_cd) { 1137 - memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd)); 888 + memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); 1138 889 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 890 + ctx->dec_cd, ctx->dec_cd_paddr); 891 + } 892 + qat_crypto_put_instance(inst); 893 + } 894 + 895 + static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm) 896 + { 897 + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 898 + 899 + spin_lock_init(&ctx->lock); 900 + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + 901 + sizeof(struct qat_crypto_request); 902 + ctx->tfm = tfm; 903 + return 0; 904 + } 905 + 906 + static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm) 907 + { 908 + struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 909 + struct qat_crypto_instance *inst = ctx->inst; 910 + struct device *dev; 911 + 912 + if (!inst) 913 + return; 914 + 915 + dev = &GET_DEV(inst->accel_dev); 916 + if (ctx->enc_cd) { 917 + memset(ctx->enc_cd, 0, 918 + sizeof(struct icp_qat_hw_cipher_algo_blk)); 919 + dma_free_coherent(dev, 920 + sizeof(struct icp_qat_hw_cipher_algo_blk), 921 + ctx->enc_cd, ctx->enc_cd_paddr); 922 + } 923 + if (ctx->dec_cd) { 924 + memset(ctx->dec_cd, 0, 925 + sizeof(struct icp_qat_hw_cipher_algo_blk)); 926 + dma_free_coherent(dev, 927 + sizeof(struct icp_qat_hw_cipher_algo_blk), 1139 928 ctx->dec_cd, ctx->dec_cd_paddr); 1140 929 } 1141 930 qat_crypto_put_instance(inst); ··· 1185 898 .cra_priority = 4001, 1186 899 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, 1187 900 .cra_blocksize = AES_BLOCK_SIZE, 1188 - .cra_ctxsize = sizeof(struct qat_alg_session_ctx), 901 + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), 1189 902 .cra_alignmask = 0, 1190 903 .cra_type = &crypto_aead_type, 1191 904 .cra_module = THIS_MODULE, 1192 - .cra_init = qat_alg_sha1_init, 1193 - .cra_exit = qat_alg_exit, 905 + .cra_init = qat_alg_aead_sha1_init, 906 + .cra_exit = qat_alg_aead_exit, 1194 907 .cra_u = { 1195 908 .aead = { 1196 - .setkey = qat_alg_setkey, 1197 - .decrypt = qat_alg_dec, 1198 - .encrypt = qat_alg_enc, 1199 - .givencrypt = qat_alg_genivenc, 909 + .setkey = qat_alg_aead_setkey, 910 + .decrypt = qat_alg_aead_dec, 911 + .encrypt = qat_alg_aead_enc, 912 + .givencrypt = qat_alg_aead_genivenc, 1200 913 .ivsize = AES_BLOCK_SIZE, 1201 914 .maxauthsize = SHA1_DIGEST_SIZE, 1202 915 }, ··· 1207 920 .cra_priority = 4001, 1208 921 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, 1209 922 .cra_blocksize = AES_BLOCK_SIZE, 1210 - .cra_ctxsize = sizeof(struct qat_alg_session_ctx), 923 + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), 1211 924 .cra_alignmask = 0, 1212 925 .cra_type = &crypto_aead_type, 1213 926 .cra_module = THIS_MODULE, 1214 - .cra_init = qat_alg_sha256_init, 1215 - .cra_exit = qat_alg_exit, 927 + .cra_init = qat_alg_aead_sha256_init, 928 + .cra_exit = qat_alg_aead_exit, 1216 929 .cra_u = { 1217 930 .aead 
= { 1218 - .setkey = qat_alg_setkey, 1219 - .decrypt = qat_alg_dec, 1220 - .encrypt = qat_alg_enc, 1221 - .givencrypt = qat_alg_genivenc, 931 + .setkey = qat_alg_aead_setkey, 932 + .decrypt = qat_alg_aead_dec, 933 + .encrypt = qat_alg_aead_enc, 934 + .givencrypt = qat_alg_aead_genivenc, 1222 935 .ivsize = AES_BLOCK_SIZE, 1223 936 .maxauthsize = SHA256_DIGEST_SIZE, 1224 937 }, ··· 1229 942 .cra_priority = 4001, 1230 943 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, 1231 944 .cra_blocksize = AES_BLOCK_SIZE, 1232 - .cra_ctxsize = sizeof(struct qat_alg_session_ctx), 945 + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), 1233 946 .cra_alignmask = 0, 1234 947 .cra_type = &crypto_aead_type, 1235 948 .cra_module = THIS_MODULE, 1236 - .cra_init = qat_alg_sha512_init, 1237 - .cra_exit = qat_alg_exit, 949 + .cra_init = qat_alg_aead_sha512_init, 950 + .cra_exit = qat_alg_aead_exit, 1238 951 .cra_u = { 1239 952 .aead = { 1240 - .setkey = qat_alg_setkey, 1241 - .decrypt = qat_alg_dec, 1242 - .encrypt = qat_alg_enc, 1243 - .givencrypt = qat_alg_genivenc, 953 + .setkey = qat_alg_aead_setkey, 954 + .decrypt = qat_alg_aead_dec, 955 + .encrypt = qat_alg_aead_enc, 956 + .givencrypt = qat_alg_aead_genivenc, 1244 957 .ivsize = AES_BLOCK_SIZE, 1245 958 .maxauthsize = SHA512_DIGEST_SIZE, 959 + }, 960 + }, 961 + }, { 962 + .cra_name = "cbc(aes)", 963 + .cra_driver_name = "qat_aes_cbc", 964 + .cra_priority = 4001, 965 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 966 + .cra_blocksize = AES_BLOCK_SIZE, 967 + .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), 968 + .cra_alignmask = 0, 969 + .cra_type = &crypto_ablkcipher_type, 970 + .cra_module = THIS_MODULE, 971 + .cra_init = qat_alg_ablkcipher_init, 972 + .cra_exit = qat_alg_ablkcipher_exit, 973 + .cra_u = { 974 + .ablkcipher = { 975 + .setkey = qat_alg_ablkcipher_setkey, 976 + .decrypt = qat_alg_ablkcipher_decrypt, 977 + .encrypt = qat_alg_ablkcipher_encrypt, 978 + .min_keysize = AES_MIN_KEY_SIZE, 979 + .max_keysize = AES_MAX_KEY_SIZE, 980 + .ivsize = AES_BLOCK_SIZE, 1246 981 }, 1247 982 }, 1248 983 } }; ··· 1275 966 int i; 1276 967 1277 968 for (i = 0; i < ARRAY_SIZE(qat_algs); i++) 1278 - qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD | 1279 - CRYPTO_ALG_ASYNC; 969 + qat_algs[i].cra_flags = 970 + (qat_algs[i].cra_type == &crypto_aead_type) ? 971 + CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : 972 + CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 973 + 1280 974 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); 1281 975 } 1282 976 return 0;
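The qat_alg_sgl_to_bufl() rework above skips zero-length scatterlist entries and counts only the entries it actually DMA-maps (sg_nctr), so the error path unwinds exactly what was mapped. A rough userspace sketch of that bookkeeping pattern, with made-up types rather than the QAT driver's own structures:

#include <stdio.h>
#include <stddef.h>

struct buf { const void *addr; size_t len; };

/*
 * Build a descriptor table from 'in', skipping zero-length entries and
 * returning how many slots were actually filled, so that any error
 * unwinding only has to touch the slots that were really used.
 */
static size_t build_desc_table(const struct buf *in, size_t n, struct buf *out)
{
        size_t i, filled = 0;

        for (i = 0; i < n; i++) {
                if (!in[i].len)
                        continue;       /* empty entry: skip it, don't count it */
                out[filled++] = in[i];
        }
        return filled;
}

int main(void)
{
        struct buf src[3] = { { "hdr", 3 }, { NULL, 0 }, { "payload", 7 } };
        struct buf table[3];

        printf("mapped %zu of 3 entries\n", build_desc_table(src, 3, table));
        return 0;
}

Keeping the filled count separate from the nominal entry count is what lets the cleanup loop stop at the right slot.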
+14 -2
drivers/crypto/qat/qat_common/qat_crypto.h
··· 72 72 struct qat_alg_buf_list *blout;
73 73 dma_addr_t bloutp;
74 74 size_t sz;
75 + size_t sz_out;
75 76 };
77 +
78 + struct qat_crypto_request;
76 79
77 80 struct qat_crypto_request {
78 81 struct icp_qat_fw_la_bulk_req req;
79 - struct qat_alg_session_ctx *ctx;
80 - struct aead_request *areq;
82 + union {
83 + struct qat_alg_aead_ctx *aead_ctx;
84 + struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
85 + };
86 + union {
87 + struct aead_request *aead_req;
88 + struct ablkcipher_request *ablkcipher_req;
89 + };
81 90 struct qat_crypto_request_buffs buf;
91 + void (*cb)(struct icp_qat_fw_la_resp *resp,
92 + struct qat_crypto_request *req);
82 93 };
94 +
83 95 #endif
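With AEAD and ablkcipher requests now sharing one firmware response path, each request carries its own completion callback and qat_alg_callback() simply dispatches through it. A minimal sketch of that dispatch-through-the-request idea, using generic names instead of the real QAT structures:

#include <stdio.h>

struct request;                 /* forward declaration, as in the header above */

/*
 * Each request carries its own completion handler, so one generic
 * response path can service different request flavours without a
 * switch on the request type.
 */
struct request {
        int result;
        void (*cb)(struct request *req);
};

static void aead_done(struct request *req)
{
        printf("aead request finished: %d\n", req->result);
}

static void blkcipher_done(struct request *req)
{
        printf("blkcipher request finished: %d\n", req->result);
}

/* Generic completion path: just call whatever the request registered. */
static void complete_request(struct request *req, int result)
{
        req->result = result;
        req->cb(req);
}

int main(void)
{
        struct request a = { 0, aead_done };
        struct request b = { 0, blkcipher_done };

        complete_request(&a, 0);
        complete_request(&b, -22);
        return 0;
}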
+19
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
··· 46 46 */ 47 47 #include <adf_accel_devices.h> 48 48 #include "adf_dh895xcc_hw_data.h" 49 + #include "adf_common_drv.h" 49 50 #include "adf_drv.h" 50 51 51 52 /* Worker thread to service arbiter mappings based on dev SKUs */ ··· 183 182 } 184 183 } 185 184 185 + static void adf_enable_ints(struct adf_accel_dev *accel_dev) 186 + { 187 + void __iomem *addr; 188 + 189 + addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; 190 + 191 + /* Enable bundle and misc interrupts */ 192 + ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, 193 + ADF_DH895XCC_SMIA0_MASK); 194 + ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, 195 + ADF_DH895XCC_SMIA1_MASK); 196 + } 197 + 186 198 void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) 187 199 { 188 200 hw_data->dev_class = &dh895xcc_class; ··· 220 206 hw_data->get_misc_bar_id = get_misc_bar_id; 221 207 hw_data->get_sku = get_sku; 222 208 hw_data->fw_name = ADF_DH895XCC_FW; 209 + hw_data->init_admin_comms = adf_init_admin_comms; 210 + hw_data->exit_admin_comms = adf_exit_admin_comms; 211 + hw_data->init_arb = adf_init_arb; 212 + hw_data->exit_arb = adf_exit_arb; 213 + hw_data->enable_ints = adf_enable_ints; 223 214 } 224 215 225 216 void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
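The device-specific code above now mostly fills in hooks (init_admin_comms, init_arb, enable_ints and so on) that the common driver core calls at the appropriate point. A small sketch of the ops-table idea; the hook names here are invented for illustration and are not the real adf_hw_device_data layout:

#include <stdio.h>

/*
 * The generic core calls through an ops table; each device variant
 * only supplies the hooks it supports.
 */
struct hw_ops {
        int  (*init_arb)(void);
        void (*enable_ints)(void);
};

static int dh895xcc_init_arb(void)     { puts("arbiter configured"); return 0; }
static void dh895xcc_enable_ints(void) { puts("interrupts enabled"); }

static int bring_up(const struct hw_ops *ops)
{
        int ret = ops->init_arb ? ops->init_arb() : 0;

        if (ret)
                return ret;
        if (ops->enable_ints)
                ops->enable_ints();
        return 0;
}

int main(void)
{
        const struct hw_ops dh895xcc = {
                .init_arb = dh895xcc_init_arb,
                .enable_ints = dh895xcc_enable_ints,
        };

        return bring_up(&dh895xcc);
}

The payoff is that a new device variant only has to supply the hooks that actually differ.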
+11 -31
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
··· 90 90 struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; 91 91 int i; 92 92 93 - adf_exit_admin_comms(accel_dev); 94 - adf_exit_arb(accel_dev); 95 - adf_cleanup_etr_data(accel_dev); 93 + adf_dev_shutdown(accel_dev); 96 94 97 95 for (i = 0; i < ADF_PCI_MAX_BARS; i++) { 98 96 struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; ··· 117 119 kfree(accel_dev); 118 120 } 119 121 120 - static int qat_dev_start(struct adf_accel_dev *accel_dev) 122 + static int adf_dev_configure(struct adf_accel_dev *accel_dev) 121 123 { 122 124 int cpus = num_online_cpus(); 123 125 int banks = GET_MAX_BANKS(accel_dev); ··· 204 206 goto err; 205 207 206 208 set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); 207 - return adf_dev_start(accel_dev); 209 + return 0; 208 210 err: 209 211 dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); 210 212 return -EINVAL; ··· 215 217 struct adf_accel_dev *accel_dev; 216 218 struct adf_accel_pci *accel_pci_dev; 217 219 struct adf_hw_device_data *hw_data; 218 - void __iomem *pmisc_bar_addr = NULL; 219 220 char name[ADF_DEVICE_NAME_LENGTH]; 220 221 unsigned int i, bar_nr; 221 222 int ret; ··· 344 347 ret = -EFAULT; 345 348 goto out_err; 346 349 } 347 - if (i == ADF_DH895XCC_PMISC_BAR) 348 - pmisc_bar_addr = bar->virt_addr; 349 350 } 350 351 pci_set_master(pdev); 351 352 ··· 353 358 goto out_err; 354 359 } 355 360 356 - if (adf_init_etr_data(accel_dev)) { 357 - dev_err(&pdev->dev, "Failed initialize etr\n"); 358 - ret = -EFAULT; 359 - goto out_err; 360 - } 361 - 362 - if (adf_init_admin_comms(accel_dev)) { 363 - dev_err(&pdev->dev, "Failed initialize admin comms\n"); 364 - ret = -EFAULT; 365 - goto out_err; 366 - } 367 - 368 - if (adf_init_arb(accel_dev)) { 369 - dev_err(&pdev->dev, "Failed initialize hw arbiter\n"); 370 - ret = -EFAULT; 371 - goto out_err; 372 - } 373 361 if (pci_save_state(pdev)) { 374 362 dev_err(&pdev->dev, "Failed to save pci state\n"); 375 363 ret = -ENOMEM; 376 364 goto out_err; 377 365 } 378 366 379 - /* Enable bundle and misc interrupts */ 380 - ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, 381 - ADF_DH895XCC_SMIA0_MASK); 382 - ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, 383 - ADF_DH895XCC_SMIA1_MASK); 367 + ret = adf_dev_configure(accel_dev); 368 + if (ret) 369 + goto out_err; 384 370 385 - ret = qat_dev_start(accel_dev); 371 + ret = adf_dev_init(accel_dev); 372 + if (ret) 373 + goto out_err; 374 + 375 + ret = adf_dev_start(accel_dev); 386 376 if (ret) { 387 377 adf_dev_stop(accel_dev); 388 378 goto out_err;
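The probe path is reshuffled so that configuration, init and start become separate stages (adf_dev_configure(), adf_dev_init(), adf_dev_start()) with adf_dev_shutdown() on the cleanup side. A compact sketch of staged bring-up with unwinding, using placeholder names only:

#include <stdio.h>

/* Each phase either succeeds or the caller unwinds the phases that
 * already completed (names are illustrative, not the ADF driver's). */
static int dev_configure(void) { puts("configure"); return 0; }
static int dev_init(void)      { puts("init");      return 0; }
static int dev_start(void)     { puts("start");     return 0; }
static void dev_stop(void)     { puts("stop"); }
static void dev_shutdown(void) { puts("shutdown"); }

static int probe(void)
{
        int ret;

        ret = dev_configure();
        if (ret)
                return ret;
        ret = dev_init();
        if (ret)
                goto out_shutdown;
        ret = dev_start();
        if (ret)
                goto out_stop;
        return 0;

out_stop:
        dev_stop();
out_shutdown:
        dev_shutdown();
        return ret;
}

int main(void)
{
        return probe();
}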
+3 -3
drivers/crypto/qce/dma.c
··· 64 64 err = dma_map_sg(dev, sg, 1, dir);
65 65 if (!err)
66 66 return -EFAULT;
67 - sg = scatterwalk_sg_next(sg);
67 + sg = sg_next(sg);
68 68 }
69 69 } else {
70 70 err = dma_map_sg(dev, sg, nents, dir);
··· 81 81 if (chained)
82 82 while (sg) {
83 83 dma_unmap_sg(dev, sg, 1, dir);
84 - sg = scatterwalk_sg_next(sg);
84 + sg = sg_next(sg);
85 85 }
86 86 else
87 87 dma_unmap_sg(dev, sg, nents, dir);
··· 100 100 nbytes -= sg->length;
101 101 if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
102 102 *chained = true;
103 - sg = scatterwalk_sg_next(sg);
103 + sg = sg_next(sg);
104 104 }
105 105
106 106 return nents;
+1 -1
drivers/crypto/qce/sha.c
··· 285 285 break; 286 286 len += sg_dma_len(sg); 287 287 sg_last = sg; 288 - sg = scatterwalk_sg_next(sg); 288 + sg = sg_next(sg); 289 289 } 290 290 291 291 if (!sg_last)
+1 -1
drivers/crypto/sahara.c
··· 940 940 break; 941 941 } 942 942 nbytes -= sg->length; 943 - sg = scatterwalk_sg_next(sg); 943 + sg = sg_next(sg); 944 944 } 945 945 946 946 return nbytes;
+4 -4
drivers/crypto/talitos.c
··· 743 743 if (unlikely(chained)) 744 744 while (sg) { 745 745 dma_map_sg(dev, sg, 1, dir); 746 - sg = scatterwalk_sg_next(sg); 746 + sg = sg_next(sg); 747 747 } 748 748 else 749 749 dma_map_sg(dev, sg, nents, dir); ··· 755 755 { 756 756 while (sg) { 757 757 dma_unmap_sg(dev, sg, 1, dir); 758 - sg = scatterwalk_sg_next(sg); 758 + sg = sg_next(sg); 759 759 } 760 760 } 761 761 ··· 915 915 link_tbl_ptr->j_extent = 0; 916 916 link_tbl_ptr++; 917 917 cryptlen -= sg_dma_len(sg); 918 - sg = scatterwalk_sg_next(sg); 918 + sg = sg_next(sg); 919 919 } 920 920 921 921 /* adjust (decrease) last one (or two) entry's len to cryptlen */ ··· 1102 1102 nbytes -= sg->length; 1103 1103 if (!sg_is_last(sg) && (sg + 1)->length == 0) 1104 1104 *chained = true; 1105 - sg = scatterwalk_sg_next(sg); 1105 + sg = sg_next(sg); 1106 1106 } 1107 1107 1108 1108 return sg_nents;
+5 -5
drivers/crypto/ux500/cryp/cryp_core.c
··· 479 479 .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO, 480 480 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, 481 481 .dst_maxburst = 4, 482 - }; 482 + }; 483 483 struct dma_slave_config cryp2mem = { 484 484 .direction = DMA_DEV_TO_MEM, 485 485 .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO, 486 486 .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, 487 487 .src_maxburst = 4, 488 - }; 488 + }; 489 489 490 490 dma_cap_zero(device_data->dma.mask); 491 491 dma_cap_set(DMA_SLAVE, device_data->dma.mask); ··· 814 814 815 815 while (nbytes > 0) { 816 816 nbytes -= sg->length; 817 - sg = scatterwalk_sg_next(sg); 817 + sg = sg_next(sg); 818 818 nents++; 819 819 } 820 820 ··· 1774 1774 static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); 1775 1775 1776 1776 static const struct of_device_id ux500_cryp_match[] = { 1777 - { .compatible = "stericsson,ux500-cryp" }, 1778 - { }, 1777 + { .compatible = "stericsson,ux500-cryp" }, 1778 + { }, 1779 1779 }; 1780 1780 1781 1781 static struct platform_driver cryp_driver = {
+1
include/crypto/if_alg.h
··· 50 50 void (*release)(void *private);
51 51 int (*setkey)(void *private, const u8 *key, unsigned int keylen);
52 52 int (*accept)(void *private, struct sock *sk);
53 + int (*setauthsize)(void *private, unsigned int authsize);
53 54
54 55 struct proto_ops *ops;
55 56 struct module *owner;
+1 -9
include/crypto/scatterwalk.h
··· 33 33 sg1[num - 1].page_link |= 0x01; 34 34 } 35 35 36 - static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) 37 - { 38 - if (sg_is_last(sg)) 39 - return NULL; 40 - 41 - return (++sg)->length ? sg : sg_chain_ptr(sg); 42 - } 43 - 44 36 static inline void scatterwalk_crypto_chain(struct scatterlist *head, 45 37 struct scatterlist *sg, 46 38 int chain, int num) 47 39 { 48 40 if (chain) { 49 41 head->length += sg->length; 50 - sg = scatterwalk_sg_next(sg); 42 + sg = sg_next(sg); 51 43 } 52 44 53 45 if (sg)
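scatterwalk_sg_next() goes away in favour of the generic sg_next(), and the drivers touched above (qce, sahara, talitos, ux500) are converted to the common helper; the shape of the loops does not change. A userspace stand-in for that walk, using a plain linked list since the real sg_next() additionally knows how to hop across chained scatterlist tables:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for a scatterlist entry: a length plus a link to the next. */
struct entry {
        size_t len;
        struct entry *next;
};

static struct entry *entry_next(struct entry *e)
{
        return e ? e->next : NULL;
}

static size_t total_len(struct entry *e)
{
        size_t sum = 0;

        /* Same shape as the converted loops:
         * while (sg) { ...; sg = sg_next(sg); } */
        for (; e; e = entry_next(e))
                sum += e->len;
        return sum;
}

int main(void)
{
        struct entry c = { 7, NULL }, b = { 0, &c }, a = { 3, &b };

        printf("total = %zu\n", total_len(&a));  /* prints 10 */
        return 0;
}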
+7 -4
include/linux/crypto.h
··· 1147 1147 * cipher operation completes. 1148 1148 * 1149 1149 * The callback function is registered with the ablkcipher_request handle and 1150 - * must comply with the following template: 1150 + * must comply with the following template 1151 1151 * 1152 1152 * void callback_function(struct crypto_async_request *req, int error) 1153 1153 */ ··· 1174 1174 * 1175 1175 * For encryption, the source is treated as the plaintext and the 1176 1176 * destination is the ciphertext. For a decryption operation, the use is 1177 - * reversed: the source is the ciphertext and the destination is the plaintext. 1177 + * reversed - the source is the ciphertext and the destination is the plaintext. 1178 1178 */ 1179 1179 static inline void ablkcipher_request_set_crypt( 1180 1180 struct ablkcipher_request *req, ··· 1412 1412 */ 1413 1413 static inline int crypto_aead_decrypt(struct aead_request *req) 1414 1414 { 1415 + if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req))) 1416 + return -EINVAL; 1417 + 1415 1418 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); 1416 1419 } 1417 1420 ··· 1509 1506 * completes 1510 1507 * 1511 1508 * The callback function is registered with the aead_request handle and 1512 - * must comply with the following template: 1509 + * must comply with the following template 1513 1510 * 1514 1511 * void callback_function(struct crypto_async_request *req, int error) 1515 1512 */ ··· 1536 1533 * 1537 1534 * For encryption, the source is treated as the plaintext and the 1538 1535 * destination is the ciphertext. For a decryption operation, the use is 1539 - * reversed: the source is the ciphertext and the destination is the plaintext. 1536 + * reversed - the source is the ciphertext and the destination is the plaintext. 1540 1537 * 1541 1538 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, 1542 1539 * the caller must concatenate the ciphertext followed by the
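The new check in crypto_aead_decrypt() rejects requests whose ciphertext is shorter than the authentication tag, since such input can never authenticate. The same idea in isolation, with an arbitrary tag size standing in for what crypto_aead_authsize() would report:

#include <stdio.h>
#include <errno.h>
#include <stddef.h>

/* Illustrative only: an AEAD ciphertext must be at least as long as the
 * tag appended to it, otherwise there is nothing meaningful to verify. */
#define TAG_SIZE 16

static int aead_decrypt_check(size_t cryptlen)
{
        if (cryptlen < TAG_SIZE)
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("8-byte input:  %d\n", aead_decrypt_check(8));   /* -EINVAL */
        printf("32-byte input: %d\n", aead_decrypt_check(32));  /* 0 */
        return 0;
}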
+4
include/linux/hw_random.h
··· 12 12 #ifndef LINUX_HWRANDOM_H_ 13 13 #define LINUX_HWRANDOM_H_ 14 14 15 + #include <linux/completion.h> 15 16 #include <linux/types.h> 16 17 #include <linux/list.h> 18 + #include <linux/kref.h> 17 19 18 20 /** 19 21 * struct hwrng - Hardware Random Number Generator driver ··· 46 44 47 45 /* internal. */ 48 46 struct list_head list; 47 + struct kref ref; 48 + struct completion cleanup_done; 49 49 }; 50 50 51 51 /** Register a new Hardware Random Number Generator driver. */
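The added kref and cleanup_done fields let the unregister path wait until the last in-flight user of an hwrng device has dropped its reference before the backing driver disappears. A single-threaded toy model of that reference-count-plus-notification idea; the real code uses struct kref and struct completion, and the waiting side actually sleeps:

#include <stdio.h>

struct rng {
        int refs;
        int cleanup_done;
};

static void rng_get(struct rng *r)
{
        r->refs++;
}

static void rng_put(struct rng *r)
{
        if (--r->refs == 0) {
                puts("last user gone, releasing hardware");
                r->cleanup_done = 1;    /* complete(&rng->cleanup_done) */
        }
}

int main(void)
{
        struct rng r = { 1, 0 };        /* registration holds the initial reference */

        rng_get(&r);    /* a reader takes a reference before touching the device */
        rng_put(&r);    /* unregister drops the registration reference ... */
        printf("safe to tear down: %s\n", r.cleanup_done ? "yes" : "no");
        rng_put(&r);    /* ... and must wait until the reader drops its own */
        printf("safe to tear down: %s\n", r.cleanup_done ? "yes" : "no");
        return 0;
}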
+5
lib/string.c
··· 596 596 * @s: Pointer to the start of the area.
597 597 * @count: The size of the area.
598 598 *
599 + * Note: usually using memset() is just fine (!), but in cases
600 + * where clearing out _local_ data at the end of a scope is
601 + * necessary, memzero_explicit() should be used instead in
602 + * order to prevent the compiler from optimising away zeroing.
603 + *
599 604 * memzero_explicit() doesn't need an arch-specific version as
600 605 * it just invokes the one of memset() implicitly.
601 606 */
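The comment added above spells out why a plain memset() of a local buffer that is about to go out of scope can legally be optimised away, and when memzero_explicit() is the right tool. One well-known userspace trick to keep such a wipe from being elided, shown purely as an illustration and not as how the kernel implements memzero_explicit(), is to call memset through a volatile function pointer:

#include <string.h>
#include <stdio.h>

/* The compiler cannot prove a call through a volatile function pointer
 * has no visible effect, so the final wipe of 'key' is not dropped. */
static void *(*const volatile memset_keep)(void *, int, size_t) = memset;

static void use_secret(void)
{
        char key[32] = "correct horse battery staple";

        printf("using %zu-byte key\n", sizeof(key));
        /* A plain memset(key, 0, sizeof(key)) here could be removed,
         * because 'key' is never read again after this point. */
        memset_keep(key, 0, sizeof(key));
}

int main(void)
{
        use_secret();
        return 0;
}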