Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (64 commits)
[BLOCK] dm-crypt: trivial comment improvements
[CRYPTO] api: Deprecate crypto_digest_* and crypto_alg_available
[CRYPTO] padlock: Convert padlock-sha to use crypto_hash
[CRYPTO] users: Use crypto_comp and crypto_has_*
[CRYPTO] api: Add crypto_comp and crypto_has_*
[CRYPTO] users: Use crypto_hash interface instead of crypto_digest
[SCSI] iscsi: Use crypto_hash interface instead of crypto_digest
[CRYPTO] digest: Remove old HMAC implementation
[CRYPTO] doc: Update documentation for hash and me
[SCTP]: Use HMAC template and hash interface
[IPSEC]: Use HMAC template and hash interface
[CRYPTO] tcrypt: Use HMAC template and hash interface
[CRYPTO] hmac: Add crypto template implementation
[CRYPTO] digest: Added user API for new hash type
[CRYPTO] api: Mark parts of cipher interface as deprecated
[PATCH] scatterlist: Add const to sg_set_buf/sg_init_one pointer argument
[CRYPTO] drivers: Remove obsolete block cipher operations
[CRYPTO] users: Use block ciphers where applicable
[SUNRPC] GSS: Use block ciphers where applicable
[IPSEC] ESP: Use block ciphers where applicable
...

+7806 -2860
+22 -14
Documentation/crypto/api-intro.txt
··· 19 19 API. 20 20 21 21 'Transforms' are user-instantiated objects, which maintain state, handle all 22 - of the implementation logic (e.g. manipulating page vectors), provide an 23 - abstraction to the underlying algorithms, and handle common logical 24 - operations (e.g. cipher modes, HMAC for digests). However, at the user 22 + of the implementation logic (e.g. manipulating page vectors) and provide an 23 + abstraction to the underlying algorithms. However, at the user 25 24 level they are very simple. 26 25 27 26 Conceptually, the API layering looks like this: 28 27 29 28 [transform api] (user interface) 30 - [transform ops] (per-type logic glue e.g. cipher.c, digest.c) 29 + [transform ops] (per-type logic glue e.g. cipher.c, compress.c) 31 30 [algorithm api] (for registering algorithms) 32 31 33 32 The idea is to make the user interface and algorithm registration API ··· 43 44 Here's an example of how to use the API: 44 45 45 46 #include <linux/crypto.h> 47 + #include <linux/err.h> 48 + #include <linux/scatterlist.h> 46 49 47 50 struct scatterlist sg[2]; 48 51 char result[128]; 49 - struct crypto_tfm *tfm; 52 + struct crypto_hash *tfm; 53 + struct hash_desc desc; 50 54 51 - tfm = crypto_alloc_tfm("md5", 0); 52 - if (tfm == NULL) 55 + tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 56 + if (IS_ERR(tfm)) 53 57 fail(); 54 58 55 59 /* ... set up the scatterlists ... */ 60 + 61 + desc.tfm = tfm; 62 + desc.flags = 0; 56 63 57 - crypto_digest_init(tfm); 58 - crypto_digest_update(tfm, &sg, 2); 59 - crypto_digest_final(tfm, result); 64 + if (crypto_hash_digest(&desc, &sg, 2, result)) 65 + fail(); 60 66 61 - crypto_free_tfm(tfm); 67 + crypto_free_hash(tfm); 62 68 63 69 64 70 Many real examples are available in the regression test module (tcrypt.c). ··· 130 126 BUGS 131 127 132 128 Send bug reports to: 133 - James Morris <jmorris@redhat.com> 129 + Herbert Xu <herbert@gondor.apana.org.au> 134 130 Cc: David S. 
Miller <davem@redhat.com> 135 131 136 132 ··· 138 134 139 135 For further patches and various updates, including the current TODO 140 136 list, see: 141 - http://samba.org/~jamesm/crypto/ 137 + http://gondor.apana.org.au/~herbert/crypto/ 142 138 143 139 144 140 AUTHORS 145 141 146 142 James Morris 147 143 David S. Miller 144 + Herbert Xu 148 145 149 146 150 147 CREDITS ··· 243 238 Tiger algorithm contributors: 244 239 Aaron Grothe 245 240 241 + VIA PadLock contributors: 242 + Michal Ludvig 243 + 246 244 Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com> 247 245 248 246 Please send any credits updates or corrections to: 249 - James Morris <jmorris@redhat.com> 247 + Herbert Xu <herbert@gondor.apana.org.au> 250 248
+3
arch/i386/crypto/Makefile
··· 5 5 # 6 6 7 7 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o 8 + obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o 8 9 9 10 aes-i586-y := aes-i586-asm.o aes.o 11 + twofish-i586-y := twofish-i586-asm.o twofish.o 12 +
+2 -1
arch/i386/crypto/aes.c
··· 379 379 } 380 380 381 381 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 382 - unsigned int key_len, u32 *flags) 382 + unsigned int key_len) 383 383 { 384 384 int i; 385 385 u32 ss[8]; 386 386 struct aes_ctx *ctx = crypto_tfm_ctx(tfm); 387 387 const __le32 *key = (const __le32 *)in_key; 388 + u32 *flags = &tfm->crt_flags; 388 389 389 390 /* encryption schedule */ 390 391
+335
arch/i386/crypto/twofish-i586-asm.S
··· 1 + /*************************************************************************** 2 + * Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * 3 + * * 4 + * This program is free software; you can redistribute it and/or modify * 5 + * it under the terms of the GNU General Public License as published by * 6 + * the Free Software Foundation; either version 2 of the License, or * 7 + * (at your option) any later version. * 8 + * * 9 + * This program is distributed in the hope that it will be useful, * 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of * 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 12 + * GNU General Public License for more details. * 13 + * * 14 + * You should have received a copy of the GNU General Public License * 15 + * along with this program; if not, write to the * 16 + * Free Software Foundation, Inc., * 17 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * 18 + ***************************************************************************/ 19 + 20 + .file "twofish-i586-asm.S" 21 + .text 22 + 23 + #include <asm/asm-offsets.h> 24 + 25 + /* return adress at 0 */ 26 + 27 + #define in_blk 12 /* input byte array address parameter*/ 28 + #define out_blk 8 /* output byte array address parameter*/ 29 + #define tfm 4 /* Twofish context structure */ 30 + 31 + #define a_offset 0 32 + #define b_offset 4 33 + #define c_offset 8 34 + #define d_offset 12 35 + 36 + /* Structure of the crypto context struct*/ 37 + 38 + #define s0 0 /* S0 Array 256 Words each */ 39 + #define s1 1024 /* S1 Array */ 40 + #define s2 2048 /* S2 Array */ 41 + #define s3 3072 /* S3 Array */ 42 + #define w 4096 /* 8 whitening keys (word) */ 43 + #define k 4128 /* key 1-32 ( word ) */ 44 + 45 + /* define a few register aliases to allow macro substitution */ 46 + 47 + #define R0D %eax 48 + #define R0B %al 49 + #define R0H %ah 50 + 51 + #define R1D %ebx 52 + #define R1B %bl 53 + #define R1H %bh 54 + 55 + #define R2D %ecx 56 + 
#define R2B %cl 57 + #define R2H %ch 58 + 59 + #define R3D %edx 60 + #define R3B %dl 61 + #define R3H %dh 62 + 63 + 64 + /* performs input whitening */ 65 + #define input_whitening(src,context,offset)\ 66 + xor w+offset(context), src; 67 + 68 + /* performs input whitening */ 69 + #define output_whitening(src,context,offset)\ 70 + xor w+16+offset(context), src; 71 + 72 + /* 73 + * a input register containing a (rotated 16) 74 + * b input register containing b 75 + * c input register containing c 76 + * d input register containing d (already rol $1) 77 + * operations on a and b are interleaved to increase performance 78 + */ 79 + #define encrypt_round(a,b,c,d,round)\ 80 + push d ## D;\ 81 + movzx b ## B, %edi;\ 82 + mov s1(%ebp,%edi,4),d ## D;\ 83 + movzx a ## B, %edi;\ 84 + mov s2(%ebp,%edi,4),%esi;\ 85 + movzx b ## H, %edi;\ 86 + ror $16, b ## D;\ 87 + xor s2(%ebp,%edi,4),d ## D;\ 88 + movzx a ## H, %edi;\ 89 + ror $16, a ## D;\ 90 + xor s3(%ebp,%edi,4),%esi;\ 91 + movzx b ## B, %edi;\ 92 + xor s3(%ebp,%edi,4),d ## D;\ 93 + movzx a ## B, %edi;\ 94 + xor (%ebp,%edi,4), %esi;\ 95 + movzx b ## H, %edi;\ 96 + ror $15, b ## D;\ 97 + xor (%ebp,%edi,4), d ## D;\ 98 + movzx a ## H, %edi;\ 99 + xor s1(%ebp,%edi,4),%esi;\ 100 + pop %edi;\ 101 + add d ## D, %esi;\ 102 + add %esi, d ## D;\ 103 + add k+round(%ebp), %esi;\ 104 + xor %esi, c ## D;\ 105 + rol $15, c ## D;\ 106 + add k+4+round(%ebp),d ## D;\ 107 + xor %edi, d ## D; 108 + 109 + /* 110 + * a input register containing a (rotated 16) 111 + * b input register containing b 112 + * c input register containing c 113 + * d input register containing d (already rol $1) 114 + * operations on a and b are interleaved to increase performance 115 + * last round has different rotations for the output preparation 116 + */ 117 + #define encrypt_last_round(a,b,c,d,round)\ 118 + push d ## D;\ 119 + movzx b ## B, %edi;\ 120 + mov s1(%ebp,%edi,4),d ## D;\ 121 + movzx a ## B, %edi;\ 122 + mov s2(%ebp,%edi,4),%esi;\ 123 + movzx b ## H, 
%edi;\ 124 + ror $16, b ## D;\ 125 + xor s2(%ebp,%edi,4),d ## D;\ 126 + movzx a ## H, %edi;\ 127 + ror $16, a ## D;\ 128 + xor s3(%ebp,%edi,4),%esi;\ 129 + movzx b ## B, %edi;\ 130 + xor s3(%ebp,%edi,4),d ## D;\ 131 + movzx a ## B, %edi;\ 132 + xor (%ebp,%edi,4), %esi;\ 133 + movzx b ## H, %edi;\ 134 + ror $16, b ## D;\ 135 + xor (%ebp,%edi,4), d ## D;\ 136 + movzx a ## H, %edi;\ 137 + xor s1(%ebp,%edi,4),%esi;\ 138 + pop %edi;\ 139 + add d ## D, %esi;\ 140 + add %esi, d ## D;\ 141 + add k+round(%ebp), %esi;\ 142 + xor %esi, c ## D;\ 143 + ror $1, c ## D;\ 144 + add k+4+round(%ebp),d ## D;\ 145 + xor %edi, d ## D; 146 + 147 + /* 148 + * a input register containing a 149 + * b input register containing b (rotated 16) 150 + * c input register containing c 151 + * d input register containing d (already rol $1) 152 + * operations on a and b are interleaved to increase performance 153 + */ 154 + #define decrypt_round(a,b,c,d,round)\ 155 + push c ## D;\ 156 + movzx a ## B, %edi;\ 157 + mov (%ebp,%edi,4), c ## D;\ 158 + movzx b ## B, %edi;\ 159 + mov s3(%ebp,%edi,4),%esi;\ 160 + movzx a ## H, %edi;\ 161 + ror $16, a ## D;\ 162 + xor s1(%ebp,%edi,4),c ## D;\ 163 + movzx b ## H, %edi;\ 164 + ror $16, b ## D;\ 165 + xor (%ebp,%edi,4), %esi;\ 166 + movzx a ## B, %edi;\ 167 + xor s2(%ebp,%edi,4),c ## D;\ 168 + movzx b ## B, %edi;\ 169 + xor s1(%ebp,%edi,4),%esi;\ 170 + movzx a ## H, %edi;\ 171 + ror $15, a ## D;\ 172 + xor s3(%ebp,%edi,4),c ## D;\ 173 + movzx b ## H, %edi;\ 174 + xor s2(%ebp,%edi,4),%esi;\ 175 + pop %edi;\ 176 + add %esi, c ## D;\ 177 + add c ## D, %esi;\ 178 + add k+round(%ebp), c ## D;\ 179 + xor %edi, c ## D;\ 180 + add k+4+round(%ebp),%esi;\ 181 + xor %esi, d ## D;\ 182 + rol $15, d ## D; 183 + 184 + /* 185 + * a input register containing a 186 + * b input register containing b (rotated 16) 187 + * c input register containing c 188 + * d input register containing d (already rol $1) 189 + * operations on a and b are interleaved to increase performance 190 + 
* last round has different rotations for the output preparation 191 + */ 192 + #define decrypt_last_round(a,b,c,d,round)\ 193 + push c ## D;\ 194 + movzx a ## B, %edi;\ 195 + mov (%ebp,%edi,4), c ## D;\ 196 + movzx b ## B, %edi;\ 197 + mov s3(%ebp,%edi,4),%esi;\ 198 + movzx a ## H, %edi;\ 199 + ror $16, a ## D;\ 200 + xor s1(%ebp,%edi,4),c ## D;\ 201 + movzx b ## H, %edi;\ 202 + ror $16, b ## D;\ 203 + xor (%ebp,%edi,4), %esi;\ 204 + movzx a ## B, %edi;\ 205 + xor s2(%ebp,%edi,4),c ## D;\ 206 + movzx b ## B, %edi;\ 207 + xor s1(%ebp,%edi,4),%esi;\ 208 + movzx a ## H, %edi;\ 209 + ror $16, a ## D;\ 210 + xor s3(%ebp,%edi,4),c ## D;\ 211 + movzx b ## H, %edi;\ 212 + xor s2(%ebp,%edi,4),%esi;\ 213 + pop %edi;\ 214 + add %esi, c ## D;\ 215 + add c ## D, %esi;\ 216 + add k+round(%ebp), c ## D;\ 217 + xor %edi, c ## D;\ 218 + add k+4+round(%ebp),%esi;\ 219 + xor %esi, d ## D;\ 220 + ror $1, d ## D; 221 + 222 + .align 4 223 + .global twofish_enc_blk 224 + .global twofish_dec_blk 225 + 226 + twofish_enc_blk: 227 + push %ebp /* save registers according to calling convention*/ 228 + push %ebx 229 + push %esi 230 + push %edi 231 + 232 + mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */ 233 + add $crypto_tfm_ctx_offset, %ebp /* ctx adress */ 234 + mov in_blk+16(%esp),%edi /* input adress in edi */ 235 + 236 + mov (%edi), %eax 237 + mov b_offset(%edi), %ebx 238 + mov c_offset(%edi), %ecx 239 + mov d_offset(%edi), %edx 240 + input_whitening(%eax,%ebp,a_offset) 241 + ror $16, %eax 242 + input_whitening(%ebx,%ebp,b_offset) 243 + input_whitening(%ecx,%ebp,c_offset) 244 + input_whitening(%edx,%ebp,d_offset) 245 + rol $1, %edx 246 + 247 + encrypt_round(R0,R1,R2,R3,0); 248 + encrypt_round(R2,R3,R0,R1,8); 249 + encrypt_round(R0,R1,R2,R3,2*8); 250 + encrypt_round(R2,R3,R0,R1,3*8); 251 + encrypt_round(R0,R1,R2,R3,4*8); 252 + encrypt_round(R2,R3,R0,R1,5*8); 253 + encrypt_round(R0,R1,R2,R3,6*8); 254 + encrypt_round(R2,R3,R0,R1,7*8); 255 + 
encrypt_round(R0,R1,R2,R3,8*8); 256 + encrypt_round(R2,R3,R0,R1,9*8); 257 + encrypt_round(R0,R1,R2,R3,10*8); 258 + encrypt_round(R2,R3,R0,R1,11*8); 259 + encrypt_round(R0,R1,R2,R3,12*8); 260 + encrypt_round(R2,R3,R0,R1,13*8); 261 + encrypt_round(R0,R1,R2,R3,14*8); 262 + encrypt_last_round(R2,R3,R0,R1,15*8); 263 + 264 + output_whitening(%eax,%ebp,c_offset) 265 + output_whitening(%ebx,%ebp,d_offset) 266 + output_whitening(%ecx,%ebp,a_offset) 267 + output_whitening(%edx,%ebp,b_offset) 268 + mov out_blk+16(%esp),%edi; 269 + mov %eax, c_offset(%edi) 270 + mov %ebx, d_offset(%edi) 271 + mov %ecx, (%edi) 272 + mov %edx, b_offset(%edi) 273 + 274 + pop %edi 275 + pop %esi 276 + pop %ebx 277 + pop %ebp 278 + mov $1, %eax 279 + ret 280 + 281 + twofish_dec_blk: 282 + push %ebp /* save registers according to calling convention*/ 283 + push %ebx 284 + push %esi 285 + push %edi 286 + 287 + 288 + mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */ 289 + add $crypto_tfm_ctx_offset, %ebp /* ctx adress */ 290 + mov in_blk+16(%esp),%edi /* input adress in edi */ 291 + 292 + mov (%edi), %eax 293 + mov b_offset(%edi), %ebx 294 + mov c_offset(%edi), %ecx 295 + mov d_offset(%edi), %edx 296 + output_whitening(%eax,%ebp,a_offset) 297 + output_whitening(%ebx,%ebp,b_offset) 298 + ror $16, %ebx 299 + output_whitening(%ecx,%ebp,c_offset) 300 + output_whitening(%edx,%ebp,d_offset) 301 + rol $1, %ecx 302 + 303 + decrypt_round(R0,R1,R2,R3,15*8); 304 + decrypt_round(R2,R3,R0,R1,14*8); 305 + decrypt_round(R0,R1,R2,R3,13*8); 306 + decrypt_round(R2,R3,R0,R1,12*8); 307 + decrypt_round(R0,R1,R2,R3,11*8); 308 + decrypt_round(R2,R3,R0,R1,10*8); 309 + decrypt_round(R0,R1,R2,R3,9*8); 310 + decrypt_round(R2,R3,R0,R1,8*8); 311 + decrypt_round(R0,R1,R2,R3,7*8); 312 + decrypt_round(R2,R3,R0,R1,6*8); 313 + decrypt_round(R0,R1,R2,R3,5*8); 314 + decrypt_round(R2,R3,R0,R1,4*8); 315 + decrypt_round(R0,R1,R2,R3,3*8); 316 + decrypt_round(R2,R3,R0,R1,2*8); 317 + 
decrypt_round(R0,R1,R2,R3,1*8); 318 + decrypt_last_round(R2,R3,R0,R1,0); 319 + 320 + input_whitening(%eax,%ebp,c_offset) 321 + input_whitening(%ebx,%ebp,d_offset) 322 + input_whitening(%ecx,%ebp,a_offset) 323 + input_whitening(%edx,%ebp,b_offset) 324 + mov out_blk+16(%esp),%edi; 325 + mov %eax, c_offset(%edi) 326 + mov %ebx, d_offset(%edi) 327 + mov %ecx, (%edi) 328 + mov %edx, b_offset(%edi) 329 + 330 + pop %edi 331 + pop %esi 332 + pop %ebx 333 + pop %ebp 334 + mov $1, %eax 335 + ret
+97
arch/i386/crypto/twofish.c
··· 1 + /* 2 + * Glue Code for optimized 586 assembler version of TWOFISH 3 + * 4 + * Originally Twofish for GPG 5 + * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998 6 + * 256-bit key length added March 20, 1999 7 + * Some modifications to reduce the text size by Werner Koch, April, 1998 8 + * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com> 9 + * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net> 10 + * 11 + * The original author has disclaimed all copyright interest in this 12 + * code and thus put it in the public domain. The subsequent authors 13 + * have put this under the GNU General Public License. 14 + * 15 + * This program is free software; you can redistribute it and/or modify 16 + * it under the terms of the GNU General Public License as published by 17 + * the Free Software Foundation; either version 2 of the License, or 18 + * (at your option) any later version. 19 + * 20 + * This program is distributed in the hope that it will be useful, 21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 + * GNU General Public License for more details. 24 + * 25 + * You should have received a copy of the GNU General Public License 26 + * along with this program; if not, write to the Free Software 27 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 28 + * USA 29 + * 30 + * This code is a "clean room" implementation, written from the paper 31 + * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, 32 + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available 33 + * through http://www.counterpane.com/twofish.html 34 + * 35 + * For background information on multiplication in finite fields, used for 36 + * the matrix operations in the key schedule, see the book _Contemporary 37 + * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the 38 + * Third Edition. 
39 + */ 40 + 41 + #include <crypto/twofish.h> 42 + #include <linux/crypto.h> 43 + #include <linux/init.h> 44 + #include <linux/module.h> 45 + #include <linux/types.h> 46 + 47 + 48 + asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 49 + asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 50 + 51 + static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 52 + { 53 + twofish_enc_blk(tfm, dst, src); 54 + } 55 + 56 + static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 57 + { 58 + twofish_dec_blk(tfm, dst, src); 59 + } 60 + 61 + static struct crypto_alg alg = { 62 + .cra_name = "twofish", 63 + .cra_driver_name = "twofish-i586", 64 + .cra_priority = 200, 65 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 66 + .cra_blocksize = TF_BLOCK_SIZE, 67 + .cra_ctxsize = sizeof(struct twofish_ctx), 68 + .cra_alignmask = 3, 69 + .cra_module = THIS_MODULE, 70 + .cra_list = LIST_HEAD_INIT(alg.cra_list), 71 + .cra_u = { 72 + .cipher = { 73 + .cia_min_keysize = TF_MIN_KEY_SIZE, 74 + .cia_max_keysize = TF_MAX_KEY_SIZE, 75 + .cia_setkey = twofish_setkey, 76 + .cia_encrypt = twofish_encrypt, 77 + .cia_decrypt = twofish_decrypt 78 + } 79 + } 80 + }; 81 + 82 + static int __init init(void) 83 + { 84 + return crypto_register_alg(&alg); 85 + } 86 + 87 + static void __exit fini(void) 88 + { 89 + crypto_unregister_alg(&alg); 90 + } 91 + 92 + module_init(init); 93 + module_exit(fini); 94 + 95 + MODULE_LICENSE("GPL"); 96 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized"); 97 + MODULE_ALIAS("twofish");
+219 -116
arch/s390/crypto/aes_s390.c
··· 16 16 * 17 17 */ 18 18 19 + #include <crypto/algapi.h> 19 20 #include <linux/module.h> 20 21 #include <linux/init.h> 21 - #include <linux/crypto.h> 22 22 #include "crypt_s390.h" 23 23 24 24 #define AES_MIN_KEY_SIZE 16 ··· 34 34 struct s390_aes_ctx { 35 35 u8 iv[AES_BLOCK_SIZE]; 36 36 u8 key[AES_MAX_KEY_SIZE]; 37 + long enc; 38 + long dec; 37 39 int key_len; 38 40 }; 39 41 40 42 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 41 - unsigned int key_len, u32 *flags) 43 + unsigned int key_len) 42 44 { 43 45 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 46 + u32 *flags = &tfm->crt_flags; 44 47 45 48 switch (key_len) { 46 49 case 16: ··· 113 110 } 114 111 } 115 112 116 - static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, 117 - const u8 *in, unsigned int nbytes) 118 - { 119 - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); 120 - int ret; 121 - 122 - /* only use complete blocks */ 123 - nbytes &= ~(AES_BLOCK_SIZE - 1); 124 - 125 - switch (sctx->key_len) { 126 - case 16: 127 - ret = crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes); 128 - BUG_ON((ret < 0) || (ret != nbytes)); 129 - break; 130 - case 24: 131 - ret = crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes); 132 - BUG_ON((ret < 0) || (ret != nbytes)); 133 - break; 134 - case 32: 135 - ret = crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes); 136 - BUG_ON((ret < 0) || (ret != nbytes)); 137 - break; 138 - } 139 - return nbytes; 140 - } 141 - 142 - static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, 143 - const u8 *in, unsigned int nbytes) 144 - { 145 - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); 146 - int ret; 147 - 148 - /* only use complete blocks */ 149 - nbytes &= ~(AES_BLOCK_SIZE - 1); 150 - 151 - switch (sctx->key_len) { 152 - case 16: 153 - ret = crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes); 154 - BUG_ON((ret < 0) || (ret != nbytes)); 155 - break; 156 - case 24: 157 
- ret = crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes); 158 - BUG_ON((ret < 0) || (ret != nbytes)); 159 - break; 160 - case 32: 161 - ret = crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes); 162 - BUG_ON((ret < 0) || (ret != nbytes)); 163 - break; 164 - } 165 - return nbytes; 166 - } 167 - 168 - static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, 169 - const u8 *in, unsigned int nbytes) 170 - { 171 - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); 172 - int ret; 173 - 174 - /* only use complete blocks */ 175 - nbytes &= ~(AES_BLOCK_SIZE - 1); 176 - 177 - memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE); 178 - switch (sctx->key_len) { 179 - case 16: 180 - ret = crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes); 181 - BUG_ON((ret < 0) || (ret != nbytes)); 182 - break; 183 - case 24: 184 - ret = crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes); 185 - BUG_ON((ret < 0) || (ret != nbytes)); 186 - break; 187 - case 32: 188 - ret = crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes); 189 - BUG_ON((ret < 0) || (ret != nbytes)); 190 - break; 191 - } 192 - memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE); 193 - 194 - return nbytes; 195 - } 196 - 197 - static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, 198 - const u8 *in, unsigned int nbytes) 199 - { 200 - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); 201 - int ret; 202 - 203 - /* only use complete blocks */ 204 - nbytes &= ~(AES_BLOCK_SIZE - 1); 205 - 206 - memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE); 207 - switch (sctx->key_len) { 208 - case 16: 209 - ret = crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes); 210 - BUG_ON((ret < 0) || (ret != nbytes)); 211 - break; 212 - case 24: 213 - ret = crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes); 214 - BUG_ON((ret < 0) || (ret != nbytes)); 215 - break; 216 - case 32: 217 - ret = crypt_s390_kmc(KMC_AES_256_DECRYPT, 
&sctx->iv, out, in, nbytes); 218 - BUG_ON((ret < 0) || (ret != nbytes)); 219 - break; 220 - } 221 - return nbytes; 222 - } 223 - 224 113 225 114 static struct crypto_alg aes_alg = { 226 115 .cra_name = "aes", 116 + .cra_driver_name = "aes-s390", 117 + .cra_priority = CRYPT_S390_PRIORITY, 227 118 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 228 119 .cra_blocksize = AES_BLOCK_SIZE, 229 120 .cra_ctxsize = sizeof(struct s390_aes_ctx), ··· 130 233 .cia_setkey = aes_set_key, 131 234 .cia_encrypt = aes_encrypt, 132 235 .cia_decrypt = aes_decrypt, 133 - .cia_encrypt_ecb = aes_encrypt_ecb, 134 - .cia_decrypt_ecb = aes_decrypt_ecb, 135 - .cia_encrypt_cbc = aes_encrypt_cbc, 136 - .cia_decrypt_cbc = aes_decrypt_cbc, 236 + } 237 + } 238 + }; 239 + 240 + static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 241 + unsigned int key_len) 242 + { 243 + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 244 + 245 + switch (key_len) { 246 + case 16: 247 + sctx->enc = KM_AES_128_ENCRYPT; 248 + sctx->dec = KM_AES_128_DECRYPT; 249 + break; 250 + case 24: 251 + sctx->enc = KM_AES_192_ENCRYPT; 252 + sctx->dec = KM_AES_192_DECRYPT; 253 + break; 254 + case 32: 255 + sctx->enc = KM_AES_256_ENCRYPT; 256 + sctx->dec = KM_AES_256_DECRYPT; 257 + break; 258 + } 259 + 260 + return aes_set_key(tfm, in_key, key_len); 261 + } 262 + 263 + static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param, 264 + struct blkcipher_walk *walk) 265 + { 266 + int ret = blkcipher_walk_virt(desc, walk); 267 + unsigned int nbytes; 268 + 269 + while ((nbytes = walk->nbytes)) { 270 + /* only use complete blocks */ 271 + unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); 272 + u8 *out = walk->dst.virt.addr; 273 + u8 *in = walk->src.virt.addr; 274 + 275 + ret = crypt_s390_km(func, param, out, in, n); 276 + BUG_ON((ret < 0) || (ret != n)); 277 + 278 + nbytes &= AES_BLOCK_SIZE - 1; 279 + ret = blkcipher_walk_done(desc, walk, nbytes); 280 + } 281 + 282 + return ret; 283 + } 284 + 285 + static int 
ecb_aes_encrypt(struct blkcipher_desc *desc, 286 + struct scatterlist *dst, struct scatterlist *src, 287 + unsigned int nbytes) 288 + { 289 + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 290 + struct blkcipher_walk walk; 291 + 292 + blkcipher_walk_init(&walk, dst, src, nbytes); 293 + return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); 294 + } 295 + 296 + static int ecb_aes_decrypt(struct blkcipher_desc *desc, 297 + struct scatterlist *dst, struct scatterlist *src, 298 + unsigned int nbytes) 299 + { 300 + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 301 + struct blkcipher_walk walk; 302 + 303 + blkcipher_walk_init(&walk, dst, src, nbytes); 304 + return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); 305 + } 306 + 307 + static struct crypto_alg ecb_aes_alg = { 308 + .cra_name = "ecb(aes)", 309 + .cra_driver_name = "ecb-aes-s390", 310 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 311 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 312 + .cra_blocksize = AES_BLOCK_SIZE, 313 + .cra_ctxsize = sizeof(struct s390_aes_ctx), 314 + .cra_type = &crypto_blkcipher_type, 315 + .cra_module = THIS_MODULE, 316 + .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), 317 + .cra_u = { 318 + .blkcipher = { 319 + .min_keysize = AES_MIN_KEY_SIZE, 320 + .max_keysize = AES_MAX_KEY_SIZE, 321 + .setkey = ecb_aes_set_key, 322 + .encrypt = ecb_aes_encrypt, 323 + .decrypt = ecb_aes_decrypt, 324 + } 325 + } 326 + }; 327 + 328 + static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 329 + unsigned int key_len) 330 + { 331 + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 332 + 333 + switch (key_len) { 334 + case 16: 335 + sctx->enc = KMC_AES_128_ENCRYPT; 336 + sctx->dec = KMC_AES_128_DECRYPT; 337 + break; 338 + case 24: 339 + sctx->enc = KMC_AES_192_ENCRYPT; 340 + sctx->dec = KMC_AES_192_DECRYPT; 341 + break; 342 + case 32: 343 + sctx->enc = KMC_AES_256_ENCRYPT; 344 + sctx->dec = KMC_AES_256_DECRYPT; 345 + break; 346 + } 347 + 348 + return 
aes_set_key(tfm, in_key, key_len); 349 + } 350 + 351 + static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, 352 + struct blkcipher_walk *walk) 353 + { 354 + int ret = blkcipher_walk_virt(desc, walk); 355 + unsigned int nbytes = walk->nbytes; 356 + 357 + if (!nbytes) 358 + goto out; 359 + 360 + memcpy(param, walk->iv, AES_BLOCK_SIZE); 361 + do { 362 + /* only use complete blocks */ 363 + unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); 364 + u8 *out = walk->dst.virt.addr; 365 + u8 *in = walk->src.virt.addr; 366 + 367 + ret = crypt_s390_kmc(func, param, out, in, n); 368 + BUG_ON((ret < 0) || (ret != n)); 369 + 370 + nbytes &= AES_BLOCK_SIZE - 1; 371 + ret = blkcipher_walk_done(desc, walk, nbytes); 372 + } while ((nbytes = walk->nbytes)); 373 + memcpy(walk->iv, param, AES_BLOCK_SIZE); 374 + 375 + out: 376 + return ret; 377 + } 378 + 379 + static int cbc_aes_encrypt(struct blkcipher_desc *desc, 380 + struct scatterlist *dst, struct scatterlist *src, 381 + unsigned int nbytes) 382 + { 383 + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 384 + struct blkcipher_walk walk; 385 + 386 + blkcipher_walk_init(&walk, dst, src, nbytes); 387 + return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); 388 + } 389 + 390 + static int cbc_aes_decrypt(struct blkcipher_desc *desc, 391 + struct scatterlist *dst, struct scatterlist *src, 392 + unsigned int nbytes) 393 + { 394 + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 395 + struct blkcipher_walk walk; 396 + 397 + blkcipher_walk_init(&walk, dst, src, nbytes); 398 + return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); 399 + } 400 + 401 + static struct crypto_alg cbc_aes_alg = { 402 + .cra_name = "cbc(aes)", 403 + .cra_driver_name = "cbc-aes-s390", 404 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 405 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 406 + .cra_blocksize = AES_BLOCK_SIZE, 407 + .cra_ctxsize = sizeof(struct s390_aes_ctx), 408 + .cra_type = &crypto_blkcipher_type, 409 + 
.cra_module = THIS_MODULE, 410 + .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), 411 + .cra_u = { 412 + .blkcipher = { 413 + .min_keysize = AES_MIN_KEY_SIZE, 414 + .max_keysize = AES_MAX_KEY_SIZE, 415 + .ivsize = AES_BLOCK_SIZE, 416 + .setkey = cbc_aes_set_key, 417 + .encrypt = cbc_aes_encrypt, 418 + .decrypt = cbc_aes_decrypt, 137 419 } 138 420 } 139 421 }; ··· 332 256 return -ENOSYS; 333 257 334 258 ret = crypto_register_alg(&aes_alg); 335 - if (ret != 0) 336 - printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n"); 259 + if (ret != 0) { 260 + printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n"); 261 + goto aes_err; 262 + } 263 + 264 + ret = crypto_register_alg(&ecb_aes_alg); 265 + if (ret != 0) { 266 + printk(KERN_INFO 267 + "crypt_s390: ecb-aes-s390 couldn't be loaded.\n"); 268 + goto ecb_aes_err; 269 + } 270 + 271 + ret = crypto_register_alg(&cbc_aes_alg); 272 + if (ret != 0) { 273 + printk(KERN_INFO 274 + "crypt_s390: cbc-aes-s390 couldn't be loaded.\n"); 275 + goto cbc_aes_err; 276 + } 277 + 278 + out: 337 279 return ret; 280 + 281 + cbc_aes_err: 282 + crypto_unregister_alg(&ecb_aes_alg); 283 + ecb_aes_err: 284 + crypto_unregister_alg(&aes_alg); 285 + aes_err: 286 + goto out; 338 287 } 339 288 340 289 static void __exit aes_fini(void) 341 290 { 291 + crypto_unregister_alg(&cbc_aes_alg); 292 + crypto_unregister_alg(&ecb_aes_alg); 342 293 crypto_unregister_alg(&aes_alg); 343 294 } 344 295
+3
arch/s390/crypto/crypt_s390.h
··· 20 20 #define CRYPT_S390_OP_MASK 0xFF00 21 21 #define CRYPT_S390_FUNC_MASK 0x00FF 22 22 23 + #define CRYPT_S390_PRIORITY 300 24 + #define CRYPT_S390_COMPOSITE_PRIORITY 400 25 + 23 26 /* s930 cryptographic operations */ 24 27 enum crypt_s390_operations { 25 28 CRYPT_S390_KM = 0x0100,
+388 -219
arch/s390/crypto/des_s390.c
··· 13 13 * (at your option) any later version. 14 14 * 15 15 */ 16 + 17 + #include <crypto/algapi.h> 16 18 #include <linux/init.h> 17 19 #include <linux/module.h> 18 - #include <linux/crypto.h> 19 20 20 21 #include "crypt_s390.h" 21 22 #include "crypto_des.h" ··· 46 45 }; 47 46 48 47 static int des_setkey(struct crypto_tfm *tfm, const u8 *key, 49 - unsigned int keylen, u32 *flags) 48 + unsigned int keylen) 50 49 { 51 50 struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); 51 + u32 *flags = &tfm->crt_flags; 52 52 int ret; 53 53 54 54 /* test if key is valid (not a weak key) */ ··· 73 71 crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE); 74 72 } 75 73 76 - static unsigned int des_encrypt_ecb(const struct cipher_desc *desc, u8 *out, 77 - const u8 *in, unsigned int nbytes) 78 - { 79 - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); 80 - int ret; 81 - 82 - /* only use complete blocks */ 83 - nbytes &= ~(DES_BLOCK_SIZE - 1); 84 - ret = crypt_s390_km(KM_DEA_ENCRYPT, sctx->key, out, in, nbytes); 85 - BUG_ON((ret < 0) || (ret != nbytes)); 86 - 87 - return nbytes; 88 - } 89 - 90 - static unsigned int des_decrypt_ecb(const struct cipher_desc *desc, u8 *out, 91 - const u8 *in, unsigned int nbytes) 92 - { 93 - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); 94 - int ret; 95 - 96 - /* only use complete blocks */ 97 - nbytes &= ~(DES_BLOCK_SIZE - 1); 98 - ret = crypt_s390_km(KM_DEA_DECRYPT, sctx->key, out, in, nbytes); 99 - BUG_ON((ret < 0) || (ret != nbytes)); 100 - 101 - return nbytes; 102 - } 103 - 104 - static unsigned int des_encrypt_cbc(const struct cipher_desc *desc, u8 *out, 105 - const u8 *in, unsigned int nbytes) 106 - { 107 - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); 108 - int ret; 109 - 110 - /* only use complete blocks */ 111 - nbytes &= ~(DES_BLOCK_SIZE - 1); 112 - 113 - memcpy(sctx->iv, desc->info, DES_BLOCK_SIZE); 114 - ret = crypt_s390_kmc(KMC_DEA_ENCRYPT, &sctx->iv, out, in, nbytes); 115 - 
BUG_ON((ret < 0) || (ret != nbytes)); 116 - 117 - memcpy(desc->info, sctx->iv, DES_BLOCK_SIZE); 118 - return nbytes; 119 - } 120 - 121 - static unsigned int des_decrypt_cbc(const struct cipher_desc *desc, u8 *out, 122 - const u8 *in, unsigned int nbytes) 123 - { 124 - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); 125 - int ret; 126 - 127 - /* only use complete blocks */ 128 - nbytes &= ~(DES_BLOCK_SIZE - 1); 129 - 130 - memcpy(&sctx->iv, desc->info, DES_BLOCK_SIZE); 131 - ret = crypt_s390_kmc(KMC_DEA_DECRYPT, &sctx->iv, out, in, nbytes); 132 - BUG_ON((ret < 0) || (ret != nbytes)); 133 - 134 - return nbytes; 135 - } 136 - 137 74 static struct crypto_alg des_alg = { 138 75 .cra_name = "des", 76 + .cra_driver_name = "des-s390", 77 + .cra_priority = CRYPT_S390_PRIORITY, 139 78 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 140 79 .cra_blocksize = DES_BLOCK_SIZE, 141 80 .cra_ctxsize = sizeof(struct crypt_s390_des_ctx), ··· 89 146 .cia_setkey = des_setkey, 90 147 .cia_encrypt = des_encrypt, 91 148 .cia_decrypt = des_decrypt, 92 - .cia_encrypt_ecb = des_encrypt_ecb, 93 - .cia_decrypt_ecb = des_decrypt_ecb, 94 - .cia_encrypt_cbc = des_encrypt_cbc, 95 - .cia_decrypt_cbc = des_decrypt_cbc, 149 + } 150 + } 151 + }; 152 + 153 + static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, 154 + void *param, struct blkcipher_walk *walk) 155 + { 156 + int ret = blkcipher_walk_virt(desc, walk); 157 + unsigned int nbytes; 158 + 159 + while ((nbytes = walk->nbytes)) { 160 + /* only use complete blocks */ 161 + unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); 162 + u8 *out = walk->dst.virt.addr; 163 + u8 *in = walk->src.virt.addr; 164 + 165 + ret = crypt_s390_km(func, param, out, in, n); 166 + BUG_ON((ret < 0) || (ret != n)); 167 + 168 + nbytes &= DES_BLOCK_SIZE - 1; 169 + ret = blkcipher_walk_done(desc, walk, nbytes); 170 + } 171 + 172 + return ret; 173 + } 174 + 175 + static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, 176 + void *param, struct 
blkcipher_walk *walk) 177 + { 178 + int ret = blkcipher_walk_virt(desc, walk); 179 + unsigned int nbytes = walk->nbytes; 180 + 181 + if (!nbytes) 182 + goto out; 183 + 184 + memcpy(param, walk->iv, DES_BLOCK_SIZE); 185 + do { 186 + /* only use complete blocks */ 187 + unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); 188 + u8 *out = walk->dst.virt.addr; 189 + u8 *in = walk->src.virt.addr; 190 + 191 + ret = crypt_s390_kmc(func, param, out, in, n); 192 + BUG_ON((ret < 0) || (ret != n)); 193 + 194 + nbytes &= DES_BLOCK_SIZE - 1; 195 + ret = blkcipher_walk_done(desc, walk, nbytes); 196 + } while ((nbytes = walk->nbytes)); 197 + memcpy(walk->iv, param, DES_BLOCK_SIZE); 198 + 199 + out: 200 + return ret; 201 + } 202 + 203 + static int ecb_des_encrypt(struct blkcipher_desc *desc, 204 + struct scatterlist *dst, struct scatterlist *src, 205 + unsigned int nbytes) 206 + { 207 + struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 208 + struct blkcipher_walk walk; 209 + 210 + blkcipher_walk_init(&walk, dst, src, nbytes); 211 + return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk); 212 + } 213 + 214 + static int ecb_des_decrypt(struct blkcipher_desc *desc, 215 + struct scatterlist *dst, struct scatterlist *src, 216 + unsigned int nbytes) 217 + { 218 + struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 219 + struct blkcipher_walk walk; 220 + 221 + blkcipher_walk_init(&walk, dst, src, nbytes); 222 + return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk); 223 + } 224 + 225 + static struct crypto_alg ecb_des_alg = { 226 + .cra_name = "ecb(des)", 227 + .cra_driver_name = "ecb-des-s390", 228 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 229 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 230 + .cra_blocksize = DES_BLOCK_SIZE, 231 + .cra_ctxsize = sizeof(struct crypt_s390_des_ctx), 232 + .cra_type = &crypto_blkcipher_type, 233 + .cra_module = THIS_MODULE, 234 + .cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list), 235 + .cra_u = { 236 + 
.blkcipher = { 237 + .min_keysize = DES_KEY_SIZE, 238 + .max_keysize = DES_KEY_SIZE, 239 + .setkey = des_setkey, 240 + .encrypt = ecb_des_encrypt, 241 + .decrypt = ecb_des_decrypt, 242 + } 243 + } 244 + }; 245 + 246 + static int cbc_des_encrypt(struct blkcipher_desc *desc, 247 + struct scatterlist *dst, struct scatterlist *src, 248 + unsigned int nbytes) 249 + { 250 + struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 251 + struct blkcipher_walk walk; 252 + 253 + blkcipher_walk_init(&walk, dst, src, nbytes); 254 + return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk); 255 + } 256 + 257 + static int cbc_des_decrypt(struct blkcipher_desc *desc, 258 + struct scatterlist *dst, struct scatterlist *src, 259 + unsigned int nbytes) 260 + { 261 + struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 262 + struct blkcipher_walk walk; 263 + 264 + blkcipher_walk_init(&walk, dst, src, nbytes); 265 + return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk); 266 + } 267 + 268 + static struct crypto_alg cbc_des_alg = { 269 + .cra_name = "cbc(des)", 270 + .cra_driver_name = "cbc-des-s390", 271 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 272 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 273 + .cra_blocksize = DES_BLOCK_SIZE, 274 + .cra_ctxsize = sizeof(struct crypt_s390_des_ctx), 275 + .cra_type = &crypto_blkcipher_type, 276 + .cra_module = THIS_MODULE, 277 + .cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list), 278 + .cra_u = { 279 + .blkcipher = { 280 + .min_keysize = DES_KEY_SIZE, 281 + .max_keysize = DES_KEY_SIZE, 282 + .ivsize = DES_BLOCK_SIZE, 283 + .setkey = des_setkey, 284 + .encrypt = cbc_des_encrypt, 285 + .decrypt = cbc_des_decrypt, 96 286 } 97 287 } 98 288 }; ··· 243 167 * 244 168 */ 245 169 static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, 246 - unsigned int keylen, u32 *flags) 170 + unsigned int keylen) 247 171 { 248 172 int i, ret; 249 173 struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); 250 - const 
u8* temp_key = key; 174 + const u8 *temp_key = key; 175 + u32 *flags = &tfm->crt_flags; 251 176 252 177 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { 253 178 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; ··· 279 202 DES3_128_BLOCK_SIZE); 280 203 } 281 204 282 - static unsigned int des3_128_encrypt_ecb(const struct cipher_desc *desc, 283 - u8 *out, const u8 *in, 284 - unsigned int nbytes) 285 - { 286 - struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm); 287 - int ret; 288 - 289 - /* only use complete blocks */ 290 - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); 291 - ret = crypt_s390_km(KM_TDEA_128_ENCRYPT, sctx->key, out, in, nbytes); 292 - BUG_ON((ret < 0) || (ret != nbytes)); 293 - 294 - return nbytes; 295 - } 296 - 297 - static unsigned int des3_128_decrypt_ecb(const struct cipher_desc *desc, 298 - u8 *out, const u8 *in, 299 - unsigned int nbytes) 300 - { 301 - struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm); 302 - int ret; 303 - 304 - /* only use complete blocks */ 305 - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); 306 - ret = crypt_s390_km(KM_TDEA_128_DECRYPT, sctx->key, out, in, nbytes); 307 - BUG_ON((ret < 0) || (ret != nbytes)); 308 - 309 - return nbytes; 310 - } 311 - 312 - static unsigned int des3_128_encrypt_cbc(const struct cipher_desc *desc, 313 - u8 *out, const u8 *in, 314 - unsigned int nbytes) 315 - { 316 - struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm); 317 - int ret; 318 - 319 - /* only use complete blocks */ 320 - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); 321 - 322 - memcpy(sctx->iv, desc->info, DES3_128_BLOCK_SIZE); 323 - ret = crypt_s390_kmc(KMC_TDEA_128_ENCRYPT, &sctx->iv, out, in, nbytes); 324 - BUG_ON((ret < 0) || (ret != nbytes)); 325 - 326 - memcpy(desc->info, sctx->iv, DES3_128_BLOCK_SIZE); 327 - return nbytes; 328 - } 329 - 330 - static unsigned int des3_128_decrypt_cbc(const struct cipher_desc *desc, 331 - u8 *out, const u8 *in, 332 - unsigned int nbytes) 333 - { 334 - struct crypt_s390_des3_128_ctx 
*sctx = crypto_tfm_ctx(desc->tfm); 335 - int ret; 336 - 337 - /* only use complete blocks */ 338 - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); 339 - 340 - memcpy(&sctx->iv, desc->info, DES3_128_BLOCK_SIZE); 341 - ret = crypt_s390_kmc(KMC_TDEA_128_DECRYPT, &sctx->iv, out, in, nbytes); 342 - BUG_ON((ret < 0) || (ret != nbytes)); 343 - 344 - return nbytes; 345 - } 346 - 347 205 static struct crypto_alg des3_128_alg = { 348 206 .cra_name = "des3_ede128", 207 + .cra_driver_name = "des3_ede128-s390", 208 + .cra_priority = CRYPT_S390_PRIORITY, 349 209 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 350 210 .cra_blocksize = DES3_128_BLOCK_SIZE, 351 211 .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx), ··· 295 281 .cia_setkey = des3_128_setkey, 296 282 .cia_encrypt = des3_128_encrypt, 297 283 .cia_decrypt = des3_128_decrypt, 298 - .cia_encrypt_ecb = des3_128_encrypt_ecb, 299 - .cia_decrypt_ecb = des3_128_decrypt_ecb, 300 - .cia_encrypt_cbc = des3_128_encrypt_cbc, 301 - .cia_decrypt_cbc = des3_128_decrypt_cbc, 284 + } 285 + } 286 + }; 287 + 288 + static int ecb_des3_128_encrypt(struct blkcipher_desc *desc, 289 + struct scatterlist *dst, 290 + struct scatterlist *src, unsigned int nbytes) 291 + { 292 + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 293 + struct blkcipher_walk walk; 294 + 295 + blkcipher_walk_init(&walk, dst, src, nbytes); 296 + return ecb_desall_crypt(desc, KM_TDEA_128_ENCRYPT, sctx->key, &walk); 297 + } 298 + 299 + static int ecb_des3_128_decrypt(struct blkcipher_desc *desc, 300 + struct scatterlist *dst, 301 + struct scatterlist *src, unsigned int nbytes) 302 + { 303 + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 304 + struct blkcipher_walk walk; 305 + 306 + blkcipher_walk_init(&walk, dst, src, nbytes); 307 + return ecb_desall_crypt(desc, KM_TDEA_128_DECRYPT, sctx->key, &walk); 308 + } 309 + 310 + static struct crypto_alg ecb_des3_128_alg = { 311 + .cra_name = "ecb(des3_ede128)", 312 + .cra_driver_name = 
"ecb-des3_ede128-s390", 313 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 314 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 315 + .cra_blocksize = DES3_128_BLOCK_SIZE, 316 + .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx), 317 + .cra_type = &crypto_blkcipher_type, 318 + .cra_module = THIS_MODULE, 319 + .cra_list = LIST_HEAD_INIT( 320 + ecb_des3_128_alg.cra_list), 321 + .cra_u = { 322 + .blkcipher = { 323 + .min_keysize = DES3_128_KEY_SIZE, 324 + .max_keysize = DES3_128_KEY_SIZE, 325 + .setkey = des3_128_setkey, 326 + .encrypt = ecb_des3_128_encrypt, 327 + .decrypt = ecb_des3_128_decrypt, 328 + } 329 + } 330 + }; 331 + 332 + static int cbc_des3_128_encrypt(struct blkcipher_desc *desc, 333 + struct scatterlist *dst, 334 + struct scatterlist *src, unsigned int nbytes) 335 + { 336 + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 337 + struct blkcipher_walk walk; 338 + 339 + blkcipher_walk_init(&walk, dst, src, nbytes); 340 + return cbc_desall_crypt(desc, KMC_TDEA_128_ENCRYPT, sctx->iv, &walk); 341 + } 342 + 343 + static int cbc_des3_128_decrypt(struct blkcipher_desc *desc, 344 + struct scatterlist *dst, 345 + struct scatterlist *src, unsigned int nbytes) 346 + { 347 + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 348 + struct blkcipher_walk walk; 349 + 350 + blkcipher_walk_init(&walk, dst, src, nbytes); 351 + return cbc_desall_crypt(desc, KMC_TDEA_128_DECRYPT, sctx->iv, &walk); 352 + } 353 + 354 + static struct crypto_alg cbc_des3_128_alg = { 355 + .cra_name = "cbc(des3_ede128)", 356 + .cra_driver_name = "cbc-des3_ede128-s390", 357 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 358 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 359 + .cra_blocksize = DES3_128_BLOCK_SIZE, 360 + .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx), 361 + .cra_type = &crypto_blkcipher_type, 362 + .cra_module = THIS_MODULE, 363 + .cra_list = LIST_HEAD_INIT( 364 + cbc_des3_128_alg.cra_list), 365 + .cra_u = { 366 + .blkcipher = { 367 + 
.min_keysize = DES3_128_KEY_SIZE, 368 + .max_keysize = DES3_128_KEY_SIZE, 369 + .ivsize = DES3_128_BLOCK_SIZE, 370 + .setkey = des3_128_setkey, 371 + .encrypt = cbc_des3_128_encrypt, 372 + .decrypt = cbc_des3_128_decrypt, 302 373 } 303 374 } 304 375 }; ··· 402 303 * 403 304 */ 404 305 static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, 405 - unsigned int keylen, u32 *flags) 306 + unsigned int keylen) 406 307 { 407 308 int i, ret; 408 309 struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); 409 - const u8* temp_key = key; 310 + const u8 *temp_key = key; 311 + u32 *flags = &tfm->crt_flags; 410 312 411 313 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && 412 314 memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], ··· 441 341 DES3_192_BLOCK_SIZE); 442 342 } 443 343 444 - static unsigned int des3_192_encrypt_ecb(const struct cipher_desc *desc, 445 - u8 *out, const u8 *in, 446 - unsigned int nbytes) 447 - { 448 - struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm); 449 - int ret; 450 - 451 - /* only use complete blocks */ 452 - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); 453 - ret = crypt_s390_km(KM_TDEA_192_ENCRYPT, sctx->key, out, in, nbytes); 454 - BUG_ON((ret < 0) || (ret != nbytes)); 455 - 456 - return nbytes; 457 - } 458 - 459 - static unsigned int des3_192_decrypt_ecb(const struct cipher_desc *desc, 460 - u8 *out, const u8 *in, 461 - unsigned int nbytes) 462 - { 463 - struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm); 464 - int ret; 465 - 466 - /* only use complete blocks */ 467 - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); 468 - ret = crypt_s390_km(KM_TDEA_192_DECRYPT, sctx->key, out, in, nbytes); 469 - BUG_ON((ret < 0) || (ret != nbytes)); 470 - 471 - return nbytes; 472 - } 473 - 474 - static unsigned int des3_192_encrypt_cbc(const struct cipher_desc *desc, 475 - u8 *out, const u8 *in, 476 - unsigned int nbytes) 477 - { 478 - struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm); 479 - int ret; 480 - 481 - /* 
only use complete blocks */ 482 - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); 483 - 484 - memcpy(sctx->iv, desc->info, DES3_192_BLOCK_SIZE); 485 - ret = crypt_s390_kmc(KMC_TDEA_192_ENCRYPT, &sctx->iv, out, in, nbytes); 486 - BUG_ON((ret < 0) || (ret != nbytes)); 487 - 488 - memcpy(desc->info, sctx->iv, DES3_192_BLOCK_SIZE); 489 - return nbytes; 490 - } 491 - 492 - static unsigned int des3_192_decrypt_cbc(const struct cipher_desc *desc, 493 - u8 *out, const u8 *in, 494 - unsigned int nbytes) 495 - { 496 - struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm); 497 - int ret; 498 - 499 - /* only use complete blocks */ 500 - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); 501 - 502 - memcpy(&sctx->iv, desc->info, DES3_192_BLOCK_SIZE); 503 - ret = crypt_s390_kmc(KMC_TDEA_192_DECRYPT, &sctx->iv, out, in, nbytes); 504 - BUG_ON((ret < 0) || (ret != nbytes)); 505 - 506 - return nbytes; 507 - } 508 - 509 344 static struct crypto_alg des3_192_alg = { 510 345 .cra_name = "des3_ede", 346 + .cra_driver_name = "des3_ede-s390", 347 + .cra_priority = CRYPT_S390_PRIORITY, 511 348 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 512 349 .cra_blocksize = DES3_192_BLOCK_SIZE, 513 350 .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), ··· 457 420 .cia_setkey = des3_192_setkey, 458 421 .cia_encrypt = des3_192_encrypt, 459 422 .cia_decrypt = des3_192_decrypt, 460 - .cia_encrypt_ecb = des3_192_encrypt_ecb, 461 - .cia_decrypt_ecb = des3_192_decrypt_ecb, 462 - .cia_encrypt_cbc = des3_192_encrypt_cbc, 463 - .cia_decrypt_cbc = des3_192_decrypt_cbc, 423 + } 424 + } 425 + }; 426 + 427 + static int ecb_des3_192_encrypt(struct blkcipher_desc *desc, 428 + struct scatterlist *dst, 429 + struct scatterlist *src, unsigned int nbytes) 430 + { 431 + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 432 + struct blkcipher_walk walk; 433 + 434 + blkcipher_walk_init(&walk, dst, src, nbytes); 435 + return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk); 436 + } 437 + 438 + static int 
ecb_des3_192_decrypt(struct blkcipher_desc *desc, 439 + struct scatterlist *dst, 440 + struct scatterlist *src, unsigned int nbytes) 441 + { 442 + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 443 + struct blkcipher_walk walk; 444 + 445 + blkcipher_walk_init(&walk, dst, src, nbytes); 446 + return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk); 447 + } 448 + 449 + static struct crypto_alg ecb_des3_192_alg = { 450 + .cra_name = "ecb(des3_ede)", 451 + .cra_driver_name = "ecb-des3_ede-s390", 452 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 453 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 454 + .cra_blocksize = DES3_192_BLOCK_SIZE, 455 + .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), 456 + .cra_type = &crypto_blkcipher_type, 457 + .cra_module = THIS_MODULE, 458 + .cra_list = LIST_HEAD_INIT( 459 + ecb_des3_192_alg.cra_list), 460 + .cra_u = { 461 + .blkcipher = { 462 + .min_keysize = DES3_192_KEY_SIZE, 463 + .max_keysize = DES3_192_KEY_SIZE, 464 + .setkey = des3_192_setkey, 465 + .encrypt = ecb_des3_192_encrypt, 466 + .decrypt = ecb_des3_192_decrypt, 467 + } 468 + } 469 + }; 470 + 471 + static int cbc_des3_192_encrypt(struct blkcipher_desc *desc, 472 + struct scatterlist *dst, 473 + struct scatterlist *src, unsigned int nbytes) 474 + { 475 + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 476 + struct blkcipher_walk walk; 477 + 478 + blkcipher_walk_init(&walk, dst, src, nbytes); 479 + return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk); 480 + } 481 + 482 + static int cbc_des3_192_decrypt(struct blkcipher_desc *desc, 483 + struct scatterlist *dst, 484 + struct scatterlist *src, unsigned int nbytes) 485 + { 486 + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 487 + struct blkcipher_walk walk; 488 + 489 + blkcipher_walk_init(&walk, dst, src, nbytes); 490 + return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk); 491 + } 492 + 493 + static struct 
crypto_alg cbc_des3_192_alg = { 494 + .cra_name = "cbc(des3_ede)", 495 + .cra_driver_name = "cbc-des3_ede-s390", 496 + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 497 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 498 + .cra_blocksize = DES3_192_BLOCK_SIZE, 499 + .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), 500 + .cra_type = &crypto_blkcipher_type, 501 + .cra_module = THIS_MODULE, 502 + .cra_list = LIST_HEAD_INIT( 503 + cbc_des3_192_alg.cra_list), 504 + .cra_u = { 505 + .blkcipher = { 506 + .min_keysize = DES3_192_KEY_SIZE, 507 + .max_keysize = DES3_192_KEY_SIZE, 508 + .ivsize = DES3_192_BLOCK_SIZE, 509 + .setkey = des3_192_setkey, 510 + .encrypt = cbc_des3_192_encrypt, 511 + .decrypt = cbc_des3_192_decrypt, 464 512 } 465 513 } 466 514 }; ··· 559 437 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) 560 438 return -ENOSYS; 561 439 562 - ret |= (crypto_register_alg(&des_alg) == 0) ? 0:1; 563 - ret |= (crypto_register_alg(&des3_128_alg) == 0) ? 0:2; 564 - ret |= (crypto_register_alg(&des3_192_alg) == 0) ? 
0:4; 565 - if (ret) { 566 - crypto_unregister_alg(&des3_192_alg); 567 - crypto_unregister_alg(&des3_128_alg); 568 - crypto_unregister_alg(&des_alg); 569 - return -EEXIST; 570 - } 571 - return 0; 440 + ret = crypto_register_alg(&des_alg); 441 + if (ret) 442 + goto des_err; 443 + ret = crypto_register_alg(&ecb_des_alg); 444 + if (ret) 445 + goto ecb_des_err; 446 + ret = crypto_register_alg(&cbc_des_alg); 447 + if (ret) 448 + goto cbc_des_err; 449 + 450 + ret = crypto_register_alg(&des3_128_alg); 451 + if (ret) 452 + goto des3_128_err; 453 + ret = crypto_register_alg(&ecb_des3_128_alg); 454 + if (ret) 455 + goto ecb_des3_128_err; 456 + ret = crypto_register_alg(&cbc_des3_128_alg); 457 + if (ret) 458 + goto cbc_des3_128_err; 459 + 460 + ret = crypto_register_alg(&des3_192_alg); 461 + if (ret) 462 + goto des3_192_err; 463 + ret = crypto_register_alg(&ecb_des3_192_alg); 464 + if (ret) 465 + goto ecb_des3_192_err; 466 + ret = crypto_register_alg(&cbc_des3_192_alg); 467 + if (ret) 468 + goto cbc_des3_192_err; 469 + 470 + out: 471 + return ret; 472 + 473 + cbc_des3_192_err: 474 + crypto_unregister_alg(&ecb_des3_192_alg); 475 + ecb_des3_192_err: 476 + crypto_unregister_alg(&des3_192_alg); 477 + des3_192_err: 478 + crypto_unregister_alg(&cbc_des3_128_alg); 479 + cbc_des3_128_err: 480 + crypto_unregister_alg(&ecb_des3_128_alg); 481 + ecb_des3_128_err: 482 + crypto_unregister_alg(&des3_128_alg); 483 + des3_128_err: 484 + crypto_unregister_alg(&cbc_des_alg); 485 + cbc_des_err: 486 + crypto_unregister_alg(&ecb_des_alg); 487 + ecb_des_err: 488 + crypto_unregister_alg(&des_alg); 489 + des_err: 490 + goto out; 572 491 } 573 492 574 493 static void __exit fini(void) 575 494 { 495 + crypto_unregister_alg(&cbc_des3_192_alg); 496 + crypto_unregister_alg(&ecb_des3_192_alg); 576 497 crypto_unregister_alg(&des3_192_alg); 498 + crypto_unregister_alg(&cbc_des3_128_alg); 499 + crypto_unregister_alg(&ecb_des3_128_alg); 577 500 crypto_unregister_alg(&des3_128_alg); 501 + 
crypto_unregister_alg(&cbc_des_alg); 502 + crypto_unregister_alg(&ecb_des_alg); 578 503 crypto_unregister_alg(&des_alg); 579 504 } 580 505
+2
arch/s390/crypto/sha1_s390.c
··· 126 126 127 127 static struct crypto_alg alg = { 128 128 .cra_name = "sha1", 129 + .cra_driver_name = "sha1-s390", 130 + .cra_priority = CRYPT_S390_PRIORITY, 129 131 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 130 132 .cra_blocksize = SHA1_BLOCK_SIZE, 131 133 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
+2
arch/s390/crypto/sha256_s390.c
··· 127 127 128 128 static struct crypto_alg alg = { 129 129 .cra_name = "sha256", 130 + .cra_driver_name = "sha256-s390", 131 + .cra_priority = CRYPT_S390_PRIORITY, 130 132 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 131 133 .cra_blocksize = SHA256_BLOCK_SIZE, 132 134 .cra_ctxsize = sizeof(struct s390_sha256_ctx),
+3
arch/x86_64/crypto/Makefile
··· 5 5 # 6 6 7 7 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o 8 + obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o 8 9 9 10 aes-x86_64-y := aes-x86_64-asm.o aes.o 11 + twofish-x86_64-y := twofish-x86_64-asm.o twofish.o 12 +
+3 -2
arch/x86_64/crypto/aes.c
··· 228 228 } 229 229 230 230 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 231 - unsigned int key_len, u32 *flags) 231 + unsigned int key_len) 232 232 { 233 233 struct aes_ctx *ctx = crypto_tfm_ctx(tfm); 234 234 const __le32 *key = (const __le32 *)in_key; 235 + u32 *flags = &tfm->crt_flags; 235 236 u32 i, j, t, u, v, w; 236 237 237 - if (key_len != 16 && key_len != 24 && key_len != 32) { 238 + if (key_len % 8) { 238 239 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 239 240 return -EINVAL; 240 241 }
+324
arch/x86_64/crypto/twofish-x86_64-asm.S
··· 1 + /*************************************************************************** 2 + * Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * 3 + * * 4 + * This program is free software; you can redistribute it and/or modify * 5 + * it under the terms of the GNU General Public License as published by * 6 + * the Free Software Foundation; either version 2 of the License, or * 7 + * (at your option) any later version. * 8 + * * 9 + * This program is distributed in the hope that it will be useful, * 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of * 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 12 + * GNU General Public License for more details. * 13 + * * 14 + * You should have received a copy of the GNU General Public License * 15 + * along with this program; if not, write to the * 16 + * Free Software Foundation, Inc., * 17 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * 18 + ***************************************************************************/ 19 + 20 + .file "twofish-x86_64-asm.S" 21 + .text 22 + 23 + #include <asm/asm-offsets.h> 24 + 25 + #define a_offset 0 26 + #define b_offset 4 27 + #define c_offset 8 28 + #define d_offset 12 29 + 30 + /* Structure of the crypto context struct*/ 31 + 32 + #define s0 0 /* S0 Array 256 Words each */ 33 + #define s1 1024 /* S1 Array */ 34 + #define s2 2048 /* S2 Array */ 35 + #define s3 3072 /* S3 Array */ 36 + #define w 4096 /* 8 whitening keys (word) */ 37 + #define k 4128 /* key 1-32 ( word ) */ 38 + 39 + /* define a few register aliases to allow macro substitution */ 40 + 41 + #define R0 %rax 42 + #define R0D %eax 43 + #define R0B %al 44 + #define R0H %ah 45 + 46 + #define R1 %rbx 47 + #define R1D %ebx 48 + #define R1B %bl 49 + #define R1H %bh 50 + 51 + #define R2 %rcx 52 + #define R2D %ecx 53 + #define R2B %cl 54 + #define R2H %ch 55 + 56 + #define R3 %rdx 57 + #define R3D %edx 58 + #define R3B %dl 59 + #define R3H %dh 60 + 61 + 62 + /* 
performs input whitening */ 63 + #define input_whitening(src,context,offset)\ 64 + xor w+offset(context), src; 65 + 66 + /* performs input whitening */ 67 + #define output_whitening(src,context,offset)\ 68 + xor w+16+offset(context), src; 69 + 70 + 71 + /* 72 + * a input register containing a (rotated 16) 73 + * b input register containing b 74 + * c input register containing c 75 + * d input register containing d (already rol $1) 76 + * operations on a and b are interleaved to increase performance 77 + */ 78 + #define encrypt_round(a,b,c,d,round)\ 79 + movzx b ## B, %edi;\ 80 + mov s1(%r11,%rdi,4),%r8d;\ 81 + movzx a ## B, %edi;\ 82 + mov s2(%r11,%rdi,4),%r9d;\ 83 + movzx b ## H, %edi;\ 84 + ror $16, b ## D;\ 85 + xor s2(%r11,%rdi,4),%r8d;\ 86 + movzx a ## H, %edi;\ 87 + ror $16, a ## D;\ 88 + xor s3(%r11,%rdi,4),%r9d;\ 89 + movzx b ## B, %edi;\ 90 + xor s3(%r11,%rdi,4),%r8d;\ 91 + movzx a ## B, %edi;\ 92 + xor (%r11,%rdi,4), %r9d;\ 93 + movzx b ## H, %edi;\ 94 + ror $15, b ## D;\ 95 + xor (%r11,%rdi,4), %r8d;\ 96 + movzx a ## H, %edi;\ 97 + xor s1(%r11,%rdi,4),%r9d;\ 98 + add %r8d, %r9d;\ 99 + add %r9d, %r8d;\ 100 + add k+round(%r11), %r9d;\ 101 + xor %r9d, c ## D;\ 102 + rol $15, c ## D;\ 103 + add k+4+round(%r11),%r8d;\ 104 + xor %r8d, d ## D; 105 + 106 + /* 107 + * a input register containing a(rotated 16) 108 + * b input register containing b 109 + * c input register containing c 110 + * d input register containing d (already rol $1) 111 + * operations on a and b are interleaved to increase performance 112 + * during the round a and b are prepared for the output whitening 113 + */ 114 + #define encrypt_last_round(a,b,c,d,round)\ 115 + mov b ## D, %r10d;\ 116 + shl $32, %r10;\ 117 + movzx b ## B, %edi;\ 118 + mov s1(%r11,%rdi,4),%r8d;\ 119 + movzx a ## B, %edi;\ 120 + mov s2(%r11,%rdi,4),%r9d;\ 121 + movzx b ## H, %edi;\ 122 + ror $16, b ## D;\ 123 + xor s2(%r11,%rdi,4),%r8d;\ 124 + movzx a ## H, %edi;\ 125 + ror $16, a ## D;\ 126 + xor s3(%r11,%rdi,4),%r9d;\ 
127 + movzx b ## B, %edi;\ 128 + xor s3(%r11,%rdi,4),%r8d;\ 129 + movzx a ## B, %edi;\ 130 + xor (%r11,%rdi,4), %r9d;\ 131 + xor a, %r10;\ 132 + movzx b ## H, %edi;\ 133 + xor (%r11,%rdi,4), %r8d;\ 134 + movzx a ## H, %edi;\ 135 + xor s1(%r11,%rdi,4),%r9d;\ 136 + add %r8d, %r9d;\ 137 + add %r9d, %r8d;\ 138 + add k+round(%r11), %r9d;\ 139 + xor %r9d, c ## D;\ 140 + ror $1, c ## D;\ 141 + add k+4+round(%r11),%r8d;\ 142 + xor %r8d, d ## D 143 + 144 + /* 145 + * a input register containing a 146 + * b input register containing b (rotated 16) 147 + * c input register containing c (already rol $1) 148 + * d input register containing d 149 + * operations on a and b are interleaved to increase performance 150 + */ 151 + #define decrypt_round(a,b,c,d,round)\ 152 + movzx a ## B, %edi;\ 153 + mov (%r11,%rdi,4), %r9d;\ 154 + movzx b ## B, %edi;\ 155 + mov s3(%r11,%rdi,4),%r8d;\ 156 + movzx a ## H, %edi;\ 157 + ror $16, a ## D;\ 158 + xor s1(%r11,%rdi,4),%r9d;\ 159 + movzx b ## H, %edi;\ 160 + ror $16, b ## D;\ 161 + xor (%r11,%rdi,4), %r8d;\ 162 + movzx a ## B, %edi;\ 163 + xor s2(%r11,%rdi,4),%r9d;\ 164 + movzx b ## B, %edi;\ 165 + xor s1(%r11,%rdi,4),%r8d;\ 166 + movzx a ## H, %edi;\ 167 + ror $15, a ## D;\ 168 + xor s3(%r11,%rdi,4),%r9d;\ 169 + movzx b ## H, %edi;\ 170 + xor s2(%r11,%rdi,4),%r8d;\ 171 + add %r8d, %r9d;\ 172 + add %r9d, %r8d;\ 173 + add k+round(%r11), %r9d;\ 174 + xor %r9d, c ## D;\ 175 + add k+4+round(%r11),%r8d;\ 176 + xor %r8d, d ## D;\ 177 + rol $15, d ## D; 178 + 179 + /* 180 + * a input register containing a 181 + * b input register containing b 182 + * c input register containing c (already rol $1) 183 + * d input register containing d 184 + * operations on a and b are interleaved to increase performance 185 + * during the round a and b are prepared for the output whitening 186 + */ 187 + #define decrypt_last_round(a,b,c,d,round)\ 188 + movzx a ## B, %edi;\ 189 + mov (%r11,%rdi,4), %r9d;\ 190 + movzx b ## B, %edi;\ 191 + mov s3(%r11,%rdi,4),%r8d;\ 192 
+ movzx b ## H, %edi;\ 193 + ror $16, b ## D;\ 194 + xor (%r11,%rdi,4), %r8d;\ 195 + movzx a ## H, %edi;\ 196 + mov b ## D, %r10d;\ 197 + shl $32, %r10;\ 198 + xor a, %r10;\ 199 + ror $16, a ## D;\ 200 + xor s1(%r11,%rdi,4),%r9d;\ 201 + movzx b ## B, %edi;\ 202 + xor s1(%r11,%rdi,4),%r8d;\ 203 + movzx a ## B, %edi;\ 204 + xor s2(%r11,%rdi,4),%r9d;\ 205 + movzx b ## H, %edi;\ 206 + xor s2(%r11,%rdi,4),%r8d;\ 207 + movzx a ## H, %edi;\ 208 + xor s3(%r11,%rdi,4),%r9d;\ 209 + add %r8d, %r9d;\ 210 + add %r9d, %r8d;\ 211 + add k+round(%r11), %r9d;\ 212 + xor %r9d, c ## D;\ 213 + add k+4+round(%r11),%r8d;\ 214 + xor %r8d, d ## D;\ 215 + ror $1, d ## D; 216 + 217 + .align 8 218 + .global twofish_enc_blk 219 + .global twofish_dec_blk 220 + 221 + twofish_enc_blk: 222 + pushq R1 223 + 224 + /* %rdi contains the crypto tfm adress */ 225 + /* %rsi contains the output adress */ 226 + /* %rdx contains the input adress */ 227 + add $crypto_tfm_ctx_offset, %rdi /* set ctx adress */ 228 + /* ctx adress is moved to free one non-rex register 229 + as target for the 8bit high operations */ 230 + mov %rdi, %r11 231 + 232 + movq (R3), R1 233 + movq 8(R3), R3 234 + input_whitening(R1,%r11,a_offset) 235 + input_whitening(R3,%r11,c_offset) 236 + mov R1D, R0D 237 + rol $16, R0D 238 + shr $32, R1 239 + mov R3D, R2D 240 + shr $32, R3 241 + rol $1, R3D 242 + 243 + encrypt_round(R0,R1,R2,R3,0); 244 + encrypt_round(R2,R3,R0,R1,8); 245 + encrypt_round(R0,R1,R2,R3,2*8); 246 + encrypt_round(R2,R3,R0,R1,3*8); 247 + encrypt_round(R0,R1,R2,R3,4*8); 248 + encrypt_round(R2,R3,R0,R1,5*8); 249 + encrypt_round(R0,R1,R2,R3,6*8); 250 + encrypt_round(R2,R3,R0,R1,7*8); 251 + encrypt_round(R0,R1,R2,R3,8*8); 252 + encrypt_round(R2,R3,R0,R1,9*8); 253 + encrypt_round(R0,R1,R2,R3,10*8); 254 + encrypt_round(R2,R3,R0,R1,11*8); 255 + encrypt_round(R0,R1,R2,R3,12*8); 256 + encrypt_round(R2,R3,R0,R1,13*8); 257 + encrypt_round(R0,R1,R2,R3,14*8); 258 + encrypt_last_round(R2,R3,R0,R1,15*8); 259 + 260 + 261 + 
output_whitening(%r10,%r11,a_offset) 262 + movq %r10, (%rsi) 263 + 264 + shl $32, R1 265 + xor R0, R1 266 + 267 + output_whitening(R1,%r11,c_offset) 268 + movq R1, 8(%rsi) 269 + 270 + popq R1 271 + movq $1,%rax 272 + ret 273 + 274 + twofish_dec_blk: 275 + pushq R1 276 + 277 + /* %rdi contains the crypto tfm adress */ 278 + /* %rsi contains the output adress */ 279 + /* %rdx contains the input adress */ 280 + add $crypto_tfm_ctx_offset, %rdi /* set ctx adress */ 281 + /* ctx adress is moved to free one non-rex register 282 + as target for the 8bit high operations */ 283 + mov %rdi, %r11 284 + 285 + movq (R3), R1 286 + movq 8(R3), R3 287 + output_whitening(R1,%r11,a_offset) 288 + output_whitening(R3,%r11,c_offset) 289 + mov R1D, R0D 290 + shr $32, R1 291 + rol $16, R1D 292 + mov R3D, R2D 293 + shr $32, R3 294 + rol $1, R2D 295 + 296 + decrypt_round(R0,R1,R2,R3,15*8); 297 + decrypt_round(R2,R3,R0,R1,14*8); 298 + decrypt_round(R0,R1,R2,R3,13*8); 299 + decrypt_round(R2,R3,R0,R1,12*8); 300 + decrypt_round(R0,R1,R2,R3,11*8); 301 + decrypt_round(R2,R3,R0,R1,10*8); 302 + decrypt_round(R0,R1,R2,R3,9*8); 303 + decrypt_round(R2,R3,R0,R1,8*8); 304 + decrypt_round(R0,R1,R2,R3,7*8); 305 + decrypt_round(R2,R3,R0,R1,6*8); 306 + decrypt_round(R0,R1,R2,R3,5*8); 307 + decrypt_round(R2,R3,R0,R1,4*8); 308 + decrypt_round(R0,R1,R2,R3,3*8); 309 + decrypt_round(R2,R3,R0,R1,2*8); 310 + decrypt_round(R0,R1,R2,R3,1*8); 311 + decrypt_last_round(R2,R3,R0,R1,0); 312 + 313 + input_whitening(%r10,%r11,a_offset) 314 + movq %r10, (%rsi) 315 + 316 + shl $32, R1 317 + xor R0, R1 318 + 319 + input_whitening(R1,%r11,c_offset) 320 + movq R1, 8(%rsi) 321 + 322 + popq R1 323 + movq $1,%rax 324 + ret
+97
arch/x86_64/crypto/twofish.c
··· 1 + /* 2 + * Glue Code for optimized x86_64 assembler version of TWOFISH 3 + * 4 + * Originally Twofish for GPG 5 + * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998 6 + * 256-bit key length added March 20, 1999 7 + * Some modifications to reduce the text size by Werner Koch, April, 1998 8 + * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com> 9 + * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net> 10 + * 11 + * The original author has disclaimed all copyright interest in this 12 + * code and thus put it in the public domain. The subsequent authors 13 + * have put this under the GNU General Public License. 14 + * 15 + * This program is free software; you can redistribute it and/or modify 16 + * it under the terms of the GNU General Public License as published by 17 + * the Free Software Foundation; either version 2 of the License, or 18 + * (at your option) any later version. 19 + * 20 + * This program is distributed in the hope that it will be useful, 21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 + * GNU General Public License for more details. 24 + * 25 + * You should have received a copy of the GNU General Public License 26 + * along with this program; if not, write to the Free Software 27 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 28 + * USA 29 + * 30 + * This code is a "clean room" implementation, written from the paper 31 + * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, 32 + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available 33 + * through http://www.counterpane.com/twofish.html 34 + * 35 + * For background information on multiplication in finite fields, used for 36 + * the matrix operations in the key schedule, see the book _Contemporary 37 + * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the 38 + * Third Edition. 
39 + */ 40 + 41 + #include <crypto/twofish.h> 42 + #include <linux/crypto.h> 43 + #include <linux/init.h> 44 + #include <linux/kernel.h> 45 + #include <linux/module.h> 46 + #include <linux/types.h> 47 + 48 + asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 49 + asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 50 + 51 + static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 52 + { 53 + twofish_enc_blk(tfm, dst, src); 54 + } 55 + 56 + static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 57 + { 58 + twofish_dec_blk(tfm, dst, src); 59 + } 60 + 61 + static struct crypto_alg alg = { 62 + .cra_name = "twofish", 63 + .cra_driver_name = "twofish-x86_64", 64 + .cra_priority = 200, 65 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 66 + .cra_blocksize = TF_BLOCK_SIZE, 67 + .cra_ctxsize = sizeof(struct twofish_ctx), 68 + .cra_alignmask = 3, 69 + .cra_module = THIS_MODULE, 70 + .cra_list = LIST_HEAD_INIT(alg.cra_list), 71 + .cra_u = { 72 + .cipher = { 73 + .cia_min_keysize = TF_MIN_KEY_SIZE, 74 + .cia_max_keysize = TF_MAX_KEY_SIZE, 75 + .cia_setkey = twofish_setkey, 76 + .cia_encrypt = twofish_encrypt, 77 + .cia_decrypt = twofish_decrypt 78 + } 79 + } 80 + }; 81 + 82 + static int __init init(void) 83 + { 84 + return crypto_register_alg(&alg); 85 + } 86 + 87 + static void __exit fini(void) 88 + { 89 + crypto_unregister_alg(&alg); 90 + } 91 + 92 + module_init(init); 93 + module_exit(fini); 94 + 95 + MODULE_LICENSE("GPL"); 96 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized"); 97 + MODULE_ALIAS("twofish");
+122 -32
crypto/Kconfig
··· 9 9 help 10 10 This option provides the core Cryptographic API. 11 11 12 + if CRYPTO 13 + 14 + config CRYPTO_ALGAPI 15 + tristate 16 + help 17 + This option provides the API for cryptographic algorithms. 18 + 19 + config CRYPTO_BLKCIPHER 20 + tristate 21 + select CRYPTO_ALGAPI 22 + 23 + config CRYPTO_HASH 24 + tristate 25 + select CRYPTO_ALGAPI 26 + 27 + config CRYPTO_MANAGER 28 + tristate "Cryptographic algorithm manager" 29 + select CRYPTO_ALGAPI 30 + default m 31 + help 32 + Create default cryptographic template instantiations such as 33 + cbc(aes). 34 + 12 35 config CRYPTO_HMAC 13 - bool "HMAC support" 14 - depends on CRYPTO 36 + tristate "HMAC support" 37 + select CRYPTO_HASH 15 38 help 16 39 HMAC: Keyed-Hashing for Message Authentication (RFC2104). 17 40 This is required for IPSec. 18 41 19 42 config CRYPTO_NULL 20 43 tristate "Null algorithms" 21 - depends on CRYPTO 44 + select CRYPTO_ALGAPI 22 45 help 23 46 These are 'Null' algorithms, used by IPsec, which do nothing. 24 47 25 48 config CRYPTO_MD4 26 49 tristate "MD4 digest algorithm" 27 - depends on CRYPTO 50 + select CRYPTO_ALGAPI 28 51 help 29 52 MD4 message digest algorithm (RFC1320). 30 53 31 54 config CRYPTO_MD5 32 55 tristate "MD5 digest algorithm" 33 - depends on CRYPTO 56 + select CRYPTO_ALGAPI 34 57 help 35 58 MD5 message digest algorithm (RFC1321). 36 59 37 60 config CRYPTO_SHA1 38 61 tristate "SHA1 digest algorithm" 39 - depends on CRYPTO 62 + select CRYPTO_ALGAPI 40 63 help 41 64 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 42 65 43 66 config CRYPTO_SHA1_S390 44 67 tristate "SHA1 digest algorithm (s390)" 45 - depends on CRYPTO && S390 68 + depends on S390 69 + select CRYPTO_ALGAPI 46 70 help 47 71 This is the s390 hardware accelerated implementation of the 48 72 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 
49 73 50 74 config CRYPTO_SHA256 51 75 tristate "SHA256 digest algorithm" 52 - depends on CRYPTO 76 + select CRYPTO_ALGAPI 53 77 help 54 78 SHA256 secure hash standard (DFIPS 180-2). 55 79 ··· 82 58 83 59 config CRYPTO_SHA256_S390 84 60 tristate "SHA256 digest algorithm (s390)" 85 - depends on CRYPTO && S390 61 + depends on S390 62 + select CRYPTO_ALGAPI 86 63 help 87 64 This is the s390 hardware accelerated implementation of the 88 65 SHA256 secure hash standard (DFIPS 180-2). ··· 93 68 94 69 config CRYPTO_SHA512 95 70 tristate "SHA384 and SHA512 digest algorithms" 96 - depends on CRYPTO 71 + select CRYPTO_ALGAPI 97 72 help 98 73 SHA512 secure hash standard (DFIPS 180-2). 99 74 ··· 105 80 106 81 config CRYPTO_WP512 107 82 tristate "Whirlpool digest algorithms" 108 - depends on CRYPTO 83 + select CRYPTO_ALGAPI 109 84 help 110 85 Whirlpool hash algorithm 512, 384 and 256-bit hashes 111 86 ··· 117 92 118 93 config CRYPTO_TGR192 119 94 tristate "Tiger digest algorithms" 120 - depends on CRYPTO 95 + select CRYPTO_ALGAPI 121 96 help 122 97 Tiger hash algorithm 192, 160 and 128-bit hashes 123 98 ··· 128 103 See also: 129 104 <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>. 130 105 106 + config CRYPTO_ECB 107 + tristate "ECB support" 108 + select CRYPTO_BLKCIPHER 109 + default m 110 + help 111 + ECB: Electronic CodeBook mode 112 + This is the simplest block cipher algorithm. It simply encrypts 113 + the input block by block. 114 + 115 + config CRYPTO_CBC 116 + tristate "CBC support" 117 + select CRYPTO_BLKCIPHER 118 + default m 119 + help 120 + CBC: Cipher Block Chaining mode 121 + This block cipher algorithm is required for IPSec. 122 + 131 123 config CRYPTO_DES 132 124 tristate "DES and Triple DES EDE cipher algorithms" 133 - depends on CRYPTO 125 + select CRYPTO_ALGAPI 134 126 help 135 127 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 
136 128 137 129 config CRYPTO_DES_S390 138 130 tristate "DES and Triple DES cipher algorithms (s390)" 139 - depends on CRYPTO && S390 131 + depends on S390 132 + select CRYPTO_ALGAPI 133 + select CRYPTO_BLKCIPHER 140 134 help 141 135 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 142 136 143 137 config CRYPTO_BLOWFISH 144 138 tristate "Blowfish cipher algorithm" 145 - depends on CRYPTO 139 + select CRYPTO_ALGAPI 146 140 help 147 141 Blowfish cipher algorithm, by Bruce Schneier. 148 142 ··· 174 130 175 131 config CRYPTO_TWOFISH 176 132 tristate "Twofish cipher algorithm" 177 - depends on CRYPTO 133 + select CRYPTO_ALGAPI 134 + select CRYPTO_TWOFISH_COMMON 178 135 help 179 136 Twofish cipher algorithm. 180 137 ··· 187 142 See also: 188 143 <http://www.schneier.com/twofish.html> 189 144 145 + config CRYPTO_TWOFISH_COMMON 146 + tristate 147 + help 148 + Common parts of the Twofish cipher algorithm shared by the 149 + generic c and the assembler implementations. 150 + 151 + config CRYPTO_TWOFISH_586 152 + tristate "Twofish cipher algorithms (i586)" 153 + depends on (X86 || UML_X86) && !64BIT 154 + select CRYPTO_ALGAPI 155 + select CRYPTO_TWOFISH_COMMON 156 + help 157 + Twofish cipher algorithm. 158 + 159 + Twofish was submitted as an AES (Advanced Encryption Standard) 160 + candidate cipher by researchers at CounterPane Systems. It is a 161 + 16 round block cipher supporting key sizes of 128, 192, and 256 162 + bits. 163 + 164 + See also: 165 + <http://www.schneier.com/twofish.html> 166 + 167 + config CRYPTO_TWOFISH_X86_64 168 + tristate "Twofish cipher algorithm (x86_64)" 169 + depends on (X86 || UML_X86) && 64BIT 170 + select CRYPTO_ALGAPI 171 + select CRYPTO_TWOFISH_COMMON 172 + help 173 + Twofish cipher algorithm (x86_64). 174 + 175 + Twofish was submitted as an AES (Advanced Encryption Standard) 176 + candidate cipher by researchers at CounterPane Systems. It is a 177 + 16 round block cipher supporting key sizes of 128, 192, and 256 178 + bits. 
179 + 180 + See also: 181 + <http://www.schneier.com/twofish.html> 182 + 190 183 config CRYPTO_SERPENT 191 184 tristate "Serpent cipher algorithm" 192 - depends on CRYPTO 185 + select CRYPTO_ALGAPI 193 186 help 194 187 Serpent cipher algorithm, by Anderson, Biham & Knudsen. 195 188 ··· 240 157 241 158 config CRYPTO_AES 242 159 tristate "AES cipher algorithms" 243 - depends on CRYPTO 160 + select CRYPTO_ALGAPI 244 161 help 245 162 AES cipher algorithms (FIPS-197). AES uses the Rijndael 246 163 algorithm. ··· 260 177 261 178 config CRYPTO_AES_586 262 179 tristate "AES cipher algorithms (i586)" 263 - depends on CRYPTO && ((X86 || UML_X86) && !64BIT) 180 + depends on (X86 || UML_X86) && !64BIT 181 + select CRYPTO_ALGAPI 264 182 help 265 183 AES cipher algorithms (FIPS-197). AES uses the Rijndael 266 184 algorithm. ··· 281 197 282 198 config CRYPTO_AES_X86_64 283 199 tristate "AES cipher algorithms (x86_64)" 284 - depends on CRYPTO && ((X86 || UML_X86) && 64BIT) 200 + depends on (X86 || UML_X86) && 64BIT 201 + select CRYPTO_ALGAPI 285 202 help 286 203 AES cipher algorithms (FIPS-197). AES uses the Rijndael 287 204 algorithm. ··· 302 217 303 218 config CRYPTO_AES_S390 304 219 tristate "AES cipher algorithms (s390)" 305 - depends on CRYPTO && S390 220 + depends on S390 221 + select CRYPTO_ALGAPI 222 + select CRYPTO_BLKCIPHER 306 223 help 307 224 This is the s390 hardware accelerated implementation of the 308 225 AES cipher algorithms (FIPS-197). AES uses the Rijndael ··· 324 237 325 238 config CRYPTO_CAST5 326 239 tristate "CAST5 (CAST-128) cipher algorithm" 327 - depends on CRYPTO 240 + select CRYPTO_ALGAPI 328 241 help 329 242 The CAST5 encryption algorithm (synonymous with CAST-128) is 330 243 described in RFC2144. 331 244 332 245 config CRYPTO_CAST6 333 246 tristate "CAST6 (CAST-256) cipher algorithm" 334 - depends on CRYPTO 247 + select CRYPTO_ALGAPI 335 248 help 336 249 The CAST6 encryption algorithm (synonymous with CAST-256) is 337 250 described in RFC2612. 
338 251 339 252 config CRYPTO_TEA 340 253 tristate "TEA, XTEA and XETA cipher algorithms" 341 - depends on CRYPTO 254 + select CRYPTO_ALGAPI 342 255 help 343 256 TEA cipher algorithm. 344 257 ··· 355 268 356 269 config CRYPTO_ARC4 357 270 tristate "ARC4 cipher algorithm" 358 - depends on CRYPTO 271 + select CRYPTO_ALGAPI 359 272 help 360 273 ARC4 cipher algorithm. 361 274 ··· 366 279 367 280 config CRYPTO_KHAZAD 368 281 tristate "Khazad cipher algorithm" 369 - depends on CRYPTO 282 + select CRYPTO_ALGAPI 370 283 help 371 284 Khazad cipher algorithm. 372 285 ··· 379 292 380 293 config CRYPTO_ANUBIS 381 294 tristate "Anubis cipher algorithm" 382 - depends on CRYPTO 295 + select CRYPTO_ALGAPI 383 296 help 384 297 Anubis cipher algorithm. 385 298 ··· 394 307 395 308 config CRYPTO_DEFLATE 396 309 tristate "Deflate compression algorithm" 397 - depends on CRYPTO 310 + select CRYPTO_ALGAPI 398 311 select ZLIB_INFLATE 399 312 select ZLIB_DEFLATE 400 313 help ··· 405 318 406 319 config CRYPTO_MICHAEL_MIC 407 320 tristate "Michael MIC keyed digest algorithm" 408 - depends on CRYPTO 321 + select CRYPTO_ALGAPI 409 322 help 410 323 Michael MIC is used for message integrity protection in TKIP 411 324 (IEEE 802.11i). This algorithm is required for TKIP, but it ··· 414 327 415 328 config CRYPTO_CRC32C 416 329 tristate "CRC32c CRC algorithm" 417 - depends on CRYPTO 330 + select CRYPTO_ALGAPI 418 331 select LIBCRC32C 419 332 help 420 333 Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used ··· 424 337 425 338 config CRYPTO_TEST 426 339 tristate "Testing module" 427 - depends on CRYPTO && m 340 + depends on m 341 + select CRYPTO_ALGAPI 428 342 help 429 343 Quick & dirty crypto test module. 430 344 431 345 source "drivers/crypto/Kconfig" 432 - endmenu 433 346 347 + endif # if CRYPTO 348 + 349 + endmenu
+13 -3
crypto/Makefile
··· 2 2 # Cryptographic API 3 3 # 4 4 5 - proc-crypto-$(CONFIG_PROC_FS) = proc.o 5 + obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o 6 6 7 - obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o \ 8 - $(proc-crypto-y) 7 + crypto_algapi-$(CONFIG_PROC_FS) += proc.o 8 + crypto_algapi-objs := algapi.o $(crypto_algapi-y) 9 + obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o 9 10 11 + obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o 12 + 13 + crypto_hash-objs := hash.o 14 + obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o 15 + 16 + obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o 10 17 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o 11 18 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o 12 19 obj-$(CONFIG_CRYPTO_MD4) += md4.o ··· 23 16 obj-$(CONFIG_CRYPTO_SHA512) += sha512.o 24 17 obj-$(CONFIG_CRYPTO_WP512) += wp512.o 25 18 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o 19 + obj-$(CONFIG_CRYPTO_ECB) += ecb.o 20 + obj-$(CONFIG_CRYPTO_CBC) += cbc.o 26 21 obj-$(CONFIG_CRYPTO_DES) += des.o 27 22 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o 28 23 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o 24 + obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o 29 25 obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o 30 26 obj-$(CONFIG_CRYPTO_AES) += aes.o 31 27 obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
+3 -2
crypto/aes.c
··· 249 249 } 250 250 251 251 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 252 - unsigned int key_len, u32 *flags) 252 + unsigned int key_len) 253 253 { 254 254 struct aes_ctx *ctx = crypto_tfm_ctx(tfm); 255 255 const __le32 *key = (const __le32 *)in_key; 256 + u32 *flags = &tfm->crt_flags; 256 257 u32 i, t, u, v, w; 257 258 258 - if (key_len != 16 && key_len != 24 && key_len != 32) { 259 + if (key_len % 8) { 259 260 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 260 261 return -EINVAL; 261 262 }
+486
crypto/algapi.c
··· 1 + /* 2 + * Cryptographic API for algorithms (i.e., low-level API). 3 + * 4 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #include <linux/err.h> 14 + #include <linux/errno.h> 15 + #include <linux/init.h> 16 + #include <linux/kernel.h> 17 + #include <linux/list.h> 18 + #include <linux/module.h> 19 + #include <linux/rtnetlink.h> 20 + #include <linux/string.h> 21 + 22 + #include "internal.h" 23 + 24 + static LIST_HEAD(crypto_template_list); 25 + 26 + void crypto_larval_error(const char *name, u32 type, u32 mask) 27 + { 28 + struct crypto_alg *alg; 29 + 30 + down_read(&crypto_alg_sem); 31 + alg = __crypto_alg_lookup(name, type, mask); 32 + up_read(&crypto_alg_sem); 33 + 34 + if (alg) { 35 + if (crypto_is_larval(alg)) { 36 + struct crypto_larval *larval = (void *)alg; 37 + complete(&larval->completion); 38 + } 39 + crypto_mod_put(alg); 40 + } 41 + } 42 + EXPORT_SYMBOL_GPL(crypto_larval_error); 43 + 44 + static inline int crypto_set_driver_name(struct crypto_alg *alg) 45 + { 46 + static const char suffix[] = "-generic"; 47 + char *driver_name = alg->cra_driver_name; 48 + int len; 49 + 50 + if (*driver_name) 51 + return 0; 52 + 53 + len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); 54 + if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) 55 + return -ENAMETOOLONG; 56 + 57 + memcpy(driver_name + len, suffix, sizeof(suffix)); 58 + return 0; 59 + } 60 + 61 + static int crypto_check_alg(struct crypto_alg *alg) 62 + { 63 + if (alg->cra_alignmask & (alg->cra_alignmask + 1)) 64 + return -EINVAL; 65 + 66 + if (alg->cra_alignmask & alg->cra_blocksize) 67 + return -EINVAL; 68 + 69 + if (alg->cra_blocksize > PAGE_SIZE / 8) 70 + return -EINVAL; 71 + 72 + if 
(alg->cra_priority < 0) 73 + return -EINVAL; 74 + 75 + return crypto_set_driver_name(alg); 76 + } 77 + 78 + static void crypto_destroy_instance(struct crypto_alg *alg) 79 + { 80 + struct crypto_instance *inst = (void *)alg; 81 + struct crypto_template *tmpl = inst->tmpl; 82 + 83 + tmpl->free(inst); 84 + crypto_tmpl_put(tmpl); 85 + } 86 + 87 + static void crypto_remove_spawns(struct list_head *spawns, 88 + struct list_head *list) 89 + { 90 + struct crypto_spawn *spawn, *n; 91 + 92 + list_for_each_entry_safe(spawn, n, spawns, list) { 93 + struct crypto_instance *inst = spawn->inst; 94 + struct crypto_template *tmpl = inst->tmpl; 95 + 96 + list_del_init(&spawn->list); 97 + spawn->alg = NULL; 98 + 99 + if (crypto_is_dead(&inst->alg)) 100 + continue; 101 + 102 + inst->alg.cra_flags |= CRYPTO_ALG_DEAD; 103 + if (!tmpl || !crypto_tmpl_get(tmpl)) 104 + continue; 105 + 106 + crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); 107 + list_move(&inst->alg.cra_list, list); 108 + hlist_del(&inst->list); 109 + inst->alg.cra_destroy = crypto_destroy_instance; 110 + 111 + if (!list_empty(&inst->alg.cra_users)) { 112 + if (&n->list == spawns) 113 + n = list_entry(inst->alg.cra_users.next, 114 + typeof(*n), list); 115 + __list_splice(&inst->alg.cra_users, spawns->prev); 116 + } 117 + } 118 + } 119 + 120 + static int __crypto_register_alg(struct crypto_alg *alg, 121 + struct list_head *list) 122 + { 123 + struct crypto_alg *q; 124 + int ret = -EAGAIN; 125 + 126 + if (crypto_is_dead(alg)) 127 + goto out; 128 + 129 + INIT_LIST_HEAD(&alg->cra_users); 130 + 131 + ret = -EEXIST; 132 + 133 + atomic_set(&alg->cra_refcnt, 1); 134 + list_for_each_entry(q, &crypto_alg_list, cra_list) { 135 + if (q == alg) 136 + goto out; 137 + 138 + if (crypto_is_moribund(q)) 139 + continue; 140 + 141 + if (crypto_is_larval(q)) { 142 + struct crypto_larval *larval = (void *)q; 143 + 144 + if (strcmp(alg->cra_name, q->cra_name) && 145 + strcmp(alg->cra_driver_name, q->cra_name)) 146 + continue; 147 + 148 + if 
(larval->adult) 149 + continue; 150 + if ((q->cra_flags ^ alg->cra_flags) & larval->mask) 151 + continue; 152 + if (!crypto_mod_get(alg)) 153 + continue; 154 + 155 + larval->adult = alg; 156 + complete(&larval->completion); 157 + continue; 158 + } 159 + 160 + if (strcmp(alg->cra_name, q->cra_name)) 161 + continue; 162 + 163 + if (strcmp(alg->cra_driver_name, q->cra_driver_name) && 164 + q->cra_priority > alg->cra_priority) 165 + continue; 166 + 167 + crypto_remove_spawns(&q->cra_users, list); 168 + } 169 + 170 + list_add(&alg->cra_list, &crypto_alg_list); 171 + 172 + crypto_notify(CRYPTO_MSG_ALG_REGISTER, alg); 173 + ret = 0; 174 + 175 + out: 176 + return ret; 177 + } 178 + 179 + static void crypto_remove_final(struct list_head *list) 180 + { 181 + struct crypto_alg *alg; 182 + struct crypto_alg *n; 183 + 184 + list_for_each_entry_safe(alg, n, list, cra_list) { 185 + list_del_init(&alg->cra_list); 186 + crypto_alg_put(alg); 187 + } 188 + } 189 + 190 + int crypto_register_alg(struct crypto_alg *alg) 191 + { 192 + LIST_HEAD(list); 193 + int err; 194 + 195 + err = crypto_check_alg(alg); 196 + if (err) 197 + return err; 198 + 199 + down_write(&crypto_alg_sem); 200 + err = __crypto_register_alg(alg, &list); 201 + up_write(&crypto_alg_sem); 202 + 203 + crypto_remove_final(&list); 204 + return err; 205 + } 206 + EXPORT_SYMBOL_GPL(crypto_register_alg); 207 + 208 + static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) 209 + { 210 + if (unlikely(list_empty(&alg->cra_list))) 211 + return -ENOENT; 212 + 213 + alg->cra_flags |= CRYPTO_ALG_DEAD; 214 + 215 + crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); 216 + list_del_init(&alg->cra_list); 217 + crypto_remove_spawns(&alg->cra_users, list); 218 + 219 + return 0; 220 + } 221 + 222 + int crypto_unregister_alg(struct crypto_alg *alg) 223 + { 224 + int ret; 225 + LIST_HEAD(list); 226 + 227 + down_write(&crypto_alg_sem); 228 + ret = crypto_remove_alg(alg, &list); 229 + up_write(&crypto_alg_sem); 230 + 231 + if 
(ret) 232 + return ret; 233 + 234 + BUG_ON(atomic_read(&alg->cra_refcnt) != 1); 235 + if (alg->cra_destroy) 236 + alg->cra_destroy(alg); 237 + 238 + crypto_remove_final(&list); 239 + return 0; 240 + } 241 + EXPORT_SYMBOL_GPL(crypto_unregister_alg); 242 + 243 + int crypto_register_template(struct crypto_template *tmpl) 244 + { 245 + struct crypto_template *q; 246 + int err = -EEXIST; 247 + 248 + down_write(&crypto_alg_sem); 249 + 250 + list_for_each_entry(q, &crypto_template_list, list) { 251 + if (q == tmpl) 252 + goto out; 253 + } 254 + 255 + list_add(&tmpl->list, &crypto_template_list); 256 + crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl); 257 + err = 0; 258 + out: 259 + up_write(&crypto_alg_sem); 260 + return err; 261 + } 262 + EXPORT_SYMBOL_GPL(crypto_register_template); 263 + 264 + void crypto_unregister_template(struct crypto_template *tmpl) 265 + { 266 + struct crypto_instance *inst; 267 + struct hlist_node *p, *n; 268 + struct hlist_head *list; 269 + LIST_HEAD(users); 270 + 271 + down_write(&crypto_alg_sem); 272 + 273 + BUG_ON(list_empty(&tmpl->list)); 274 + list_del_init(&tmpl->list); 275 + 276 + list = &tmpl->instances; 277 + hlist_for_each_entry(inst, p, list, list) { 278 + int err = crypto_remove_alg(&inst->alg, &users); 279 + BUG_ON(err); 280 + } 281 + 282 + crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl); 283 + 284 + up_write(&crypto_alg_sem); 285 + 286 + hlist_for_each_entry_safe(inst, p, n, list, list) { 287 + BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); 288 + tmpl->free(inst); 289 + } 290 + crypto_remove_final(&users); 291 + } 292 + EXPORT_SYMBOL_GPL(crypto_unregister_template); 293 + 294 + static struct crypto_template *__crypto_lookup_template(const char *name) 295 + { 296 + struct crypto_template *q, *tmpl = NULL; 297 + 298 + down_read(&crypto_alg_sem); 299 + list_for_each_entry(q, &crypto_template_list, list) { 300 + if (strcmp(q->name, name)) 301 + continue; 302 + if (unlikely(!crypto_tmpl_get(q))) 303 + continue; 304 + 305 + tmpl = q; 306 
+ break; 307 + } 308 + up_read(&crypto_alg_sem); 309 + 310 + return tmpl; 311 + } 312 + 313 + struct crypto_template *crypto_lookup_template(const char *name) 314 + { 315 + return try_then_request_module(__crypto_lookup_template(name), name); 316 + } 317 + EXPORT_SYMBOL_GPL(crypto_lookup_template); 318 + 319 + int crypto_register_instance(struct crypto_template *tmpl, 320 + struct crypto_instance *inst) 321 + { 322 + LIST_HEAD(list); 323 + int err = -EINVAL; 324 + 325 + if (inst->alg.cra_destroy) 326 + goto err; 327 + 328 + err = crypto_check_alg(&inst->alg); 329 + if (err) 330 + goto err; 331 + 332 + inst->alg.cra_module = tmpl->module; 333 + 334 + down_write(&crypto_alg_sem); 335 + 336 + err = __crypto_register_alg(&inst->alg, &list); 337 + if (err) 338 + goto unlock; 339 + 340 + hlist_add_head(&inst->list, &tmpl->instances); 341 + inst->tmpl = tmpl; 342 + 343 + unlock: 344 + up_write(&crypto_alg_sem); 345 + 346 + crypto_remove_final(&list); 347 + 348 + err: 349 + return err; 350 + } 351 + EXPORT_SYMBOL_GPL(crypto_register_instance); 352 + 353 + int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, 354 + struct crypto_instance *inst) 355 + { 356 + int err = -EAGAIN; 357 + 358 + spawn->inst = inst; 359 + 360 + down_write(&crypto_alg_sem); 361 + if (!crypto_is_moribund(alg)) { 362 + list_add(&spawn->list, &alg->cra_users); 363 + spawn->alg = alg; 364 + err = 0; 365 + } 366 + up_write(&crypto_alg_sem); 367 + 368 + return err; 369 + } 370 + EXPORT_SYMBOL_GPL(crypto_init_spawn); 371 + 372 + void crypto_drop_spawn(struct crypto_spawn *spawn) 373 + { 374 + down_write(&crypto_alg_sem); 375 + list_del(&spawn->list); 376 + up_write(&crypto_alg_sem); 377 + } 378 + EXPORT_SYMBOL_GPL(crypto_drop_spawn); 379 + 380 + struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn) 381 + { 382 + struct crypto_alg *alg; 383 + struct crypto_alg *alg2; 384 + struct crypto_tfm *tfm; 385 + 386 + down_read(&crypto_alg_sem); 387 + alg = spawn->alg; 388 + alg2 = alg; 
389 + if (alg2) 390 + alg2 = crypto_mod_get(alg2); 391 + up_read(&crypto_alg_sem); 392 + 393 + if (!alg2) { 394 + if (alg) 395 + crypto_shoot_alg(alg); 396 + return ERR_PTR(-EAGAIN); 397 + } 398 + 399 + tfm = __crypto_alloc_tfm(alg, 0); 400 + if (IS_ERR(tfm)) 401 + crypto_mod_put(alg); 402 + 403 + return tfm; 404 + } 405 + EXPORT_SYMBOL_GPL(crypto_spawn_tfm); 406 + 407 + int crypto_register_notifier(struct notifier_block *nb) 408 + { 409 + return blocking_notifier_chain_register(&crypto_chain, nb); 410 + } 411 + EXPORT_SYMBOL_GPL(crypto_register_notifier); 412 + 413 + int crypto_unregister_notifier(struct notifier_block *nb) 414 + { 415 + return blocking_notifier_chain_unregister(&crypto_chain, nb); 416 + } 417 + EXPORT_SYMBOL_GPL(crypto_unregister_notifier); 418 + 419 + struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, 420 + u32 type, u32 mask) 421 + { 422 + struct rtattr *rta = param; 423 + struct crypto_attr_alg *alga; 424 + 425 + if (!RTA_OK(rta, len)) 426 + return ERR_PTR(-EBADR); 427 + if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga)) 428 + return ERR_PTR(-EINVAL); 429 + 430 + alga = RTA_DATA(rta); 431 + alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0; 432 + 433 + return crypto_alg_mod_lookup(alga->name, type, mask); 434 + } 435 + EXPORT_SYMBOL_GPL(crypto_get_attr_alg); 436 + 437 + struct crypto_instance *crypto_alloc_instance(const char *name, 438 + struct crypto_alg *alg) 439 + { 440 + struct crypto_instance *inst; 441 + struct crypto_spawn *spawn; 442 + int err; 443 + 444 + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); 445 + if (!inst) 446 + return ERR_PTR(-ENOMEM); 447 + 448 + err = -ENAMETOOLONG; 449 + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, 450 + alg->cra_name) >= CRYPTO_MAX_ALG_NAME) 451 + goto err_free_inst; 452 + 453 + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", 454 + name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 455 + goto err_free_inst; 456 
+ 457 + spawn = crypto_instance_ctx(inst); 458 + err = crypto_init_spawn(spawn, alg, inst); 459 + 460 + if (err) 461 + goto err_free_inst; 462 + 463 + return inst; 464 + 465 + err_free_inst: 466 + kfree(inst); 467 + return ERR_PTR(err); 468 + } 469 + EXPORT_SYMBOL_GPL(crypto_alloc_instance); 470 + 471 + static int __init crypto_algapi_init(void) 472 + { 473 + crypto_init_proc(); 474 + return 0; 475 + } 476 + 477 + static void __exit crypto_algapi_exit(void) 478 + { 479 + crypto_exit_proc(); 480 + } 481 + 482 + module_init(crypto_algapi_init); 483 + module_exit(crypto_algapi_exit); 484 + 485 + MODULE_LICENSE("GPL"); 486 + MODULE_DESCRIPTION("Cryptographic algorithms API");
+2 -1
crypto/anubis.c
··· 461 461 }; 462 462 463 463 static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, 464 - unsigned int key_len, u32 *flags) 464 + unsigned int key_len) 465 465 { 466 466 struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); 467 467 const __be32 *key = (const __be32 *)in_key; 468 + u32 *flags = &tfm->crt_flags; 468 469 int N, R, i, r; 469 470 u32 kappa[ANUBIS_MAX_N]; 470 471 u32 inter[ANUBIS_MAX_N];
+303 -133
crypto/api.c
··· 15 15 * 16 16 */ 17 17 18 - #include <linux/compiler.h> 19 - #include <linux/init.h> 20 - #include <linux/crypto.h> 18 + #include <linux/err.h> 21 19 #include <linux/errno.h> 22 20 #include <linux/kernel.h> 23 21 #include <linux/kmod.h> 24 - #include <linux/rwsem.h> 22 + #include <linux/module.h> 23 + #include <linux/param.h> 24 + #include <linux/sched.h> 25 25 #include <linux/slab.h> 26 26 #include <linux/string.h> 27 27 #include "internal.h" 28 28 29 29 LIST_HEAD(crypto_alg_list); 30 + EXPORT_SYMBOL_GPL(crypto_alg_list); 30 31 DECLARE_RWSEM(crypto_alg_sem); 32 + EXPORT_SYMBOL_GPL(crypto_alg_sem); 31 33 32 - static inline int crypto_alg_get(struct crypto_alg *alg) 34 + BLOCKING_NOTIFIER_HEAD(crypto_chain); 35 + EXPORT_SYMBOL_GPL(crypto_chain); 36 + 37 + static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) 33 38 { 34 - return try_module_get(alg->cra_module); 39 + atomic_inc(&alg->cra_refcnt); 40 + return alg; 35 41 } 36 42 37 - static inline void crypto_alg_put(struct crypto_alg *alg) 43 + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) 38 44 { 45 + return try_module_get(alg->cra_module) ? 
crypto_alg_get(alg) : NULL; 46 + } 47 + EXPORT_SYMBOL_GPL(crypto_mod_get); 48 + 49 + void crypto_mod_put(struct crypto_alg *alg) 50 + { 51 + crypto_alg_put(alg); 39 52 module_put(alg->cra_module); 40 53 } 54 + EXPORT_SYMBOL_GPL(crypto_mod_put); 41 55 42 - static struct crypto_alg *crypto_alg_lookup(const char *name) 56 + struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask) 43 57 { 44 58 struct crypto_alg *q, *alg = NULL; 45 - int best = -1; 59 + int best = -2; 46 60 47 - if (!name) 48 - return NULL; 49 - 50 - down_read(&crypto_alg_sem); 51 - 52 61 list_for_each_entry(q, &crypto_alg_list, cra_list) { 53 62 int exact, fuzzy; 63 + 64 + if (crypto_is_moribund(q)) 65 + continue; 66 + 67 + if ((q->cra_flags ^ type) & mask) 68 + continue; 69 + 70 + if (crypto_is_larval(q) && 71 + ((struct crypto_larval *)q)->mask != mask) 72 + continue; 54 73 55 74 exact = !strcmp(q->cra_driver_name, name); 56 75 fuzzy = !strcmp(q->cra_name, name); 57 76 if (!exact && !(fuzzy && q->cra_priority > best)) 58 77 continue; 59 78 60 - if (unlikely(!crypto_alg_get(q))) 79 + if (unlikely(!crypto_mod_get(q))) 61 80 continue; 62 81 63 82 best = q->cra_priority; 64 83 if (alg) 65 - crypto_alg_put(alg); 84 + crypto_mod_put(alg); 66 85 alg = q; 67 86 68 87 if (exact) 69 88 break; 70 89 } 71 - 72 - up_read(&crypto_alg_sem); 90 + 91 + return alg; 92 + } 93 + EXPORT_SYMBOL_GPL(__crypto_alg_lookup); 94 + 95 + static void crypto_larval_destroy(struct crypto_alg *alg) 96 + { 97 + struct crypto_larval *larval = (void *)alg; 98 + 99 + BUG_ON(!crypto_is_larval(alg)); 100 + if (larval->adult) 101 + crypto_mod_put(larval->adult); 102 + kfree(larval); 103 + } 104 + 105 + static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type, 106 + u32 mask) 107 + { 108 + struct crypto_alg *alg; 109 + struct crypto_larval *larval; 110 + 111 + larval = kzalloc(sizeof(*larval), GFP_KERNEL); 112 + if (!larval) 113 + return ERR_PTR(-ENOMEM); 114 + 115 + larval->mask = mask; 116 + 
larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; 117 + larval->alg.cra_priority = -1; 118 + larval->alg.cra_destroy = crypto_larval_destroy; 119 + 120 + atomic_set(&larval->alg.cra_refcnt, 2); 121 + strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); 122 + init_completion(&larval->completion); 123 + 124 + down_write(&crypto_alg_sem); 125 + alg = __crypto_alg_lookup(name, type, mask); 126 + if (!alg) { 127 + alg = &larval->alg; 128 + list_add(&alg->cra_list, &crypto_alg_list); 129 + } 130 + up_write(&crypto_alg_sem); 131 + 132 + if (alg != &larval->alg) 133 + kfree(larval); 134 + 73 135 return alg; 74 136 } 75 137 76 - /* A far more intelligent version of this is planned. For now, just 77 - * try an exact match on the name of the algorithm. */ 78 - static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name) 138 + static void crypto_larval_kill(struct crypto_alg *alg) 79 139 { 80 - return try_then_request_module(crypto_alg_lookup(name), name); 140 + struct crypto_larval *larval = (void *)alg; 141 + 142 + down_write(&crypto_alg_sem); 143 + list_del(&alg->cra_list); 144 + up_write(&crypto_alg_sem); 145 + complete(&larval->completion); 146 + crypto_alg_put(alg); 81 147 } 148 + 149 + static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) 150 + { 151 + struct crypto_larval *larval = (void *)alg; 152 + 153 + wait_for_completion_interruptible_timeout(&larval->completion, 60 * HZ); 154 + alg = larval->adult; 155 + if (alg) { 156 + if (!crypto_mod_get(alg)) 157 + alg = ERR_PTR(-EAGAIN); 158 + } else 159 + alg = ERR_PTR(-ENOENT); 160 + crypto_mod_put(&larval->alg); 161 + 162 + return alg; 163 + } 164 + 165 + static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, 166 + u32 mask) 167 + { 168 + struct crypto_alg *alg; 169 + 170 + down_read(&crypto_alg_sem); 171 + alg = __crypto_alg_lookup(name, type, mask); 172 + up_read(&crypto_alg_sem); 173 + 174 + return alg; 175 + } 176 + 177 + struct crypto_alg *crypto_alg_mod_lookup(const 
char *name, u32 type, u32 mask) 178 + { 179 + struct crypto_alg *alg; 180 + struct crypto_alg *larval; 181 + int ok; 182 + 183 + if (!name) 184 + return ERR_PTR(-ENOENT); 185 + 186 + mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); 187 + type &= mask; 188 + 189 + alg = try_then_request_module(crypto_alg_lookup(name, type, mask), 190 + name); 191 + if (alg) 192 + return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; 193 + 194 + larval = crypto_larval_alloc(name, type, mask); 195 + if (IS_ERR(larval) || !crypto_is_larval(larval)) 196 + return larval; 197 + 198 + ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval); 199 + if (ok == NOTIFY_DONE) { 200 + request_module("cryptomgr"); 201 + ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval); 202 + } 203 + 204 + if (ok == NOTIFY_STOP) 205 + alg = crypto_larval_wait(larval); 206 + else { 207 + crypto_mod_put(larval); 208 + alg = ERR_PTR(-ENOENT); 209 + } 210 + crypto_larval_kill(larval); 211 + return alg; 212 + } 213 + EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); 82 214 83 215 static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags) 84 216 { ··· 226 94 227 95 case CRYPTO_ALG_TYPE_COMPRESS: 228 96 return crypto_init_compress_flags(tfm, flags); 229 - 230 - default: 231 - break; 232 97 } 233 98 234 - BUG(); 235 - return -EINVAL; 99 + return 0; 236 100 } 237 101 238 102 static int crypto_init_ops(struct crypto_tfm *tfm) 239 103 { 104 + const struct crypto_type *type = tfm->__crt_alg->cra_type; 105 + 106 + if (type) 107 + return type->init(tfm); 108 + 240 109 switch (crypto_tfm_alg_type(tfm)) { 241 110 case CRYPTO_ALG_TYPE_CIPHER: 242 111 return crypto_init_cipher_ops(tfm); ··· 258 125 259 126 static void crypto_exit_ops(struct crypto_tfm *tfm) 260 127 { 128 + const struct crypto_type *type = tfm->__crt_alg->cra_type; 129 + 130 + if (type) { 131 + if (type->exit) 132 + type->exit(tfm); 133 + return; 134 + } 135 + 261 136 switch (crypto_tfm_alg_type(tfm)) { 262 137 case CRYPTO_ALG_TYPE_CIPHER: 263 138 
crypto_exit_cipher_ops(tfm); ··· 287 146 288 147 static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags) 289 148 { 149 + const struct crypto_type *type = alg->cra_type; 290 150 unsigned int len; 151 + 152 + len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1); 153 + if (type) 154 + return len + type->ctxsize(alg); 291 155 292 156 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 293 157 default: 294 158 BUG(); 295 159 296 160 case CRYPTO_ALG_TYPE_CIPHER: 297 - len = crypto_cipher_ctxsize(alg, flags); 161 + len += crypto_cipher_ctxsize(alg, flags); 298 162 break; 299 163 300 164 case CRYPTO_ALG_TYPE_DIGEST: 301 - len = crypto_digest_ctxsize(alg, flags); 165 + len += crypto_digest_ctxsize(alg, flags); 302 166 break; 303 167 304 168 case CRYPTO_ALG_TYPE_COMPRESS: 305 - len = crypto_compress_ctxsize(alg, flags); 169 + len += crypto_compress_ctxsize(alg, flags); 306 170 break; 307 171 } 308 172 309 - return len + (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1)); 173 + return len; 310 174 } 311 175 312 - struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) 176 + void crypto_shoot_alg(struct crypto_alg *alg) 177 + { 178 + down_write(&crypto_alg_sem); 179 + alg->cra_flags |= CRYPTO_ALG_DYING; 180 + up_write(&crypto_alg_sem); 181 + } 182 + EXPORT_SYMBOL_GPL(crypto_shoot_alg); 183 + 184 + struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags) 313 185 { 314 186 struct crypto_tfm *tfm = NULL; 315 - struct crypto_alg *alg; 316 187 unsigned int tfm_size; 317 - 318 - alg = crypto_alg_mod_lookup(name); 319 - if (alg == NULL) 320 - goto out; 188 + int err = -ENOMEM; 321 189 322 190 tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags); 323 191 tfm = kzalloc(tfm_size, GFP_KERNEL); 324 192 if (tfm == NULL) 325 - goto out_put; 193 + goto out; 326 194 327 195 tfm->__crt_alg = alg; 328 - 329 - if (crypto_init_flags(tfm, flags)) 196 + 197 + err = crypto_init_flags(tfm, flags); 198 + if (err) 330 199 goto out_free_tfm; 331 200 332 
- if (crypto_init_ops(tfm)) 201 + err = crypto_init_ops(tfm); 202 + if (err) 333 203 goto out_free_tfm; 334 204 335 - if (alg->cra_init && alg->cra_init(tfm)) 205 + if (alg->cra_init && (err = alg->cra_init(tfm))) { 206 + if (err == -EAGAIN) 207 + crypto_shoot_alg(alg); 336 208 goto cra_init_failed; 209 + } 337 210 338 211 goto out; 339 212 ··· 355 200 crypto_exit_ops(tfm); 356 201 out_free_tfm: 357 202 kfree(tfm); 358 - tfm = NULL; 359 - out_put: 360 - crypto_alg_put(alg); 203 + tfm = ERR_PTR(err); 361 204 out: 362 205 return tfm; 363 206 } 207 + EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); 364 208 209 + struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) 210 + { 211 + struct crypto_tfm *tfm = NULL; 212 + int err; 213 + 214 + do { 215 + struct crypto_alg *alg; 216 + 217 + alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC); 218 + err = PTR_ERR(alg); 219 + if (IS_ERR(alg)) 220 + continue; 221 + 222 + tfm = __crypto_alloc_tfm(alg, flags); 223 + err = 0; 224 + if (IS_ERR(tfm)) { 225 + crypto_mod_put(alg); 226 + err = PTR_ERR(tfm); 227 + tfm = NULL; 228 + } 229 + } while (err == -EAGAIN && !signal_pending(current)); 230 + 231 + return tfm; 232 + } 233 + 234 + /* 235 + * crypto_alloc_base - Locate algorithm and allocate transform 236 + * @alg_name: Name of algorithm 237 + * @type: Type of algorithm 238 + * @mask: Mask for type comparison 239 + * 240 + * crypto_alloc_base() will first attempt to locate an already loaded 241 + * algorithm. If that fails and the kernel supports dynamically loadable 242 + * modules, it will then attempt to load a module of the same name or 243 + * alias. If that fails it will send a query to any loaded crypto manager 244 + * to construct an algorithm on the fly. A refcount is grabbed on the 245 + * algorithm which is then associated with the new transform. 246 + * 247 + * The returned transform is of a non-determinate type. 
Most people 248 + * should use one of the more specific allocation functions such as 249 + * crypto_alloc_blkcipher. 250 + * 251 + * In case of error the return value is an error pointer. 252 + */ 253 + struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask) 254 + { 255 + struct crypto_tfm *tfm; 256 + int err; 257 + 258 + for (;;) { 259 + struct crypto_alg *alg; 260 + 261 + alg = crypto_alg_mod_lookup(alg_name, type, mask); 262 + err = PTR_ERR(alg); 263 + tfm = ERR_PTR(err); 264 + if (IS_ERR(alg)) 265 + goto err; 266 + 267 + tfm = __crypto_alloc_tfm(alg, 0); 268 + if (!IS_ERR(tfm)) 269 + break; 270 + 271 + crypto_mod_put(alg); 272 + err = PTR_ERR(tfm); 273 + 274 + err: 275 + if (err != -EAGAIN) 276 + break; 277 + if (signal_pending(current)) { 278 + err = -EINTR; 279 + break; 280 + } 281 + }; 282 + 283 + return tfm; 284 + } 285 + EXPORT_SYMBOL_GPL(crypto_alloc_base); 286 + 287 + /* 288 + * crypto_free_tfm - Free crypto transform 289 + * @tfm: Transform to free 290 + * 291 + * crypto_free_tfm() frees up the transform and any associated resources, 292 + * then drops the refcount on the associated algorithm. 
293 + */ 365 294 void crypto_free_tfm(struct crypto_tfm *tfm) 366 295 { 367 296 struct crypto_alg *alg; ··· 460 221 if (alg->cra_exit) 461 222 alg->cra_exit(tfm); 462 223 crypto_exit_ops(tfm); 463 - crypto_alg_put(alg); 224 + crypto_mod_put(alg); 464 225 memset(tfm, 0, size); 465 226 kfree(tfm); 466 - } 467 - 468 - static inline int crypto_set_driver_name(struct crypto_alg *alg) 469 - { 470 - static const char suffix[] = "-generic"; 471 - char *driver_name = alg->cra_driver_name; 472 - int len; 473 - 474 - if (*driver_name) 475 - return 0; 476 - 477 - len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); 478 - if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) 479 - return -ENAMETOOLONG; 480 - 481 - memcpy(driver_name + len, suffix, sizeof(suffix)); 482 - return 0; 483 - } 484 - 485 - int crypto_register_alg(struct crypto_alg *alg) 486 - { 487 - int ret; 488 - struct crypto_alg *q; 489 - 490 - if (alg->cra_alignmask & (alg->cra_alignmask + 1)) 491 - return -EINVAL; 492 - 493 - if (alg->cra_alignmask & alg->cra_blocksize) 494 - return -EINVAL; 495 - 496 - if (alg->cra_blocksize > PAGE_SIZE / 8) 497 - return -EINVAL; 498 - 499 - if (alg->cra_priority < 0) 500 - return -EINVAL; 501 - 502 - ret = crypto_set_driver_name(alg); 503 - if (unlikely(ret)) 504 - return ret; 505 - 506 - down_write(&crypto_alg_sem); 507 - 508 - list_for_each_entry(q, &crypto_alg_list, cra_list) { 509 - if (q == alg) { 510 - ret = -EEXIST; 511 - goto out; 512 - } 513 - } 514 - 515 - list_add(&alg->cra_list, &crypto_alg_list); 516 - out: 517 - up_write(&crypto_alg_sem); 518 - return ret; 519 - } 520 - 521 - int crypto_unregister_alg(struct crypto_alg *alg) 522 - { 523 - int ret = -ENOENT; 524 - struct crypto_alg *q; 525 - 526 - BUG_ON(!alg->cra_module); 527 - 528 - down_write(&crypto_alg_sem); 529 - list_for_each_entry(q, &crypto_alg_list, cra_list) { 530 - if (alg == q) { 531 - list_del(&alg->cra_list); 532 - ret = 0; 533 - goto out; 534 - } 535 - } 536 - out: 537 - 
up_write(&crypto_alg_sem); 538 - return ret; 539 227 } 540 228 541 229 int crypto_alg_available(const char *name, u32 flags) 542 230 { 543 231 int ret = 0; 544 - struct crypto_alg *alg = crypto_alg_mod_lookup(name); 232 + struct crypto_alg *alg = crypto_alg_mod_lookup(name, 0, 233 + CRYPTO_ALG_ASYNC); 545 234 546 - if (alg) { 547 - crypto_alg_put(alg); 235 + if (!IS_ERR(alg)) { 236 + crypto_mod_put(alg); 548 237 ret = 1; 549 238 } 550 239 551 240 return ret; 552 241 } 553 242 554 - static int __init init_crypto(void) 555 - { 556 - printk(KERN_INFO "Initializing Cryptographic API\n"); 557 - crypto_init_proc(); 558 - return 0; 559 - } 560 - 561 - __initcall(init_crypto); 562 - 563 - EXPORT_SYMBOL_GPL(crypto_register_alg); 564 - EXPORT_SYMBOL_GPL(crypto_unregister_alg); 565 243 EXPORT_SYMBOL_GPL(crypto_alloc_tfm); 566 244 EXPORT_SYMBOL_GPL(crypto_free_tfm); 567 245 EXPORT_SYMBOL_GPL(crypto_alg_available); 246 + 247 + int crypto_has_alg(const char *name, u32 type, u32 mask) 248 + { 249 + int ret = 0; 250 + struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); 251 + 252 + if (!IS_ERR(alg)) { 253 + crypto_mod_put(alg); 254 + ret = 1; 255 + } 256 + 257 + return ret; 258 + } 259 + EXPORT_SYMBOL_GPL(crypto_has_alg);
+1 -1
crypto/arc4.c
··· 25 25 }; 26 26 27 27 static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, 28 - unsigned int key_len, u32 *flags) 28 + unsigned int key_len) 29 29 { 30 30 struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); 31 31 int i, j = 0, k = 0;
+405
crypto/blkcipher.c
··· 1 + /* 2 + * Block chaining cipher operations. 3 + * 4 + * Generic encrypt/decrypt wrapper for ciphers, handles operations across 5 + * multiple page boundaries by using temporary blocks. In user context, 6 + * the kernel is given a chance to schedule us once per page. 7 + * 8 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 9 + * 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms of the GNU General Public License as published by the Free 12 + * Software Foundation; either version 2 of the License, or (at your option) 13 + * any later version. 14 + * 15 + */ 16 + 17 + #include <linux/crypto.h> 18 + #include <linux/errno.h> 19 + #include <linux/kernel.h> 20 + #include <linux/io.h> 21 + #include <linux/module.h> 22 + #include <linux/scatterlist.h> 23 + #include <linux/seq_file.h> 24 + #include <linux/slab.h> 25 + #include <linux/string.h> 26 + 27 + #include "internal.h" 28 + #include "scatterwalk.h" 29 + 30 + enum { 31 + BLKCIPHER_WALK_PHYS = 1 << 0, 32 + BLKCIPHER_WALK_SLOW = 1 << 1, 33 + BLKCIPHER_WALK_COPY = 1 << 2, 34 + BLKCIPHER_WALK_DIFF = 1 << 3, 35 + }; 36 + 37 + static int blkcipher_walk_next(struct blkcipher_desc *desc, 38 + struct blkcipher_walk *walk); 39 + static int blkcipher_walk_first(struct blkcipher_desc *desc, 40 + struct blkcipher_walk *walk); 41 + 42 + static inline void blkcipher_map_src(struct blkcipher_walk *walk) 43 + { 44 + walk->src.virt.addr = scatterwalk_map(&walk->in, 0); 45 + } 46 + 47 + static inline void blkcipher_map_dst(struct blkcipher_walk *walk) 48 + { 49 + walk->dst.virt.addr = scatterwalk_map(&walk->out, 1); 50 + } 51 + 52 + static inline void blkcipher_unmap_src(struct blkcipher_walk *walk) 53 + { 54 + scatterwalk_unmap(walk->src.virt.addr, 0); 55 + } 56 + 57 + static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) 58 + { 59 + scatterwalk_unmap(walk->dst.virt.addr, 1); 60 + } 61 + 62 + static inline u8 *blkcipher_get_spot(u8 *start, unsigned int 
len) 63 + { 64 + if (offset_in_page(start + len) < len) 65 + return (u8 *)((unsigned long)(start + len) & PAGE_MASK); 66 + return start; 67 + } 68 + 69 + static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, 70 + struct blkcipher_walk *walk, 71 + unsigned int bsize) 72 + { 73 + u8 *addr; 74 + unsigned int alignmask = crypto_blkcipher_alignmask(tfm); 75 + 76 + addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); 77 + addr = blkcipher_get_spot(addr, bsize); 78 + scatterwalk_copychunks(addr, &walk->out, bsize, 1); 79 + return bsize; 80 + } 81 + 82 + static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, 83 + unsigned int n) 84 + { 85 + n = walk->nbytes - n; 86 + 87 + if (walk->flags & BLKCIPHER_WALK_COPY) { 88 + blkcipher_map_dst(walk); 89 + memcpy(walk->dst.virt.addr, walk->page, n); 90 + blkcipher_unmap_dst(walk); 91 + } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { 92 + blkcipher_unmap_src(walk); 93 + if (walk->flags & BLKCIPHER_WALK_DIFF) 94 + blkcipher_unmap_dst(walk); 95 + } 96 + 97 + scatterwalk_advance(&walk->in, n); 98 + scatterwalk_advance(&walk->out, n); 99 + 100 + return n; 101 + } 102 + 103 + int blkcipher_walk_done(struct blkcipher_desc *desc, 104 + struct blkcipher_walk *walk, int err) 105 + { 106 + struct crypto_blkcipher *tfm = desc->tfm; 107 + unsigned int nbytes = 0; 108 + 109 + if (likely(err >= 0)) { 110 + unsigned int bsize = crypto_blkcipher_blocksize(tfm); 111 + unsigned int n; 112 + 113 + if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) 114 + n = blkcipher_done_fast(walk, err); 115 + else 116 + n = blkcipher_done_slow(tfm, walk, bsize); 117 + 118 + nbytes = walk->total - n; 119 + err = 0; 120 + } 121 + 122 + scatterwalk_done(&walk->in, 0, nbytes); 123 + scatterwalk_done(&walk->out, 1, nbytes); 124 + 125 + walk->total = nbytes; 126 + walk->nbytes = nbytes; 127 + 128 + if (nbytes) { 129 + crypto_yield(desc->flags); 130 + return blkcipher_walk_next(desc, walk); 131 + } 132 + 133 + if 
(walk->iv != desc->info) 134 + memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm)); 135 + if (walk->buffer != walk->page) 136 + kfree(walk->buffer); 137 + if (walk->page) 138 + free_page((unsigned long)walk->page); 139 + 140 + return err; 141 + } 142 + EXPORT_SYMBOL_GPL(blkcipher_walk_done); 143 + 144 + static inline int blkcipher_next_slow(struct blkcipher_desc *desc, 145 + struct blkcipher_walk *walk, 146 + unsigned int bsize, 147 + unsigned int alignmask) 148 + { 149 + unsigned int n; 150 + 151 + if (walk->buffer) 152 + goto ok; 153 + 154 + walk->buffer = walk->page; 155 + if (walk->buffer) 156 + goto ok; 157 + 158 + n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); 159 + walk->buffer = kmalloc(n, GFP_ATOMIC); 160 + if (!walk->buffer) 161 + return blkcipher_walk_done(desc, walk, -ENOMEM); 162 + 163 + ok: 164 + walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer, 165 + alignmask + 1); 166 + walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize); 167 + walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize, 168 + bsize); 169 + 170 + scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); 171 + 172 + walk->nbytes = bsize; 173 + walk->flags |= BLKCIPHER_WALK_SLOW; 174 + 175 + return 0; 176 + } 177 + 178 + static inline int blkcipher_next_copy(struct blkcipher_walk *walk) 179 + { 180 + u8 *tmp = walk->page; 181 + 182 + blkcipher_map_src(walk); 183 + memcpy(tmp, walk->src.virt.addr, walk->nbytes); 184 + blkcipher_unmap_src(walk); 185 + 186 + walk->src.virt.addr = tmp; 187 + walk->dst.virt.addr = tmp; 188 + 189 + return 0; 190 + } 191 + 192 + static inline int blkcipher_next_fast(struct blkcipher_desc *desc, 193 + struct blkcipher_walk *walk) 194 + { 195 + unsigned long diff; 196 + 197 + walk->src.phys.page = scatterwalk_page(&walk->in); 198 + walk->src.phys.offset = offset_in_page(walk->in.offset); 199 + walk->dst.phys.page = scatterwalk_page(&walk->out); 200 + walk->dst.phys.offset = 
offset_in_page(walk->out.offset); 201 + 202 + if (walk->flags & BLKCIPHER_WALK_PHYS) 203 + return 0; 204 + 205 + diff = walk->src.phys.offset - walk->dst.phys.offset; 206 + diff |= walk->src.virt.page - walk->dst.virt.page; 207 + 208 + blkcipher_map_src(walk); 209 + walk->dst.virt.addr = walk->src.virt.addr; 210 + 211 + if (diff) { 212 + walk->flags |= BLKCIPHER_WALK_DIFF; 213 + blkcipher_map_dst(walk); 214 + } 215 + 216 + return 0; 217 + } 218 + 219 + static int blkcipher_walk_next(struct blkcipher_desc *desc, 220 + struct blkcipher_walk *walk) 221 + { 222 + struct crypto_blkcipher *tfm = desc->tfm; 223 + unsigned int alignmask = crypto_blkcipher_alignmask(tfm); 224 + unsigned int bsize = crypto_blkcipher_blocksize(tfm); 225 + unsigned int n; 226 + int err; 227 + 228 + n = walk->total; 229 + if (unlikely(n < bsize)) { 230 + desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; 231 + return blkcipher_walk_done(desc, walk, -EINVAL); 232 + } 233 + 234 + walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | 235 + BLKCIPHER_WALK_DIFF); 236 + if (!scatterwalk_aligned(&walk->in, alignmask) || 237 + !scatterwalk_aligned(&walk->out, alignmask)) { 238 + walk->flags |= BLKCIPHER_WALK_COPY; 239 + if (!walk->page) { 240 + walk->page = (void *)__get_free_page(GFP_ATOMIC); 241 + if (!walk->page) 242 + n = 0; 243 + } 244 + } 245 + 246 + n = scatterwalk_clamp(&walk->in, n); 247 + n = scatterwalk_clamp(&walk->out, n); 248 + 249 + if (unlikely(n < bsize)) { 250 + err = blkcipher_next_slow(desc, walk, bsize, alignmask); 251 + goto set_phys_lowmem; 252 + } 253 + 254 + walk->nbytes = n; 255 + if (walk->flags & BLKCIPHER_WALK_COPY) { 256 + err = blkcipher_next_copy(walk); 257 + goto set_phys_lowmem; 258 + } 259 + 260 + return blkcipher_next_fast(desc, walk); 261 + 262 + set_phys_lowmem: 263 + if (walk->flags & BLKCIPHER_WALK_PHYS) { 264 + walk->src.phys.page = virt_to_page(walk->src.virt.addr); 265 + walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); 266 + walk->src.phys.offset &= 
PAGE_SIZE - 1; 267 + walk->dst.phys.offset &= PAGE_SIZE - 1; 268 + } 269 + return err; 270 + } 271 + 272 + static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, 273 + struct crypto_blkcipher *tfm, 274 + unsigned int alignmask) 275 + { 276 + unsigned bs = crypto_blkcipher_blocksize(tfm); 277 + unsigned int ivsize = crypto_blkcipher_ivsize(tfm); 278 + unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1); 279 + u8 *iv; 280 + 281 + size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); 282 + walk->buffer = kmalloc(size, GFP_ATOMIC); 283 + if (!walk->buffer) 284 + return -ENOMEM; 285 + 286 + iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); 287 + iv = blkcipher_get_spot(iv, bs) + bs; 288 + iv = blkcipher_get_spot(iv, bs) + bs; 289 + iv = blkcipher_get_spot(iv, ivsize); 290 + 291 + walk->iv = memcpy(iv, walk->iv, ivsize); 292 + return 0; 293 + } 294 + 295 + int blkcipher_walk_virt(struct blkcipher_desc *desc, 296 + struct blkcipher_walk *walk) 297 + { 298 + walk->flags &= ~BLKCIPHER_WALK_PHYS; 299 + return blkcipher_walk_first(desc, walk); 300 + } 301 + EXPORT_SYMBOL_GPL(blkcipher_walk_virt); 302 + 303 + int blkcipher_walk_phys(struct blkcipher_desc *desc, 304 + struct blkcipher_walk *walk) 305 + { 306 + walk->flags |= BLKCIPHER_WALK_PHYS; 307 + return blkcipher_walk_first(desc, walk); 308 + } 309 + EXPORT_SYMBOL_GPL(blkcipher_walk_phys); 310 + 311 + static int blkcipher_walk_first(struct blkcipher_desc *desc, 312 + struct blkcipher_walk *walk) 313 + { 314 + struct crypto_blkcipher *tfm = desc->tfm; 315 + unsigned int alignmask = crypto_blkcipher_alignmask(tfm); 316 + 317 + walk->nbytes = walk->total; 318 + if (unlikely(!walk->total)) 319 + return 0; 320 + 321 + walk->buffer = NULL; 322 + walk->iv = desc->info; 323 + if (unlikely(((unsigned long)walk->iv & alignmask))) { 324 + int err = blkcipher_copy_iv(walk, tfm, alignmask); 325 + if (err) 326 + return err; 327 + } 328 + 329 + scatterwalk_start(&walk->in, walk->in.sg); 330 + 
scatterwalk_start(&walk->out, walk->out.sg); 331 + walk->page = NULL; 332 + 333 + return blkcipher_walk_next(desc, walk); 334 + } 335 + 336 + static int setkey(struct crypto_tfm *tfm, const u8 *key, 337 + unsigned int keylen) 338 + { 339 + struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; 340 + 341 + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { 342 + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 343 + return -EINVAL; 344 + } 345 + 346 + return cipher->setkey(tfm, key, keylen); 347 + } 348 + 349 + static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg) 350 + { 351 + struct blkcipher_alg *cipher = &alg->cra_blkcipher; 352 + unsigned int len = alg->cra_ctxsize; 353 + 354 + if (cipher->ivsize) { 355 + len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); 356 + len += cipher->ivsize; 357 + } 358 + 359 + return len; 360 + } 361 + 362 + static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm) 363 + { 364 + struct blkcipher_tfm *crt = &tfm->crt_blkcipher; 365 + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; 366 + unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1; 367 + unsigned long addr; 368 + 369 + if (alg->ivsize > PAGE_SIZE / 8) 370 + return -EINVAL; 371 + 372 + crt->setkey = setkey; 373 + crt->encrypt = alg->encrypt; 374 + crt->decrypt = alg->decrypt; 375 + 376 + addr = (unsigned long)crypto_tfm_ctx(tfm); 377 + addr = ALIGN(addr, align); 378 + addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align); 379 + crt->iv = (void *)addr; 380 + 381 + return 0; 382 + } 383 + 384 + static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) 385 + __attribute_used__; 386 + static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) 387 + { 388 + seq_printf(m, "type : blkcipher\n"); 389 + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 390 + seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize); 391 + seq_printf(m, "max keysize : %u\n", 
alg->cra_blkcipher.max_keysize); 392 + seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize); 393 + } 394 + 395 + const struct crypto_type crypto_blkcipher_type = { 396 + .ctxsize = crypto_blkcipher_ctxsize, 397 + .init = crypto_init_blkcipher_ops, 398 + #ifdef CONFIG_PROC_FS 399 + .show = crypto_blkcipher_show, 400 + #endif 401 + }; 402 + EXPORT_SYMBOL_GPL(crypto_blkcipher_type); 403 + 404 + MODULE_LICENSE("GPL"); 405 + MODULE_DESCRIPTION("Generic block chaining cipher type");
+1 -2
crypto/blowfish.c
··· 399 399 /* 400 400 * Calculates the blowfish S and P boxes for encryption and decryption. 401 401 */ 402 - static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, 403 - unsigned int keylen, u32 *flags) 402 + static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) 404 403 { 405 404 struct bf_ctx *ctx = crypto_tfm_ctx(tfm); 406 405 u32 *P = ctx->p;
+1 -7
crypto/cast5.c
··· 769 769 } 770 770 771 771 772 - static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, 773 - unsigned key_len, u32 *flags) 772 + static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned key_len) 774 773 { 775 774 struct cast5_ctx *c = crypto_tfm_ctx(tfm); 776 775 int i; ··· 777 778 u32 z[4]; 778 779 u32 k[16]; 779 780 __be32 p_key[4]; 780 - 781 - if (key_len < 5 || key_len > 16) { 782 - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 783 - return -EINVAL; 784 - } 785 781 786 782 c->rr = key_len <= 10 ? 1 : 0; 787 783
+3 -2
crypto/cast6.c
··· 382 382 } 383 383 384 384 static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, 385 - unsigned key_len, u32 *flags) 385 + unsigned key_len) 386 386 { 387 387 int i; 388 388 u32 key[8]; 389 389 __be32 p_key[8]; /* padded key */ 390 390 struct cast6_ctx *c = crypto_tfm_ctx(tfm); 391 + u32 *flags = &tfm->crt_flags; 391 392 392 - if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { 393 + if (key_len % 4 != 0) { 393 394 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 394 395 return -EINVAL; 395 396 }
+344
crypto/cbc.c
··· 1 + /* 2 + * CBC: Cipher Block Chaining mode 3 + * 4 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #include <crypto/algapi.h> 14 + #include <linux/err.h> 15 + #include <linux/init.h> 16 + #include <linux/kernel.h> 17 + #include <linux/module.h> 18 + #include <linux/scatterlist.h> 19 + #include <linux/slab.h> 20 + 21 + struct crypto_cbc_ctx { 22 + struct crypto_cipher *child; 23 + void (*xor)(u8 *dst, const u8 *src, unsigned int bs); 24 + }; 25 + 26 + static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, 27 + unsigned int keylen) 28 + { 29 + struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent); 30 + struct crypto_cipher *child = ctx->child; 31 + int err; 32 + 33 + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 34 + crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & 35 + CRYPTO_TFM_REQ_MASK); 36 + err = crypto_cipher_setkey(child, key, keylen); 37 + crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & 38 + CRYPTO_TFM_RES_MASK); 39 + return err; 40 + } 41 + 42 + static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, 43 + struct blkcipher_walk *walk, 44 + struct crypto_cipher *tfm, 45 + void (*xor)(u8 *, const u8 *, 46 + unsigned int)) 47 + { 48 + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 49 + crypto_cipher_alg(tfm)->cia_encrypt; 50 + int bsize = crypto_cipher_blocksize(tfm); 51 + unsigned int nbytes = walk->nbytes; 52 + u8 *src = walk->src.virt.addr; 53 + u8 *dst = walk->dst.virt.addr; 54 + u8 *iv = walk->iv; 55 + 56 + do { 57 + xor(iv, src, bsize); 58 + fn(crypto_cipher_tfm(tfm), dst, iv); 59 + memcpy(iv, dst, bsize); 60 + 61 + src += bsize; 62 + dst += bsize; 63 + } while ((nbytes -= bsize) 
>= bsize); 64 + 65 + return nbytes; 66 + } 67 + 68 + static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, 69 + struct blkcipher_walk *walk, 70 + struct crypto_cipher *tfm, 71 + void (*xor)(u8 *, const u8 *, 72 + unsigned int)) 73 + { 74 + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 75 + crypto_cipher_alg(tfm)->cia_encrypt; 76 + int bsize = crypto_cipher_blocksize(tfm); 77 + unsigned int nbytes = walk->nbytes; 78 + u8 *src = walk->src.virt.addr; 79 + u8 *iv = walk->iv; 80 + 81 + do { 82 + xor(src, iv, bsize); 83 + fn(crypto_cipher_tfm(tfm), src, src); 84 + iv = src; 85 + 86 + src += bsize; 87 + } while ((nbytes -= bsize) >= bsize); 88 + 89 + memcpy(walk->iv, iv, bsize); 90 + 91 + return nbytes; 92 + } 93 + 94 + static int crypto_cbc_encrypt(struct blkcipher_desc *desc, 95 + struct scatterlist *dst, struct scatterlist *src, 96 + unsigned int nbytes) 97 + { 98 + struct blkcipher_walk walk; 99 + struct crypto_blkcipher *tfm = desc->tfm; 100 + struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); 101 + struct crypto_cipher *child = ctx->child; 102 + void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; 103 + int err; 104 + 105 + blkcipher_walk_init(&walk, dst, src, nbytes); 106 + err = blkcipher_walk_virt(desc, &walk); 107 + 108 + while ((nbytes = walk.nbytes)) { 109 + if (walk.src.virt.addr == walk.dst.virt.addr) 110 + nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child, 111 + xor); 112 + else 113 + nbytes = crypto_cbc_encrypt_segment(desc, &walk, child, 114 + xor); 115 + err = blkcipher_walk_done(desc, &walk, nbytes); 116 + } 117 + 118 + return err; 119 + } 120 + 121 + static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, 122 + struct blkcipher_walk *walk, 123 + struct crypto_cipher *tfm, 124 + void (*xor)(u8 *, const u8 *, 125 + unsigned int)) 126 + { 127 + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 128 + crypto_cipher_alg(tfm)->cia_decrypt; 129 + int bsize = crypto_cipher_blocksize(tfm); 130 + unsigned int nbytes 
= walk->nbytes; 131 + u8 *src = walk->src.virt.addr; 132 + u8 *dst = walk->dst.virt.addr; 133 + u8 *iv = walk->iv; 134 + 135 + do { 136 + fn(crypto_cipher_tfm(tfm), dst, src); 137 + xor(dst, iv, bsize); 138 + iv = src; 139 + 140 + src += bsize; 141 + dst += bsize; 142 + } while ((nbytes -= bsize) >= bsize); 143 + 144 + memcpy(walk->iv, iv, bsize); 145 + 146 + return nbytes; 147 + } 148 + 149 + static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, 150 + struct blkcipher_walk *walk, 151 + struct crypto_cipher *tfm, 152 + void (*xor)(u8 *, const u8 *, 153 + unsigned int)) 154 + { 155 + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 156 + crypto_cipher_alg(tfm)->cia_decrypt; 157 + int bsize = crypto_cipher_blocksize(tfm); 158 + unsigned long alignmask = crypto_cipher_alignmask(tfm); 159 + unsigned int nbytes = walk->nbytes; 160 + u8 *src = walk->src.virt.addr; 161 + u8 stack[bsize + alignmask]; 162 + u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); 163 + 164 + memcpy(first_iv, walk->iv, bsize); 165 + 166 + /* Start of the last block. 
*/ 167 + src += nbytes - nbytes % bsize - bsize; 168 + memcpy(walk->iv, src, bsize); 169 + 170 + for (;;) { 171 + fn(crypto_cipher_tfm(tfm), src, src); 172 + if ((nbytes -= bsize) < bsize) 173 + break; 174 + xor(src, src - bsize, bsize); 175 + src -= bsize; 176 + } 177 + 178 + xor(src, first_iv, bsize); 179 + 180 + return nbytes; 181 + } 182 + 183 + static int crypto_cbc_decrypt(struct blkcipher_desc *desc, 184 + struct scatterlist *dst, struct scatterlist *src, 185 + unsigned int nbytes) 186 + { 187 + struct blkcipher_walk walk; 188 + struct crypto_blkcipher *tfm = desc->tfm; 189 + struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); 190 + struct crypto_cipher *child = ctx->child; 191 + void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; 192 + int err; 193 + 194 + blkcipher_walk_init(&walk, dst, src, nbytes); 195 + err = blkcipher_walk_virt(desc, &walk); 196 + 197 + while ((nbytes = walk.nbytes)) { 198 + if (walk.src.virt.addr == walk.dst.virt.addr) 199 + nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child, 200 + xor); 201 + else 202 + nbytes = crypto_cbc_decrypt_segment(desc, &walk, child, 203 + xor); 204 + err = blkcipher_walk_done(desc, &walk, nbytes); 205 + } 206 + 207 + return err; 208 + } 209 + 210 + static void xor_byte(u8 *a, const u8 *b, unsigned int bs) 211 + { 212 + do { 213 + *a++ ^= *b++; 214 + } while (--bs); 215 + } 216 + 217 + static void xor_quad(u8 *dst, const u8 *src, unsigned int bs) 218 + { 219 + u32 *a = (u32 *)dst; 220 + u32 *b = (u32 *)src; 221 + 222 + do { 223 + *a++ ^= *b++; 224 + } while ((bs -= 4)); 225 + } 226 + 227 + static void xor_64(u8 *a, const u8 *b, unsigned int bs) 228 + { 229 + ((u32 *)a)[0] ^= ((u32 *)b)[0]; 230 + ((u32 *)a)[1] ^= ((u32 *)b)[1]; 231 + } 232 + 233 + static void xor_128(u8 *a, const u8 *b, unsigned int bs) 234 + { 235 + ((u32 *)a)[0] ^= ((u32 *)b)[0]; 236 + ((u32 *)a)[1] ^= ((u32 *)b)[1]; 237 + ((u32 *)a)[2] ^= ((u32 *)b)[2]; 238 + ((u32 *)a)[3] ^= ((u32 *)b)[3]; 239 + } 240 + 241 + static int 
crypto_cbc_init_tfm(struct crypto_tfm *tfm) 242 + { 243 + struct crypto_instance *inst = (void *)tfm->__crt_alg; 244 + struct crypto_spawn *spawn = crypto_instance_ctx(inst); 245 + struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 246 + 247 + switch (crypto_tfm_alg_blocksize(tfm)) { 248 + case 8: 249 + ctx->xor = xor_64; 250 + break; 251 + 252 + case 16: 253 + ctx->xor = xor_128; 254 + break; 255 + 256 + default: 257 + if (crypto_tfm_alg_blocksize(tfm) % 4) 258 + ctx->xor = xor_byte; 259 + else 260 + ctx->xor = xor_quad; 261 + } 262 + 263 + tfm = crypto_spawn_tfm(spawn); 264 + if (IS_ERR(tfm)) 265 + return PTR_ERR(tfm); 266 + 267 + ctx->child = crypto_cipher_cast(tfm); 268 + return 0; 269 + } 270 + 271 + static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) 272 + { 273 + struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 274 + crypto_free_cipher(ctx->child); 275 + } 276 + 277 + static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len) 278 + { 279 + struct crypto_instance *inst; 280 + struct crypto_alg *alg; 281 + 282 + alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, 283 + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); 284 + if (IS_ERR(alg)) 285 + return ERR_PTR(PTR_ERR(alg)); 286 + 287 + inst = crypto_alloc_instance("cbc", alg); 288 + if (IS_ERR(inst)) 289 + goto out_put_alg; 290 + 291 + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; 292 + inst->alg.cra_priority = alg->cra_priority; 293 + inst->alg.cra_blocksize = alg->cra_blocksize; 294 + inst->alg.cra_alignmask = alg->cra_alignmask; 295 + inst->alg.cra_type = &crypto_blkcipher_type; 296 + 297 + if (!(alg->cra_blocksize % 4)) 298 + inst->alg.cra_alignmask |= 3; 299 + inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; 300 + inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; 301 + inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; 302 + 303 + inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx); 304 + 305 + inst->alg.cra_init = 
crypto_cbc_init_tfm; 306 + inst->alg.cra_exit = crypto_cbc_exit_tfm; 307 + 308 + inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey; 309 + inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt; 310 + inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt; 311 + 312 + out_put_alg: 313 + crypto_mod_put(alg); 314 + return inst; 315 + } 316 + 317 + static void crypto_cbc_free(struct crypto_instance *inst) 318 + { 319 + crypto_drop_spawn(crypto_instance_ctx(inst)); 320 + kfree(inst); 321 + } 322 + 323 + static struct crypto_template crypto_cbc_tmpl = { 324 + .name = "cbc", 325 + .alloc = crypto_cbc_alloc, 326 + .free = crypto_cbc_free, 327 + .module = THIS_MODULE, 328 + }; 329 + 330 + static int __init crypto_cbc_module_init(void) 331 + { 332 + return crypto_register_template(&crypto_cbc_tmpl); 333 + } 334 + 335 + static void __exit crypto_cbc_module_exit(void) 336 + { 337 + crypto_unregister_template(&crypto_cbc_tmpl); 338 + } 339 + 340 + module_init(crypto_cbc_module_init); 341 + module_exit(crypto_cbc_module_exit); 342 + 343 + MODULE_LICENSE("GPL"); 344 + MODULE_DESCRIPTION("CBC block cipher algorithm");
+94 -23
crypto/cipher.c
··· 23 23 #include "internal.h" 24 24 #include "scatterwalk.h" 25 25 26 + struct cipher_alg_compat { 27 + unsigned int cia_min_keysize; 28 + unsigned int cia_max_keysize; 29 + int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, 30 + unsigned int keylen); 31 + void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 32 + void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 33 + 34 + unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, 35 + u8 *dst, const u8 *src, 36 + unsigned int nbytes); 37 + unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc, 38 + u8 *dst, const u8 *src, 39 + unsigned int nbytes); 40 + unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc, 41 + u8 *dst, const u8 *src, 42 + unsigned int nbytes); 43 + unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc, 44 + u8 *dst, const u8 *src, 45 + unsigned int nbytes); 46 + }; 47 + 26 48 static inline void xor_64(u8 *a, const u8 *b) 27 49 { 28 50 ((u32 *)a)[0] ^= ((u32 *)b)[0]; ··· 67 45 u8 buffer[bsize * 2 + alignmask]; 68 46 u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 69 47 u8 *dst = src + bsize; 70 - unsigned int n; 71 48 72 - n = scatterwalk_copychunks(src, in, bsize, 0); 73 - scatterwalk_advance(in, n); 74 - 49 + scatterwalk_copychunks(src, in, bsize, 0); 75 50 desc->prfn(desc, dst, src, bsize); 76 - 77 - n = scatterwalk_copychunks(dst, out, bsize, 1); 78 - scatterwalk_advance(out, n); 51 + scatterwalk_copychunks(dst, out, bsize, 1); 79 52 80 53 return bsize; 81 54 } ··· 81 64 unsigned int nbytes, u8 *tmp) 82 65 { 83 66 u8 *src, *dst; 67 + u8 *real_src, *real_dst; 84 68 85 - src = in->data; 86 - dst = scatterwalk_samebuf(in, out) ? src : out->data; 69 + real_src = scatterwalk_map(in, 0); 70 + real_dst = scatterwalk_map(out, 1); 71 + 72 + src = real_src; 73 + dst = scatterwalk_samebuf(in, out) ? 
src : real_dst; 87 74 88 75 if (tmp) { 89 - memcpy(tmp, in->data, nbytes); 76 + memcpy(tmp, src, nbytes); 90 77 src = tmp; 91 78 dst = tmp; 92 79 } ··· 98 77 nbytes = desc->prfn(desc, dst, src, nbytes); 99 78 100 79 if (tmp) 101 - memcpy(out->data, tmp, nbytes); 80 + memcpy(real_dst, tmp, nbytes); 81 + 82 + scatterwalk_unmap(real_src, 0); 83 + scatterwalk_unmap(real_dst, 1); 102 84 103 85 scatterwalk_advance(in, nbytes); 104 86 scatterwalk_advance(out, nbytes); ··· 150 126 tmp = (u8 *)buffer; 151 127 } 152 128 153 - scatterwalk_map(&walk_in, 0); 154 - scatterwalk_map(&walk_out, 1); 155 - 156 129 n = scatterwalk_clamp(&walk_in, n); 157 130 n = scatterwalk_clamp(&walk_out, n); 158 131 ··· 166 145 if (!nbytes) 167 146 break; 168 147 169 - crypto_yield(tfm); 148 + crypto_yield(tfm->crt_flags); 170 149 } 171 150 172 151 if (buffer) ··· 285 264 { 286 265 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; 287 266 267 + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 288 268 if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) { 289 269 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 290 270 return -EINVAL; 291 271 } else 292 - return cia->cia_setkey(tfm, key, keylen, 293 - &tfm->crt_flags); 272 + return cia->cia_setkey(tfm, key, keylen); 294 273 } 295 274 296 275 static int ecb_encrypt(struct crypto_tfm *tfm, ··· 298 277 struct scatterlist *src, unsigned int nbytes) 299 278 { 300 279 struct cipher_desc desc; 301 - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 280 + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; 302 281 303 282 desc.tfm = tfm; 304 283 desc.crfn = cipher->cia_encrypt; ··· 313 292 unsigned int nbytes) 314 293 { 315 294 struct cipher_desc desc; 316 - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 295 + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; 317 296 318 297 desc.tfm = tfm; 319 298 desc.crfn = cipher->cia_decrypt; ··· 328 307 unsigned int nbytes) 329 308 { 330 309 struct 
cipher_desc desc; 331 - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 310 + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; 332 311 333 312 desc.tfm = tfm; 334 313 desc.crfn = cipher->cia_encrypt; ··· 344 323 unsigned int nbytes, u8 *iv) 345 324 { 346 325 struct cipher_desc desc; 347 - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 326 + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; 348 327 349 328 desc.tfm = tfm; 350 329 desc.crfn = cipher->cia_encrypt; ··· 360 339 unsigned int nbytes) 361 340 { 362 341 struct cipher_desc desc; 363 - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 342 + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; 364 343 365 344 desc.tfm = tfm; 366 345 desc.crfn = cipher->cia_decrypt; ··· 376 355 unsigned int nbytes, u8 *iv) 377 356 { 378 357 struct cipher_desc desc; 379 - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 358 + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; 380 359 381 360 desc.tfm = tfm; 382 361 desc.crfn = cipher->cia_decrypt; ··· 409 388 return 0; 410 389 } 411 390 391 + static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *, 392 + const u8 *), 393 + struct crypto_tfm *tfm, 394 + u8 *dst, const u8 *src) 395 + { 396 + unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); 397 + unsigned int size = crypto_tfm_alg_blocksize(tfm); 398 + u8 buffer[size + alignmask]; 399 + u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 400 + 401 + memcpy(tmp, src, size); 402 + fn(tfm, tmp, tmp); 403 + memcpy(dst, tmp, size); 404 + } 405 + 406 + static void cipher_encrypt_unaligned(struct crypto_tfm *tfm, 407 + u8 *dst, const u8 *src) 408 + { 409 + unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); 410 + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 411 + 412 + if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { 413 + 
cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src); 414 + return; 415 + } 416 + 417 + cipher->cia_encrypt(tfm, dst, src); 418 + } 419 + 420 + static void cipher_decrypt_unaligned(struct crypto_tfm *tfm, 421 + u8 *dst, const u8 *src) 422 + { 423 + unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); 424 + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 425 + 426 + if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { 427 + cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src); 428 + return; 429 + } 430 + 431 + cipher->cia_decrypt(tfm, dst, src); 432 + } 433 + 412 434 int crypto_init_cipher_ops(struct crypto_tfm *tfm) 413 435 { 414 436 int ret = 0; 415 437 struct cipher_tfm *ops = &tfm->crt_cipher; 438 + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; 416 439 417 440 ops->cit_setkey = setkey; 441 + ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ? 442 + cipher_encrypt_unaligned : cipher->cia_encrypt; 443 + ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ? 444 + cipher_decrypt_unaligned : cipher->cia_decrypt; 418 445 419 446 switch (tfm->crt_cipher.cit_mode) { 420 447 case CRYPTO_TFM_MODE_ECB: 421 448 ops->cit_encrypt = ecb_encrypt; 422 449 ops->cit_decrypt = ecb_decrypt; 450 + ops->cit_encrypt_iv = nocrypt_iv; 451 + ops->cit_decrypt_iv = nocrypt_iv; 423 452 break; 424 453 425 454 case CRYPTO_TFM_MODE_CBC:
+17 -13
crypto/crc32c.c
··· 16 16 #include <linux/string.h> 17 17 #include <linux/crypto.h> 18 18 #include <linux/crc32c.h> 19 - #include <linux/types.h> 20 - #include <asm/byteorder.h> 19 + #include <linux/kernel.h> 21 20 22 21 #define CHKSUM_BLOCK_SIZE 32 23 22 #define CHKSUM_DIGEST_SIZE 4 24 23 25 24 struct chksum_ctx { 26 25 u32 crc; 26 + u32 key; 27 27 }; 28 28 29 29 /* ··· 35 35 { 36 36 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 37 37 38 - mctx->crc = ~(u32)0; /* common usage */ 38 + mctx->crc = mctx->key; 39 39 } 40 40 41 41 /* ··· 44 44 * the seed. 45 45 */ 46 46 static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key, 47 - unsigned int keylen, u32 *flags) 47 + unsigned int keylen) 48 48 { 49 49 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 50 50 51 51 if (keylen != sizeof(mctx->crc)) { 52 - if (flags) 53 - *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; 52 + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 54 53 return -EINVAL; 55 54 } 56 - mctx->crc = __cpu_to_le32(*(u32 *)key); 55 + mctx->key = le32_to_cpu(*(__le32 *)key); 57 56 return 0; 58 57 } 59 58 ··· 60 61 unsigned int length) 61 62 { 62 63 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 63 - u32 mcrc; 64 64 65 - mcrc = crc32c(mctx->crc, data, (size_t)length); 66 - 67 - mctx->crc = mcrc; 65 + mctx->crc = crc32c(mctx->crc, data, length); 68 66 } 69 67 70 68 static void chksum_final(struct crypto_tfm *tfm, u8 *out) 71 69 { 72 70 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 73 - u32 mcrc = (mctx->crc ^ ~(u32)0); 74 71 75 - *(u32 *)out = __le32_to_cpu(mcrc); 72 + *(__le32 *)out = ~cpu_to_le32(mctx->crc); 73 + } 74 + 75 + static int crc32c_cra_init(struct crypto_tfm *tfm) 76 + { 77 + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 78 + 79 + mctx->key = ~0; 80 + return 0; 76 81 } 77 82 78 83 static struct crypto_alg alg = { ··· 86 83 .cra_ctxsize = sizeof(struct chksum_ctx), 87 84 .cra_module = THIS_MODULE, 88 85 .cra_list = LIST_HEAD_INIT(alg.cra_list), 86 + .cra_init = crc32c_cra_init, 89 87 .cra_u = { 90 88 .digest = { 91 89 
.dia_digestsize= CHKSUM_DIGEST_SIZE,
+1 -1
crypto/crypto_null.c
··· 48 48 { } 49 49 50 50 static int null_setkey(struct crypto_tfm *tfm, const u8 *key, 51 - unsigned int keylen, u32 *flags) 51 + unsigned int keylen) 52 52 { return 0; } 53 53 54 54 static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+156
crypto/cryptomgr.c
··· 1 + /* 2 + * Create default crypto algorithm instances. 3 + * 4 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #include <linux/crypto.h> 14 + #include <linux/ctype.h> 15 + #include <linux/err.h> 16 + #include <linux/init.h> 17 + #include <linux/module.h> 18 + #include <linux/notifier.h> 19 + #include <linux/rtnetlink.h> 20 + #include <linux/sched.h> 21 + #include <linux/string.h> 22 + #include <linux/workqueue.h> 23 + 24 + #include "internal.h" 25 + 26 + struct cryptomgr_param { 27 + struct work_struct work; 28 + 29 + struct { 30 + struct rtattr attr; 31 + struct crypto_attr_alg data; 32 + } alg; 33 + 34 + struct { 35 + u32 type; 36 + u32 mask; 37 + char name[CRYPTO_MAX_ALG_NAME]; 38 + } larval; 39 + 40 + char template[CRYPTO_MAX_ALG_NAME]; 41 + }; 42 + 43 + static void cryptomgr_probe(void *data) 44 + { 45 + struct cryptomgr_param *param = data; 46 + struct crypto_template *tmpl; 47 + struct crypto_instance *inst; 48 + int err; 49 + 50 + tmpl = crypto_lookup_template(param->template); 51 + if (!tmpl) 52 + goto err; 53 + 54 + do { 55 + inst = tmpl->alloc(&param->alg, sizeof(param->alg)); 56 + if (IS_ERR(inst)) 57 + err = PTR_ERR(inst); 58 + else if ((err = crypto_register_instance(tmpl, inst))) 59 + tmpl->free(inst); 60 + } while (err == -EAGAIN && !signal_pending(current)); 61 + 62 + crypto_tmpl_put(tmpl); 63 + 64 + if (err) 65 + goto err; 66 + 67 + out: 68 + kfree(param); 69 + return; 70 + 71 + err: 72 + crypto_larval_error(param->larval.name, param->larval.type, 73 + param->larval.mask); 74 + goto out; 75 + } 76 + 77 + static int cryptomgr_schedule_probe(struct crypto_larval *larval) 78 + { 79 + struct cryptomgr_param *param; 80 + const char *name = 
larval->alg.cra_name; 81 + const char *p; 82 + unsigned int len; 83 + 84 + param = kmalloc(sizeof(*param), GFP_KERNEL); 85 + if (!param) 86 + goto err; 87 + 88 + for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) 89 + ; 90 + 91 + len = p - name; 92 + if (!len || *p != '(') 93 + goto err_free_param; 94 + 95 + memcpy(param->template, name, len); 96 + param->template[len] = 0; 97 + 98 + name = p + 1; 99 + for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) 100 + ; 101 + 102 + len = p - name; 103 + if (!len || *p != ')' || p[1]) 104 + goto err_free_param; 105 + 106 + param->alg.attr.rta_len = sizeof(param->alg); 107 + param->alg.attr.rta_type = CRYPTOA_ALG; 108 + memcpy(param->alg.data.name, name, len); 109 + param->alg.data.name[len] = 0; 110 + 111 + memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); 112 + param->larval.type = larval->alg.cra_flags; 113 + param->larval.mask = larval->mask; 114 + 115 + INIT_WORK(&param->work, cryptomgr_probe, param); 116 + schedule_work(&param->work); 117 + 118 + return NOTIFY_STOP; 119 + 120 + err_free_param: 121 + kfree(param); 122 + err: 123 + return NOTIFY_OK; 124 + } 125 + 126 + static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, 127 + void *data) 128 + { 129 + switch (msg) { 130 + case CRYPTO_MSG_ALG_REQUEST: 131 + return cryptomgr_schedule_probe(data); 132 + } 133 + 134 + return NOTIFY_DONE; 135 + } 136 + 137 + static struct notifier_block cryptomgr_notifier = { 138 + .notifier_call = cryptomgr_notify, 139 + }; 140 + 141 + static int __init cryptomgr_init(void) 142 + { 143 + return crypto_register_notifier(&cryptomgr_notifier); 144 + } 145 + 146 + static void __exit cryptomgr_exit(void) 147 + { 148 + int err = crypto_unregister_notifier(&cryptomgr_notifier); 149 + BUG_ON(err); 150 + } 151 + 152 + module_init(cryptomgr_init); 153 + module_exit(cryptomgr_exit); 154 + 155 + MODULE_LICENSE("GPL"); 156 + MODULE_DESCRIPTION("Crypto Algorithm Manager");
+4 -2
crypto/des.c
··· 784 784 } 785 785 786 786 static int des_setkey(struct crypto_tfm *tfm, const u8 *key, 787 - unsigned int keylen, u32 *flags) 787 + unsigned int keylen) 788 788 { 789 789 struct des_ctx *dctx = crypto_tfm_ctx(tfm); 790 + u32 *flags = &tfm->crt_flags; 790 791 u32 tmp[DES_EXPKEY_WORDS]; 791 792 int ret; 792 793 ··· 865 864 * 866 865 */ 867 866 static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, 868 - unsigned int keylen, u32 *flags) 867 + unsigned int keylen) 869 868 { 870 869 const u32 *K = (const u32 *)key; 871 870 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); 872 871 u32 *expkey = dctx->expkey; 872 + u32 *flags = &tfm->crt_flags; 873 873 874 874 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 875 875 !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
+120 -37
crypto/digest.c
··· 11 11 * any later version. 12 12 * 13 13 */ 14 - #include <linux/crypto.h> 14 + 15 15 #include <linux/mm.h> 16 16 #include <linux/errno.h> 17 17 #include <linux/highmem.h> 18 - #include <asm/scatterlist.h> 19 - #include "internal.h" 18 + #include <linux/module.h> 19 + #include <linux/scatterlist.h> 20 20 21 - static void init(struct crypto_tfm *tfm) 21 + #include "internal.h" 22 + #include "scatterwalk.h" 23 + 24 + void crypto_digest_init(struct crypto_tfm *tfm) 22 25 { 26 + struct crypto_hash *hash = crypto_hash_cast(tfm); 27 + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; 28 + 29 + crypto_hash_init(&desc); 30 + } 31 + EXPORT_SYMBOL_GPL(crypto_digest_init); 32 + 33 + void crypto_digest_update(struct crypto_tfm *tfm, 34 + struct scatterlist *sg, unsigned int nsg) 35 + { 36 + struct crypto_hash *hash = crypto_hash_cast(tfm); 37 + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; 38 + unsigned int nbytes = 0; 39 + unsigned int i; 40 + 41 + for (i = 0; i < nsg; i++) 42 + nbytes += sg[i].length; 43 + 44 + crypto_hash_update(&desc, sg, nbytes); 45 + } 46 + EXPORT_SYMBOL_GPL(crypto_digest_update); 47 + 48 + void crypto_digest_final(struct crypto_tfm *tfm, u8 *out) 49 + { 50 + struct crypto_hash *hash = crypto_hash_cast(tfm); 51 + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; 52 + 53 + crypto_hash_final(&desc, out); 54 + } 55 + EXPORT_SYMBOL_GPL(crypto_digest_final); 56 + 57 + void crypto_digest_digest(struct crypto_tfm *tfm, 58 + struct scatterlist *sg, unsigned int nsg, u8 *out) 59 + { 60 + struct crypto_hash *hash = crypto_hash_cast(tfm); 61 + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; 62 + unsigned int nbytes = 0; 63 + unsigned int i; 64 + 65 + for (i = 0; i < nsg; i++) 66 + nbytes += sg[i].length; 67 + 68 + crypto_hash_digest(&desc, sg, nbytes, out); 69 + } 70 + EXPORT_SYMBOL_GPL(crypto_digest_digest); 71 + 72 + static int init(struct hash_desc *desc) 73 + { 74 + struct crypto_tfm *tfm 
= crypto_hash_tfm(desc->tfm); 75 + 23 76 tfm->__crt_alg->cra_digest.dia_init(tfm); 77 + return 0; 24 78 } 25 79 26 - static void update(struct crypto_tfm *tfm, 27 - struct scatterlist *sg, unsigned int nsg) 80 + static int update(struct hash_desc *desc, 81 + struct scatterlist *sg, unsigned int nbytes) 28 82 { 29 - unsigned int i; 83 + struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); 30 84 unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); 31 85 32 - for (i = 0; i < nsg; i++) { 86 + if (!nbytes) 87 + return 0; 33 88 34 - struct page *pg = sg[i].page; 35 - unsigned int offset = sg[i].offset; 36 - unsigned int l = sg[i].length; 89 + for (;;) { 90 + struct page *pg = sg->page; 91 + unsigned int offset = sg->offset; 92 + unsigned int l = sg->length; 93 + 94 + if (unlikely(l > nbytes)) 95 + l = nbytes; 96 + nbytes -= l; 37 97 38 98 do { 39 99 unsigned int bytes_from_page = min(l, ((unsigned int) ··· 115 55 tfm->__crt_alg->cra_digest.dia_update(tfm, p, 116 56 bytes_from_page); 117 57 crypto_kunmap(src, 0); 118 - crypto_yield(tfm); 58 + crypto_yield(desc->flags); 119 59 offset = 0; 120 60 pg++; 121 61 l -= bytes_from_page; 122 62 } while (l > 0); 63 + 64 + if (!nbytes) 65 + break; 66 + sg = sg_next(sg); 123 67 } 68 + 69 + return 0; 124 70 } 125 71 126 - static void final(struct crypto_tfm *tfm, u8 *out) 72 + static int final(struct hash_desc *desc, u8 *out) 127 73 { 74 + struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); 128 75 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); 76 + struct digest_alg *digest = &tfm->__crt_alg->cra_digest; 77 + 129 78 if (unlikely((unsigned long)out & alignmask)) { 130 - unsigned int size = crypto_tfm_alg_digestsize(tfm); 131 - u8 buffer[size + alignmask]; 132 - u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 133 - tfm->__crt_alg->cra_digest.dia_final(tfm, dst); 134 - memcpy(out, dst, size); 79 + unsigned long align = alignmask + 1; 80 + unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); 81 + u8 *dst = 
(u8 *)ALIGN(addr, align) + 82 + ALIGN(tfm->__crt_alg->cra_ctxsize, align); 83 + 84 + digest->dia_final(tfm, dst); 85 + memcpy(out, dst, digest->dia_digestsize); 135 86 } else 136 - tfm->__crt_alg->cra_digest.dia_final(tfm, out); 87 + digest->dia_final(tfm, out); 88 + 89 + return 0; 137 90 } 138 91 139 - static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) 92 + static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen) 140 93 { 141 - u32 flags; 142 - if (tfm->__crt_alg->cra_digest.dia_setkey == NULL) 143 - return -ENOSYS; 144 - return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags); 94 + crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK); 95 + return -ENOSYS; 145 96 } 146 97 147 - static void digest(struct crypto_tfm *tfm, 148 - struct scatterlist *sg, unsigned int nsg, u8 *out) 98 + static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen) 149 99 { 150 - init(tfm); 151 - update(tfm, sg, nsg); 152 - final(tfm, out); 100 + struct crypto_tfm *tfm = crypto_hash_tfm(hash); 101 + 102 + crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK); 103 + return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen); 104 + } 105 + 106 + static int digest(struct hash_desc *desc, 107 + struct scatterlist *sg, unsigned int nbytes, u8 *out) 108 + { 109 + init(desc); 110 + update(desc, sg, nbytes); 111 + return final(desc, out); 153 112 } 154 113 155 114 int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags) ··· 178 99 179 100 int crypto_init_digest_ops(struct crypto_tfm *tfm) 180 101 { 181 - struct digest_tfm *ops = &tfm->crt_digest; 102 + struct hash_tfm *ops = &tfm->crt_hash; 103 + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; 104 + 105 + if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm)) 106 + return -EINVAL; 182 107 183 - ops->dit_init = init; 184 - ops->dit_update = update; 185 - ops->dit_final = final; 186 - ops->dit_digest = digest; 187 - ops->dit_setkey = setkey; 108 + 
ops->init = init; 109 + ops->update = update; 110 + ops->final = final; 111 + ops->digest = digest; 112 + ops->setkey = dalg->dia_setkey ? setkey : nosetkey; 113 + ops->digestsize = dalg->dia_digestsize; 188 114 189 - return crypto_alloc_hmac_block(tfm); 115 + return 0; 190 116 } 191 117 192 118 void crypto_exit_digest_ops(struct crypto_tfm *tfm) 193 119 { 194 - crypto_free_hmac_block(tfm); 195 120 }
+181
crypto/ecb.c
··· 1 + /* 2 + * ECB: Electronic CodeBook mode 3 + * 4 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #include <crypto/algapi.h> 14 + #include <linux/err.h> 15 + #include <linux/init.h> 16 + #include <linux/kernel.h> 17 + #include <linux/module.h> 18 + #include <linux/scatterlist.h> 19 + #include <linux/slab.h> 20 + 21 + struct crypto_ecb_ctx { 22 + struct crypto_cipher *child; 23 + }; 24 + 25 + static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key, 26 + unsigned int keylen) 27 + { 28 + struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent); 29 + struct crypto_cipher *child = ctx->child; 30 + int err; 31 + 32 + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 33 + crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & 34 + CRYPTO_TFM_REQ_MASK); 35 + err = crypto_cipher_setkey(child, key, keylen); 36 + crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & 37 + CRYPTO_TFM_RES_MASK); 38 + return err; 39 + } 40 + 41 + static int crypto_ecb_crypt(struct blkcipher_desc *desc, 42 + struct blkcipher_walk *walk, 43 + struct crypto_cipher *tfm, 44 + void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) 45 + { 46 + int bsize = crypto_cipher_blocksize(tfm); 47 + unsigned int nbytes; 48 + int err; 49 + 50 + err = blkcipher_walk_virt(desc, walk); 51 + 52 + while ((nbytes = walk->nbytes)) { 53 + u8 *wsrc = walk->src.virt.addr; 54 + u8 *wdst = walk->dst.virt.addr; 55 + 56 + do { 57 + fn(crypto_cipher_tfm(tfm), wdst, wsrc); 58 + 59 + wsrc += bsize; 60 + wdst += bsize; 61 + } while ((nbytes -= bsize) >= bsize); 62 + 63 + err = blkcipher_walk_done(desc, walk, nbytes); 64 + } 65 + 66 + return err; 67 + } 68 + 69 + static int 
crypto_ecb_encrypt(struct blkcipher_desc *desc, 70 + struct scatterlist *dst, struct scatterlist *src, 71 + unsigned int nbytes) 72 + { 73 + struct blkcipher_walk walk; 74 + struct crypto_blkcipher *tfm = desc->tfm; 75 + struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); 76 + struct crypto_cipher *child = ctx->child; 77 + 78 + blkcipher_walk_init(&walk, dst, src, nbytes); 79 + return crypto_ecb_crypt(desc, &walk, child, 80 + crypto_cipher_alg(child)->cia_encrypt); 81 + } 82 + 83 + static int crypto_ecb_decrypt(struct blkcipher_desc *desc, 84 + struct scatterlist *dst, struct scatterlist *src, 85 + unsigned int nbytes) 86 + { 87 + struct blkcipher_walk walk; 88 + struct crypto_blkcipher *tfm = desc->tfm; 89 + struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); 90 + struct crypto_cipher *child = ctx->child; 91 + 92 + blkcipher_walk_init(&walk, dst, src, nbytes); 93 + return crypto_ecb_crypt(desc, &walk, child, 94 + crypto_cipher_alg(child)->cia_decrypt); 95 + } 96 + 97 + static int crypto_ecb_init_tfm(struct crypto_tfm *tfm) 98 + { 99 + struct crypto_instance *inst = (void *)tfm->__crt_alg; 100 + struct crypto_spawn *spawn = crypto_instance_ctx(inst); 101 + struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); 102 + 103 + tfm = crypto_spawn_tfm(spawn); 104 + if (IS_ERR(tfm)) 105 + return PTR_ERR(tfm); 106 + 107 + ctx->child = crypto_cipher_cast(tfm); 108 + return 0; 109 + } 110 + 111 + static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) 112 + { 113 + struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); 114 + crypto_free_cipher(ctx->child); 115 + } 116 + 117 + static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len) 118 + { 119 + struct crypto_instance *inst; 120 + struct crypto_alg *alg; 121 + 122 + alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, 123 + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); 124 + if (IS_ERR(alg)) 125 + return ERR_PTR(PTR_ERR(alg)); 126 + 127 + inst = crypto_alloc_instance("ecb", alg); 128 + if 
(IS_ERR(inst)) 129 + goto out_put_alg; 130 + 131 + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; 132 + inst->alg.cra_priority = alg->cra_priority; 133 + inst->alg.cra_blocksize = alg->cra_blocksize; 134 + inst->alg.cra_alignmask = alg->cra_alignmask; 135 + inst->alg.cra_type = &crypto_blkcipher_type; 136 + 137 + inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; 138 + inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; 139 + 140 + inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx); 141 + 142 + inst->alg.cra_init = crypto_ecb_init_tfm; 143 + inst->alg.cra_exit = crypto_ecb_exit_tfm; 144 + 145 + inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey; 146 + inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt; 147 + inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt; 148 + 149 + out_put_alg: 150 + crypto_mod_put(alg); 151 + return inst; 152 + } 153 + 154 + static void crypto_ecb_free(struct crypto_instance *inst) 155 + { 156 + crypto_drop_spawn(crypto_instance_ctx(inst)); 157 + kfree(inst); 158 + } 159 + 160 + static struct crypto_template crypto_ecb_tmpl = { 161 + .name = "ecb", 162 + .alloc = crypto_ecb_alloc, 163 + .free = crypto_ecb_free, 164 + .module = THIS_MODULE, 165 + }; 166 + 167 + static int __init crypto_ecb_module_init(void) 168 + { 169 + return crypto_register_template(&crypto_ecb_tmpl); 170 + } 171 + 172 + static void __exit crypto_ecb_module_exit(void) 173 + { 174 + crypto_unregister_template(&crypto_ecb_tmpl); 175 + } 176 + 177 + module_init(crypto_ecb_module_init); 178 + module_exit(crypto_ecb_module_exit); 179 + 180 + MODULE_LICENSE("GPL"); 181 + MODULE_DESCRIPTION("ECB block cipher algorithm");
+61
crypto/hash.c
··· 1 + /* 2 + * Cryptographic Hash operations. 3 + * 4 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + */ 11 + 12 + #include <linux/errno.h> 13 + #include <linux/kernel.h> 14 + #include <linux/module.h> 15 + #include <linux/seq_file.h> 16 + 17 + #include "internal.h" 18 + 19 + static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg) 20 + { 21 + return alg->cra_ctxsize; 22 + } 23 + 24 + static int crypto_init_hash_ops(struct crypto_tfm *tfm) 25 + { 26 + struct hash_tfm *crt = &tfm->crt_hash; 27 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; 28 + 29 + if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) 30 + return -EINVAL; 31 + 32 + crt->init = alg->init; 33 + crt->update = alg->update; 34 + crt->final = alg->final; 35 + crt->digest = alg->digest; 36 + crt->setkey = alg->setkey; 37 + crt->digestsize = alg->digestsize; 38 + 39 + return 0; 40 + } 41 + 42 + static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) 43 + __attribute_used__; 44 + static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) 45 + { 46 + seq_printf(m, "type : hash\n"); 47 + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 48 + seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); 49 + } 50 + 51 + const struct crypto_type crypto_hash_type = { 52 + .ctxsize = crypto_hash_ctxsize, 53 + .init = crypto_init_hash_ops, 54 + #ifdef CONFIG_PROC_FS 55 + .show = crypto_hash_show, 56 + #endif 57 + }; 58 + EXPORT_SYMBOL_GPL(crypto_hash_type); 59 + 60 + MODULE_LICENSE("GPL"); 61 + MODULE_DESCRIPTION("Generic cryptographic hash type");
+215 -87
crypto/hmac.c
··· 4 4 * HMAC: Keyed-Hashing for Message Authentication (RFC2104). 5 5 * 6 6 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 7 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 7 8 * 8 9 * The HMAC implementation is derived from USAGI. 9 10 * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify it 12 13 * under the terms of the GNU General Public License as published by the Free 13 - * Software Foundation; either version 2 of the License, or (at your option) 14 + * Software Foundation; either version 2 of the License, or (at your option) 14 15 * any later version. 15 16 * 16 17 */ 17 - #include <linux/crypto.h> 18 - #include <linux/mm.h> 19 - #include <linux/highmem.h> 20 - #include <linux/slab.h> 18 + 19 + #include <crypto/algapi.h> 20 + #include <linux/err.h> 21 + #include <linux/init.h> 22 + #include <linux/kernel.h> 23 + #include <linux/module.h> 21 24 #include <linux/scatterlist.h> 22 - #include "internal.h" 25 + #include <linux/slab.h> 26 + #include <linux/string.h> 23 27 24 - static void hash_key(struct crypto_tfm *tfm, u8 *key, unsigned int keylen) 28 + struct hmac_ctx { 29 + struct crypto_hash *child; 30 + }; 31 + 32 + static inline void *align_ptr(void *p, unsigned int align) 25 33 { 26 - struct scatterlist tmp; 27 - 28 - sg_set_buf(&tmp, key, keylen); 29 - crypto_digest_digest(tfm, &tmp, 1, key); 34 + return (void *)ALIGN((unsigned long)p, align); 30 35 } 31 36 32 - int crypto_alloc_hmac_block(struct crypto_tfm *tfm) 37 + static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) 33 38 { 34 - int ret = 0; 35 - 36 - BUG_ON(!crypto_tfm_alg_blocksize(tfm)); 37 - 38 - tfm->crt_digest.dit_hmac_block = kmalloc(crypto_tfm_alg_blocksize(tfm), 39 - GFP_KERNEL); 40 - if (tfm->crt_digest.dit_hmac_block == NULL) 41 - ret = -ENOMEM; 42 - 43 - return ret; 44 - 39 + return align_ptr(crypto_hash_ctx_aligned(tfm) + 40 + 
crypto_hash_blocksize(tfm) * 2 + 41 + crypto_hash_digestsize(tfm), sizeof(void *)); 45 42 } 46 43 47 - void crypto_free_hmac_block(struct crypto_tfm *tfm) 44 + static int hmac_setkey(struct crypto_hash *parent, 45 + const u8 *inkey, unsigned int keylen) 48 46 { 49 - kfree(tfm->crt_digest.dit_hmac_block); 50 - } 51 - 52 - void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen) 53 - { 47 + int bs = crypto_hash_blocksize(parent); 48 + int ds = crypto_hash_digestsize(parent); 49 + char *ipad = crypto_hash_ctx_aligned(parent); 50 + char *opad = ipad + bs; 51 + char *digest = opad + bs; 52 + struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); 53 + struct crypto_hash *tfm = ctx->child; 54 54 unsigned int i; 55 - struct scatterlist tmp; 56 - char *ipad = tfm->crt_digest.dit_hmac_block; 57 - 58 - if (*keylen > crypto_tfm_alg_blocksize(tfm)) { 59 - hash_key(tfm, key, *keylen); 60 - *keylen = crypto_tfm_alg_digestsize(tfm); 55 + 56 + if (keylen > bs) { 57 + struct hash_desc desc; 58 + struct scatterlist tmp; 59 + int err; 60 + 61 + desc.tfm = tfm; 62 + desc.flags = crypto_hash_get_flags(parent); 63 + desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; 64 + sg_set_buf(&tmp, inkey, keylen); 65 + 66 + err = crypto_hash_digest(&desc, &tmp, keylen, digest); 67 + if (err) 68 + return err; 69 + 70 + inkey = digest; 71 + keylen = ds; 61 72 } 62 73 63 - memset(ipad, 0, crypto_tfm_alg_blocksize(tfm)); 64 - memcpy(ipad, key, *keylen); 74 + memcpy(ipad, inkey, keylen); 75 + memset(ipad + keylen, 0, bs - keylen); 76 + memcpy(opad, ipad, bs); 65 77 66 - for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++) 78 + for (i = 0; i < bs; i++) { 67 79 ipad[i] ^= 0x36; 68 - 69 - sg_set_buf(&tmp, ipad, crypto_tfm_alg_blocksize(tfm)); 70 - 71 - crypto_digest_init(tfm); 72 - crypto_digest_update(tfm, &tmp, 1); 73 - } 74 - 75 - void crypto_hmac_update(struct crypto_tfm *tfm, 76 - struct scatterlist *sg, unsigned int nsg) 77 - { 78 - crypto_digest_update(tfm, sg, nsg); 79 - } 80 - 81 - 
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key, 82 - unsigned int *keylen, u8 *out) 83 - { 84 - unsigned int i; 85 - struct scatterlist tmp; 86 - char *opad = tfm->crt_digest.dit_hmac_block; 87 - 88 - if (*keylen > crypto_tfm_alg_blocksize(tfm)) { 89 - hash_key(tfm, key, *keylen); 90 - *keylen = crypto_tfm_alg_digestsize(tfm); 80 + opad[i] ^= 0x5c; 91 81 } 92 82 93 - crypto_digest_final(tfm, out); 94 - 95 - memset(opad, 0, crypto_tfm_alg_blocksize(tfm)); 96 - memcpy(opad, key, *keylen); 97 - 98 - for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++) 99 - opad[i] ^= 0x5c; 100 - 101 - sg_set_buf(&tmp, opad, crypto_tfm_alg_blocksize(tfm)); 102 - 103 - crypto_digest_init(tfm); 104 - crypto_digest_update(tfm, &tmp, 1); 105 - 106 - sg_set_buf(&tmp, out, crypto_tfm_alg_digestsize(tfm)); 107 - 108 - crypto_digest_update(tfm, &tmp, 1); 109 - crypto_digest_final(tfm, out); 83 + return 0; 110 84 } 111 85 112 - void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen, 113 - struct scatterlist *sg, unsigned int nsg, u8 *out) 86 + static int hmac_init(struct hash_desc *pdesc) 114 87 { 115 - crypto_hmac_init(tfm, key, keylen); 116 - crypto_hmac_update(tfm, sg, nsg); 117 - crypto_hmac_final(tfm, key, keylen, out); 88 + struct crypto_hash *parent = pdesc->tfm; 89 + int bs = crypto_hash_blocksize(parent); 90 + int ds = crypto_hash_digestsize(parent); 91 + char *ipad = crypto_hash_ctx_aligned(parent); 92 + struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *)); 93 + struct hash_desc desc; 94 + struct scatterlist tmp; 95 + 96 + desc.tfm = ctx->child; 97 + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 98 + sg_set_buf(&tmp, ipad, bs); 99 + 100 + return unlikely(crypto_hash_init(&desc)) ?: 101 + crypto_hash_update(&desc, &tmp, 1); 118 102 } 119 103 120 - EXPORT_SYMBOL_GPL(crypto_hmac_init); 121 - EXPORT_SYMBOL_GPL(crypto_hmac_update); 122 - EXPORT_SYMBOL_GPL(crypto_hmac_final); 123 - EXPORT_SYMBOL_GPL(crypto_hmac); 104 + static int 
hmac_update(struct hash_desc *pdesc, 105 + struct scatterlist *sg, unsigned int nbytes) 106 + { 107 + struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); 108 + struct hash_desc desc; 124 109 110 + desc.tfm = ctx->child; 111 + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 112 + 113 + return crypto_hash_update(&desc, sg, nbytes); 114 + } 115 + 116 + static int hmac_final(struct hash_desc *pdesc, u8 *out) 117 + { 118 + struct crypto_hash *parent = pdesc->tfm; 119 + int bs = crypto_hash_blocksize(parent); 120 + int ds = crypto_hash_digestsize(parent); 121 + char *opad = crypto_hash_ctx_aligned(parent) + bs; 122 + char *digest = opad + bs; 123 + struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); 124 + struct hash_desc desc; 125 + struct scatterlist tmp; 126 + 127 + desc.tfm = ctx->child; 128 + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 129 + sg_set_buf(&tmp, opad, bs + ds); 130 + 131 + return unlikely(crypto_hash_final(&desc, digest)) ?: 132 + crypto_hash_digest(&desc, &tmp, bs + ds, out); 133 + } 134 + 135 + static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, 136 + unsigned int nbytes, u8 *out) 137 + { 138 + struct crypto_hash *parent = pdesc->tfm; 139 + int bs = crypto_hash_blocksize(parent); 140 + int ds = crypto_hash_digestsize(parent); 141 + char *ipad = crypto_hash_ctx_aligned(parent); 142 + char *opad = ipad + bs; 143 + char *digest = opad + bs; 144 + struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); 145 + struct hash_desc desc; 146 + struct scatterlist sg1[2]; 147 + struct scatterlist sg2[1]; 148 + 149 + desc.tfm = ctx->child; 150 + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 151 + 152 + sg_set_buf(sg1, ipad, bs); 153 + sg1[1].page = (void *)sg; 154 + sg1[1].length = 0; 155 + sg_set_buf(sg2, opad, bs + ds); 156 + 157 + return unlikely(crypto_hash_digest(&desc, sg1, nbytes + bs, digest)) ?: 158 + crypto_hash_digest(&desc, sg2, bs + ds, out); 159 + } 160 + 161 + static int hmac_init_tfm(struct 
crypto_tfm *tfm) 162 + { 163 + struct crypto_instance *inst = (void *)tfm->__crt_alg; 164 + struct crypto_spawn *spawn = crypto_instance_ctx(inst); 165 + struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); 166 + 167 + tfm = crypto_spawn_tfm(spawn); 168 + if (IS_ERR(tfm)) 169 + return PTR_ERR(tfm); 170 + 171 + ctx->child = crypto_hash_cast(tfm); 172 + return 0; 173 + } 174 + 175 + static void hmac_exit_tfm(struct crypto_tfm *tfm) 176 + { 177 + struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); 178 + crypto_free_hash(ctx->child); 179 + } 180 + 181 + static void hmac_free(struct crypto_instance *inst) 182 + { 183 + crypto_drop_spawn(crypto_instance_ctx(inst)); 184 + kfree(inst); 185 + } 186 + 187 + static struct crypto_instance *hmac_alloc(void *param, unsigned int len) 188 + { 189 + struct crypto_instance *inst; 190 + struct crypto_alg *alg; 191 + 192 + alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH, 193 + CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); 194 + if (IS_ERR(alg)) 195 + return ERR_PTR(PTR_ERR(alg)); 196 + 197 + inst = crypto_alloc_instance("hmac", alg); 198 + if (IS_ERR(inst)) 199 + goto out_put_alg; 200 + 201 + inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; 202 + inst->alg.cra_priority = alg->cra_priority; 203 + inst->alg.cra_blocksize = alg->cra_blocksize; 204 + inst->alg.cra_alignmask = alg->cra_alignmask; 205 + inst->alg.cra_type = &crypto_hash_type; 206 + 207 + inst->alg.cra_hash.digestsize = 208 + (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == 209 + CRYPTO_ALG_TYPE_HASH ? 
alg->cra_hash.digestsize : 210 + alg->cra_digest.dia_digestsize; 211 + 212 + inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + 213 + ALIGN(inst->alg.cra_blocksize * 2 + 214 + inst->alg.cra_hash.digestsize, 215 + sizeof(void *)); 216 + 217 + inst->alg.cra_init = hmac_init_tfm; 218 + inst->alg.cra_exit = hmac_exit_tfm; 219 + 220 + inst->alg.cra_hash.init = hmac_init; 221 + inst->alg.cra_hash.update = hmac_update; 222 + inst->alg.cra_hash.final = hmac_final; 223 + inst->alg.cra_hash.digest = hmac_digest; 224 + inst->alg.cra_hash.setkey = hmac_setkey; 225 + 226 + out_put_alg: 227 + crypto_mod_put(alg); 228 + return inst; 229 + } 230 + 231 + static struct crypto_template hmac_tmpl = { 232 + .name = "hmac", 233 + .alloc = hmac_alloc, 234 + .free = hmac_free, 235 + .module = THIS_MODULE, 236 + }; 237 + 238 + static int __init hmac_module_init(void) 239 + { 240 + return crypto_register_template(&hmac_tmpl); 241 + } 242 + 243 + static void __exit hmac_module_exit(void) 244 + { 245 + crypto_unregister_template(&hmac_tmpl); 246 + } 247 + 248 + module_init(hmac_module_init); 249 + module_exit(hmac_module_exit); 250 + 251 + MODULE_LICENSE("GPL"); 252 + MODULE_DESCRIPTION("HMAC hash algorithm");
+89 -17
crypto/internal.h
··· 12 12 */ 13 13 #ifndef _CRYPTO_INTERNAL_H 14 14 #define _CRYPTO_INTERNAL_H 15 - #include <linux/crypto.h> 15 + 16 + #include <crypto/algapi.h> 17 + #include <linux/completion.h> 16 18 #include <linux/mm.h> 17 19 #include <linux/highmem.h> 18 20 #include <linux/interrupt.h> 19 21 #include <linux/init.h> 20 22 #include <linux/list.h> 23 + #include <linux/module.h> 21 24 #include <linux/kernel.h> 25 + #include <linux/notifier.h> 22 26 #include <linux/rwsem.h> 23 27 #include <linux/slab.h> 24 28 #include <asm/kmap_types.h> 25 29 30 + /* Crypto notification events. */ 31 + enum { 32 + CRYPTO_MSG_ALG_REQUEST, 33 + CRYPTO_MSG_ALG_REGISTER, 34 + CRYPTO_MSG_ALG_UNREGISTER, 35 + CRYPTO_MSG_TMPL_REGISTER, 36 + CRYPTO_MSG_TMPL_UNREGISTER, 37 + }; 38 + 39 + struct crypto_instance; 40 + struct crypto_template; 41 + 42 + struct crypto_larval { 43 + struct crypto_alg alg; 44 + struct crypto_alg *adult; 45 + struct completion completion; 46 + u32 mask; 47 + }; 48 + 26 49 extern struct list_head crypto_alg_list; 27 50 extern struct rw_semaphore crypto_alg_sem; 51 + extern struct blocking_notifier_head crypto_chain; 28 52 29 53 extern enum km_type crypto_km_types[]; 30 54 ··· 67 43 kunmap_atomic(vaddr, crypto_kmap_type(out)); 68 44 } 69 45 70 - static inline void crypto_yield(struct crypto_tfm *tfm) 46 + static inline void crypto_yield(u32 flags) 71 47 { 72 - if (tfm->crt_flags & CRYPTO_TFM_REQ_MAY_SLEEP) 48 + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) 73 49 cond_resched(); 74 50 } 75 51 76 - #ifdef CONFIG_CRYPTO_HMAC 77 - int crypto_alloc_hmac_block(struct crypto_tfm *tfm); 78 - void crypto_free_hmac_block(struct crypto_tfm *tfm); 79 - #else 80 - static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm) 81 - { 82 - return 0; 83 - } 84 - 85 - static inline void crypto_free_hmac_block(struct crypto_tfm *tfm) 86 - { } 87 - #endif 88 - 89 52 #ifdef CONFIG_PROC_FS 90 53 void __init crypto_init_proc(void); 54 + void __exit crypto_exit_proc(void); 91 55 #else 92 56 static inline 
void crypto_init_proc(void) 57 + { } 58 + static inline void crypto_exit_proc(void) 93 59 { } 94 60 #endif 95 61 96 62 static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg, 97 63 int flags) 98 64 { 99 - return alg->cra_ctxsize; 65 + unsigned int len = alg->cra_ctxsize; 66 + 67 + if (alg->cra_alignmask) { 68 + len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); 69 + len += alg->cra_digest.dia_digestsize; 70 + } 71 + 72 + return len; 100 73 } 101 74 102 75 static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg, ··· 117 96 return alg->cra_ctxsize; 118 97 } 119 98 99 + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); 100 + struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask); 101 + struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); 102 + 120 103 int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags); 121 104 int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags); 122 105 int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags); ··· 132 107 void crypto_exit_digest_ops(struct crypto_tfm *tfm); 133 108 void crypto_exit_cipher_ops(struct crypto_tfm *tfm); 134 109 void crypto_exit_compress_ops(struct crypto_tfm *tfm); 110 + 111 + void crypto_larval_error(const char *name, u32 type, u32 mask); 112 + 113 + void crypto_shoot_alg(struct crypto_alg *alg); 114 + struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags); 115 + 116 + int crypto_register_instance(struct crypto_template *tmpl, 117 + struct crypto_instance *inst); 118 + 119 + int crypto_register_notifier(struct notifier_block *nb); 120 + int crypto_unregister_notifier(struct notifier_block *nb); 121 + 122 + static inline void crypto_alg_put(struct crypto_alg *alg) 123 + { 124 + if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) 125 + alg->cra_destroy(alg); 126 + } 127 + 128 + static inline int crypto_tmpl_get(struct crypto_template *tmpl) 129 + { 130 + return 
try_module_get(tmpl->module); 131 + } 132 + 133 + static inline void crypto_tmpl_put(struct crypto_template *tmpl) 134 + { 135 + module_put(tmpl->module); 136 + } 137 + 138 + static inline int crypto_is_larval(struct crypto_alg *alg) 139 + { 140 + return alg->cra_flags & CRYPTO_ALG_LARVAL; 141 + } 142 + 143 + static inline int crypto_is_dead(struct crypto_alg *alg) 144 + { 145 + return alg->cra_flags & CRYPTO_ALG_DEAD; 146 + } 147 + 148 + static inline int crypto_is_moribund(struct crypto_alg *alg) 149 + { 150 + return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING); 151 + } 152 + 153 + static inline int crypto_notify(unsigned long val, void *v) 154 + { 155 + return blocking_notifier_call_chain(&crypto_chain, val, v); 156 + } 135 157 136 158 #endif /* _CRYPTO_INTERNAL_H */ 137 159
+1 -7
crypto/khazad.c
··· 755 755 }; 756 756 757 757 static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, 758 - unsigned int key_len, u32 *flags) 758 + unsigned int key_len) 759 759 { 760 760 struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); 761 761 const __be32 *key = (const __be32 *)in_key; 762 762 int r; 763 763 const u64 *S = T7; 764 764 u64 K2, K1; 765 - 766 - if (key_len != 16) 767 - { 768 - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 769 - return -EINVAL; 770 - } 771 765 772 766 /* key is supposed to be 32-bit aligned */ 773 767 K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
+2 -3
crypto/michael_mic.c
··· 123 123 124 124 125 125 static int michael_setkey(struct crypto_tfm *tfm, const u8 *key, 126 - unsigned int keylen, u32 *flags) 126 + unsigned int keylen) 127 127 { 128 128 struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); 129 129 const __le32 *data = (const __le32 *)key; 130 130 131 131 if (keylen != 8) { 132 - if (flags) 133 - *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; 132 + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 134 133 return -EINVAL; 135 134 } 136 135
+12 -1
crypto/proc.c
··· 12 12 * any later version. 13 13 * 14 14 */ 15 + 16 + #include <asm/atomic.h> 15 17 #include <linux/init.h> 16 18 #include <linux/crypto.h> 17 19 #include <linux/rwsem.h> ··· 56 54 seq_printf(m, "driver : %s\n", alg->cra_driver_name); 57 55 seq_printf(m, "module : %s\n", module_name(alg->cra_module)); 58 56 seq_printf(m, "priority : %d\n", alg->cra_priority); 57 + seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt)); 59 58 60 59 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 61 60 case CRYPTO_ALG_TYPE_CIPHER: ··· 78 75 seq_printf(m, "type : compression\n"); 79 76 break; 80 77 default: 81 - seq_printf(m, "type : unknown\n"); 78 + if (alg->cra_type && alg->cra_type->show) 79 + alg->cra_type->show(m, alg); 80 + else 81 + seq_printf(m, "type : unknown\n"); 82 82 break; 83 83 } 84 84 ··· 115 109 proc = create_proc_entry("crypto", 0, NULL); 116 110 if (proc) 117 111 proc->proc_fops = &proc_crypto_ops; 112 + } 113 + 114 + void __exit crypto_exit_proc(void) 115 + { 116 + remove_proc_entry("crypto", NULL); 118 117 }
+40 -49
crypto/scatterwalk.c
··· 15 15 */ 16 16 #include <linux/kernel.h> 17 17 #include <linux/mm.h> 18 + #include <linux/module.h> 18 19 #include <linux/pagemap.h> 19 20 #include <linux/highmem.h> 20 - #include <asm/scatterlist.h> 21 + #include <linux/scatterlist.h> 22 + 21 23 #include "internal.h" 22 24 #include "scatterwalk.h" 23 25 ··· 29 27 KM_SOFTIRQ0, 30 28 KM_SOFTIRQ1, 31 29 }; 30 + EXPORT_SYMBOL_GPL(crypto_km_types); 32 31 33 - static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) 32 + static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) 34 33 { 35 - if (out) 36 - memcpy(sgdata, buf, nbytes); 37 - else 38 - memcpy(buf, sgdata, nbytes); 34 + void *src = out ? buf : sgdata; 35 + void *dst = out ? sgdata : buf; 36 + 37 + memcpy(dst, src, nbytes); 39 38 } 40 39 41 40 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) 42 41 { 43 - unsigned int rest_of_page; 44 - 45 42 walk->sg = sg; 46 - 47 - walk->page = sg->page; 48 - walk->len_this_segment = sg->length; 49 43 50 44 BUG_ON(!sg->length); 51 45 52 - rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1)); 53 - walk->len_this_page = min(sg->length, rest_of_page); 54 46 walk->offset = sg->offset; 55 47 } 48 + EXPORT_SYMBOL_GPL(scatterwalk_start); 56 49 57 - void scatterwalk_map(struct scatter_walk *walk, int out) 50 + void *scatterwalk_map(struct scatter_walk *walk, int out) 58 51 { 59 - walk->data = crypto_kmap(walk->page, out) + walk->offset; 52 + return crypto_kmap(scatterwalk_page(walk), out) + 53 + offset_in_page(walk->offset); 60 54 } 61 - 62 - static inline void scatterwalk_unmap(struct scatter_walk *walk, int out) 63 - { 64 - /* walk->data may be pointing the first byte of the next page; 65 - however, we know we transfered at least one byte. So, 66 - walk->data - 1 will be a virtual address in the mapped page. 
*/ 67 - crypto_kunmap(walk->data - 1, out); 68 - } 55 + EXPORT_SYMBOL_GPL(scatterwalk_map); 69 56 70 57 static void scatterwalk_pagedone(struct scatter_walk *walk, int out, 71 58 unsigned int more) 72 59 { 73 60 if (out) 74 - flush_dcache_page(walk->page); 61 + flush_dcache_page(scatterwalk_page(walk)); 75 62 76 63 if (more) { 77 - walk->len_this_segment -= walk->len_this_page; 78 - 79 - if (walk->len_this_segment) { 80 - walk->page++; 81 - walk->len_this_page = min(walk->len_this_segment, 82 - (unsigned)PAGE_CACHE_SIZE); 83 - walk->offset = 0; 84 - } 85 - else 64 + walk->offset += PAGE_SIZE - 1; 65 + walk->offset &= PAGE_MASK; 66 + if (walk->offset >= walk->sg->offset + walk->sg->length) 86 67 scatterwalk_start(walk, sg_next(walk->sg)); 87 68 } 88 69 } 89 70 90 71 void scatterwalk_done(struct scatter_walk *walk, int out, int more) 91 72 { 92 - scatterwalk_unmap(walk, out); 93 - if (walk->len_this_page == 0 || !more) 73 + if (!offset_in_page(walk->offset) || !more) 94 74 scatterwalk_pagedone(walk, out, more); 95 75 } 76 + EXPORT_SYMBOL_GPL(scatterwalk_done); 96 77 97 - /* 98 - * Do not call this unless the total length of all of the fragments 99 - * has been verified as multiple of the block size. 
100 - */ 101 - int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 102 - size_t nbytes, int out) 78 + void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 79 + size_t nbytes, int out) 103 80 { 104 - while (nbytes > walk->len_this_page) { 105 - memcpy_dir(buf, walk->data, walk->len_this_page, out); 106 - buf += walk->len_this_page; 107 - nbytes -= walk->len_this_page; 81 + for (;;) { 82 + unsigned int len_this_page = scatterwalk_pagelen(walk); 83 + u8 *vaddr; 108 84 109 - scatterwalk_unmap(walk, out); 85 + if (len_this_page > nbytes) 86 + len_this_page = nbytes; 87 + 88 + vaddr = scatterwalk_map(walk, out); 89 + memcpy_dir(buf, vaddr, len_this_page, out); 90 + scatterwalk_unmap(vaddr, out); 91 + 92 + if (nbytes == len_this_page) 93 + break; 94 + 95 + buf += len_this_page; 96 + nbytes -= len_this_page; 97 + 110 98 scatterwalk_pagedone(walk, out, 1); 111 - scatterwalk_map(walk, out); 112 99 } 113 100 114 - memcpy_dir(buf, walk->data, nbytes, out); 115 - return nbytes; 101 + scatterwalk_advance(walk, nbytes); 116 102 } 103 + EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
+30 -22
crypto/scatterwalk.h
··· 14 14 15 15 #ifndef _CRYPTO_SCATTERWALK_H 16 16 #define _CRYPTO_SCATTERWALK_H 17 + 17 18 #include <linux/mm.h> 18 - #include <asm/scatterlist.h> 19 + #include <linux/scatterlist.h> 19 20 20 - struct scatter_walk { 21 - struct scatterlist *sg; 22 - struct page *page; 23 - void *data; 24 - unsigned int len_this_page; 25 - unsigned int len_this_segment; 26 - unsigned int offset; 27 - }; 21 + #include "internal.h" 28 22 29 - /* Define sg_next is an inline routine now in case we want to change 30 - scatterlist to a linked list later. */ 31 23 static inline struct scatterlist *sg_next(struct scatterlist *sg) 32 24 { 33 - return sg + 1; 25 + return (++sg)->length ? sg : (void *)sg->page; 34 26 } 35 27 36 - static inline int scatterwalk_samebuf(struct scatter_walk *walk_in, 37 - struct scatter_walk *walk_out) 28 + static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, 29 + struct scatter_walk *walk_out) 38 30 { 39 - return walk_in->page == walk_out->page && 40 - walk_in->offset == walk_out->offset; 31 + return !(((walk_in->sg->page - walk_out->sg->page) << PAGE_SHIFT) + 32 + (int)(walk_in->offset - walk_out->offset)); 33 + } 34 + 35 + static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) 36 + { 37 + unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; 38 + unsigned int len_this_page = offset_in_page(~walk->offset) + 1; 39 + return len_this_page > len ? len : len_this_page; 41 40 } 42 41 43 42 static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, 44 43 unsigned int nbytes) 45 44 { 46 - return nbytes > walk->len_this_page ? walk->len_this_page : nbytes; 45 + unsigned int len_this_page = scatterwalk_pagelen(walk); 46 + return nbytes > len_this_page ? 
len_this_page : nbytes; 47 47 } 48 48 49 49 static inline void scatterwalk_advance(struct scatter_walk *walk, 50 50 unsigned int nbytes) 51 51 { 52 - walk->data += nbytes; 53 52 walk->offset += nbytes; 54 - walk->len_this_page -= nbytes; 55 - walk->len_this_segment -= nbytes; 56 53 } 57 54 58 55 static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, ··· 58 61 return !(walk->offset & alignmask); 59 62 } 60 63 64 + static inline struct page *scatterwalk_page(struct scatter_walk *walk) 65 + { 66 + return walk->sg->page + (walk->offset >> PAGE_SHIFT); 67 + } 68 + 69 + static inline void scatterwalk_unmap(void *vaddr, int out) 70 + { 71 + crypto_kunmap(vaddr, out); 72 + } 73 + 61 74 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); 62 - int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); 63 - void scatterwalk_map(struct scatter_walk *walk, int out); 75 + void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 76 + size_t nbytes, int out); 77 + void *scatterwalk_map(struct scatter_walk *walk, int out); 64 78 void scatterwalk_done(struct scatter_walk *walk, int out, int more); 65 79 66 80 #endif /* _CRYPTO_SCATTERWALK_H */
+3 -16
crypto/serpent.c
··· 216 216 217 217 218 218 static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, 219 - unsigned int keylen, u32 *flags) 219 + unsigned int keylen) 220 220 { 221 221 struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); 222 222 u32 *k = ctx->expkey; 223 223 u8 *k8 = (u8 *)k; 224 224 u32 r0,r1,r2,r3,r4; 225 225 int i; 226 - 227 - if ((keylen < SERPENT_MIN_KEY_SIZE) 228 - || (keylen > SERPENT_MAX_KEY_SIZE)) 229 - { 230 - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 231 - return -EINVAL; 232 - } 233 226 234 227 /* Copy key, add padding */ 235 228 ··· 490 497 }; 491 498 492 499 static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key, 493 - unsigned int keylen, u32 *flags) 500 + unsigned int keylen) 494 501 { 495 502 u8 rev_key[SERPENT_MAX_KEY_SIZE]; 496 503 int i; 497 504 498 - if ((keylen < SERPENT_MIN_KEY_SIZE) 499 - || (keylen > SERPENT_MAX_KEY_SIZE)) { 500 - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 501 - return -EINVAL; 502 - } 503 - 504 505 for (i = 0; i < keylen; ++i) 505 506 rev_key[keylen - i - 1] = key[i]; 506 507 507 - return serpent_setkey(tfm, rev_key, keylen, flags); 508 + return serpent_setkey(tfm, rev_key, keylen); 508 509 } 509 510 510 511 static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+3
crypto/sha1.c
··· 109 109 110 110 static struct crypto_alg alg = { 111 111 .cra_name = "sha1", 112 + .cra_driver_name= "sha1-generic", 112 113 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 113 114 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, 114 115 .cra_ctxsize = sizeof(struct sha1_ctx), ··· 138 137 139 138 MODULE_LICENSE("GPL"); 140 139 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 140 + 141 + MODULE_ALIAS("sha1-generic");
+3
crypto/sha256.c
··· 309 309 310 310 static struct crypto_alg alg = { 311 311 .cra_name = "sha256", 312 + .cra_driver_name= "sha256-generic", 312 313 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 313 314 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, 314 315 .cra_ctxsize = sizeof(struct sha256_ctx), ··· 338 337 339 338 MODULE_LICENSE("GPL"); 340 339 MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm"); 340 + 341 + MODULE_ALIAS("sha256-generic");
+484 -441
crypto/tcrypt.c
··· 17 17 * 18 18 */ 19 19 20 + #include <linux/err.h> 20 21 #include <linux/init.h> 21 22 #include <linux/module.h> 22 23 #include <linux/mm.h> ··· 55 54 */ 56 55 #define ENCRYPT 1 57 56 #define DECRYPT 0 58 - #define MODE_ECB 1 59 - #define MODE_CBC 0 60 57 61 58 static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; 62 59 ··· 88 89 unsigned int i, j, k, temp; 89 90 struct scatterlist sg[8]; 90 91 char result[64]; 91 - struct crypto_tfm *tfm; 92 + struct crypto_hash *tfm; 93 + struct hash_desc desc; 92 94 struct hash_testvec *hash_tv; 93 95 unsigned int tsize; 96 + int ret; 94 97 95 98 printk("\ntesting %s\n", algo); 96 99 ··· 106 105 107 106 memcpy(tvmem, template, tsize); 108 107 hash_tv = (void *)tvmem; 109 - tfm = crypto_alloc_tfm(algo, 0); 110 - if (tfm == NULL) { 111 - printk("failed to load transform for %s\n", algo); 108 + 109 + tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); 110 + if (IS_ERR(tfm)) { 111 + printk("failed to load transform for %s: %ld\n", algo, 112 + PTR_ERR(tfm)); 112 113 return; 113 114 } 115 + 116 + desc.tfm = tfm; 117 + desc.flags = 0; 114 118 115 119 for (i = 0; i < tcount; i++) { 116 120 printk("test %u:\n", i + 1); ··· 123 117 124 118 sg_set_buf(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize); 125 119 126 - crypto_digest_init(tfm); 127 - if (tfm->crt_u.digest.dit_setkey) { 128 - crypto_digest_setkey(tfm, hash_tv[i].key, 129 - hash_tv[i].ksize); 120 + if (hash_tv[i].ksize) { 121 + ret = crypto_hash_setkey(tfm, hash_tv[i].key, 122 + hash_tv[i].ksize); 123 + if (ret) { 124 + printk("setkey() failed ret=%d\n", ret); 125 + goto out; 126 + } 130 127 } 131 - crypto_digest_update(tfm, sg, 1); 132 - crypto_digest_final(tfm, result); 133 128 134 - hexdump(result, crypto_tfm_alg_digestsize(tfm)); 129 + ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize, result); 130 + if (ret) { 131 + printk("digest () failed ret=%d\n", ret); 132 + goto out; 133 + } 134 + 135 + hexdump(result, crypto_hash_digestsize(tfm)); 135 
136 printk("%s\n", 136 137 memcmp(result, hash_tv[i].digest, 137 - crypto_tfm_alg_digestsize(tfm)) ? 138 + crypto_hash_digestsize(tfm)) ? 138 139 "fail" : "pass"); 139 140 } 140 141 ··· 167 154 hash_tv[i].tap[k]); 168 155 } 169 156 170 - crypto_digest_digest(tfm, sg, hash_tv[i].np, result); 157 + if (hash_tv[i].ksize) { 158 + ret = crypto_hash_setkey(tfm, hash_tv[i].key, 159 + hash_tv[i].ksize); 171 160 172 - hexdump(result, crypto_tfm_alg_digestsize(tfm)); 173 - printk("%s\n", 174 - memcmp(result, hash_tv[i].digest, 175 - crypto_tfm_alg_digestsize(tfm)) ? 176 - "fail" : "pass"); 177 - } 178 - } 179 - 180 - crypto_free_tfm(tfm); 181 - } 182 - 183 - 184 - #ifdef CONFIG_CRYPTO_HMAC 185 - 186 - static void test_hmac(char *algo, struct hmac_testvec *template, 187 - unsigned int tcount) 188 - { 189 - unsigned int i, j, k, temp; 190 - struct scatterlist sg[8]; 191 - char result[64]; 192 - struct crypto_tfm *tfm; 193 - struct hmac_testvec *hmac_tv; 194 - unsigned int tsize, klen; 195 - 196 - tfm = crypto_alloc_tfm(algo, 0); 197 - if (tfm == NULL) { 198 - printk("failed to load transform for %s\n", algo); 199 - return; 200 - } 201 - 202 - printk("\ntesting hmac_%s\n", algo); 203 - 204 - tsize = sizeof(struct hmac_testvec); 205 - tsize *= tcount; 206 - if (tsize > TVMEMSIZE) { 207 - printk("template (%u) too big for tvmem (%u)\n", tsize, 208 - TVMEMSIZE); 209 - goto out; 210 - } 211 - 212 - memcpy(tvmem, template, tsize); 213 - hmac_tv = (void *)tvmem; 214 - 215 - for (i = 0; i < tcount; i++) { 216 - printk("test %u:\n", i + 1); 217 - memset(result, 0, sizeof (result)); 218 - 219 - klen = hmac_tv[i].ksize; 220 - sg_set_buf(&sg[0], hmac_tv[i].plaintext, hmac_tv[i].psize); 221 - 222 - crypto_hmac(tfm, hmac_tv[i].key, &klen, sg, 1, result); 223 - 224 - hexdump(result, crypto_tfm_alg_digestsize(tfm)); 225 - printk("%s\n", 226 - memcmp(result, hmac_tv[i].digest, 227 - crypto_tfm_alg_digestsize(tfm)) ? 
"fail" : 228 - "pass"); 229 - } 230 - 231 - printk("\ntesting hmac_%s across pages\n", algo); 232 - 233 - memset(xbuf, 0, XBUFSIZE); 234 - 235 - j = 0; 236 - for (i = 0; i < tcount; i++) { 237 - if (hmac_tv[i].np) { 238 - j++; 239 - printk("test %u:\n",j); 240 - memset(result, 0, 64); 241 - 242 - temp = 0; 243 - klen = hmac_tv[i].ksize; 244 - for (k = 0; k < hmac_tv[i].np; k++) { 245 - memcpy(&xbuf[IDX[k]], 246 - hmac_tv[i].plaintext + temp, 247 - hmac_tv[i].tap[k]); 248 - temp += hmac_tv[i].tap[k]; 249 - sg_set_buf(&sg[k], &xbuf[IDX[k]], 250 - hmac_tv[i].tap[k]); 161 + if (ret) { 162 + printk("setkey() failed ret=%d\n", ret); 163 + goto out; 164 + } 251 165 } 252 166 253 - crypto_hmac(tfm, hmac_tv[i].key, &klen, sg, 254 - hmac_tv[i].np, result); 255 - hexdump(result, crypto_tfm_alg_digestsize(tfm)); 167 + ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize, 168 + result); 169 + if (ret) { 170 + printk("digest () failed ret=%d\n", ret); 171 + goto out; 172 + } 256 173 174 + hexdump(result, crypto_hash_digestsize(tfm)); 257 175 printk("%s\n", 258 - memcmp(result, hmac_tv[i].digest, 259 - crypto_tfm_alg_digestsize(tfm)) ? 176 + memcmp(result, hash_tv[i].digest, 177 + crypto_hash_digestsize(tfm)) ? 
260 178 "fail" : "pass"); 261 179 } 262 180 } 181 + 263 182 out: 264 - crypto_free_tfm(tfm); 183 + crypto_free_hash(tfm); 265 184 } 266 185 267 - #endif /* CONFIG_CRYPTO_HMAC */ 268 - 269 - static void test_cipher(char *algo, int mode, int enc, 186 + static void test_cipher(char *algo, int enc, 270 187 struct cipher_testvec *template, unsigned int tcount) 271 188 { 272 189 unsigned int ret, i, j, k, temp; 273 190 unsigned int tsize; 191 + unsigned int iv_len; 192 + unsigned int len; 274 193 char *q; 275 - struct crypto_tfm *tfm; 194 + struct crypto_blkcipher *tfm; 276 195 char *key; 277 196 struct cipher_testvec *cipher_tv; 197 + struct blkcipher_desc desc; 278 198 struct scatterlist sg[8]; 279 - const char *e, *m; 199 + const char *e; 280 200 281 201 if (enc == ENCRYPT) 282 202 e = "encryption"; 283 203 else 284 204 e = "decryption"; 285 - if (mode == MODE_ECB) 286 - m = "ECB"; 287 - else 288 - m = "CBC"; 289 205 290 - printk("\ntesting %s %s %s\n", algo, m, e); 206 + printk("\ntesting %s %s\n", algo, e); 291 207 292 208 tsize = sizeof (struct cipher_testvec); 293 209 tsize *= tcount; ··· 230 288 memcpy(tvmem, template, tsize); 231 289 cipher_tv = (void *)tvmem; 232 290 233 - if (mode) 234 - tfm = crypto_alloc_tfm(algo, 0); 235 - else 236 - tfm = crypto_alloc_tfm(algo, CRYPTO_TFM_MODE_CBC); 291 + tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); 237 292 238 - if (tfm == NULL) { 239 - printk("failed to load transform for %s %s\n", algo, m); 293 + if (IS_ERR(tfm)) { 294 + printk("failed to load transform for %s: %ld\n", algo, 295 + PTR_ERR(tfm)); 240 296 return; 241 297 } 298 + desc.tfm = tfm; 299 + desc.flags = 0; 242 300 243 301 j = 0; 244 302 for (i = 0; i < tcount; i++) { ··· 247 305 printk("test %u (%d bit key):\n", 248 306 j, cipher_tv[i].klen * 8); 249 307 250 - tfm->crt_flags = 0; 308 + crypto_blkcipher_clear_flags(tfm, ~0); 251 309 if (cipher_tv[i].wk) 252 - tfm->crt_flags |= CRYPTO_TFM_REQ_WEAK_KEY; 310 + crypto_blkcipher_set_flags( 311 + tfm, 
CRYPTO_TFM_REQ_WEAK_KEY); 253 312 key = cipher_tv[i].key; 254 313 255 - ret = crypto_cipher_setkey(tfm, key, cipher_tv[i].klen); 314 + ret = crypto_blkcipher_setkey(tfm, key, 315 + cipher_tv[i].klen); 256 316 if (ret) { 257 - printk("setkey() failed flags=%x\n", tfm->crt_flags); 317 + printk("setkey() failed flags=%x\n", 318 + crypto_blkcipher_get_flags(tfm)); 258 319 259 320 if (!cipher_tv[i].fail) 260 321 goto out; ··· 266 321 sg_set_buf(&sg[0], cipher_tv[i].input, 267 322 cipher_tv[i].ilen); 268 323 269 - if (!mode) { 270 - crypto_cipher_set_iv(tfm, cipher_tv[i].iv, 271 - crypto_tfm_alg_ivsize(tfm)); 272 - } 324 + iv_len = crypto_blkcipher_ivsize(tfm); 325 + if (iv_len) 326 + crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, 327 + iv_len); 273 328 274 - if (enc) 275 - ret = crypto_cipher_encrypt(tfm, sg, sg, cipher_tv[i].ilen); 276 - else 277 - ret = crypto_cipher_decrypt(tfm, sg, sg, cipher_tv[i].ilen); 278 - 329 + len = cipher_tv[i].ilen; 330 + ret = enc ? 331 + crypto_blkcipher_encrypt(&desc, sg, sg, len) : 332 + crypto_blkcipher_decrypt(&desc, sg, sg, len); 279 333 280 334 if (ret) { 281 - printk("%s () failed flags=%x\n", e, tfm->crt_flags); 335 + printk("%s () failed flags=%x\n", e, 336 + desc.flags); 282 337 goto out; 283 338 } 284 339 ··· 291 346 } 292 347 } 293 348 294 - printk("\ntesting %s %s %s across pages (chunking)\n", algo, m, e); 349 + printk("\ntesting %s %s across pages (chunking)\n", algo, e); 295 350 memset(xbuf, 0, XBUFSIZE); 296 351 297 352 j = 0; ··· 301 356 printk("test %u (%d bit key):\n", 302 357 j, cipher_tv[i].klen * 8); 303 358 304 - tfm->crt_flags = 0; 359 + crypto_blkcipher_clear_flags(tfm, ~0); 305 360 if (cipher_tv[i].wk) 306 - tfm->crt_flags |= CRYPTO_TFM_REQ_WEAK_KEY; 361 + crypto_blkcipher_set_flags( 362 + tfm, CRYPTO_TFM_REQ_WEAK_KEY); 307 363 key = cipher_tv[i].key; 308 364 309 - ret = crypto_cipher_setkey(tfm, key, cipher_tv[i].klen); 365 + ret = crypto_blkcipher_setkey(tfm, key, 366 + cipher_tv[i].klen); 310 367 if (ret) { 
311 - printk("setkey() failed flags=%x\n", tfm->crt_flags); 368 + printk("setkey() failed flags=%x\n", 369 + crypto_blkcipher_get_flags(tfm)); 312 370 313 371 if (!cipher_tv[i].fail) 314 372 goto out; ··· 327 379 cipher_tv[i].tap[k]); 328 380 } 329 381 330 - if (!mode) { 331 - crypto_cipher_set_iv(tfm, cipher_tv[i].iv, 332 - crypto_tfm_alg_ivsize(tfm)); 333 - } 382 + iv_len = crypto_blkcipher_ivsize(tfm); 383 + if (iv_len) 384 + crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, 385 + iv_len); 334 386 335 - if (enc) 336 - ret = crypto_cipher_encrypt(tfm, sg, sg, cipher_tv[i].ilen); 337 - else 338 - ret = crypto_cipher_decrypt(tfm, sg, sg, cipher_tv[i].ilen); 387 + len = cipher_tv[i].ilen; 388 + ret = enc ? 389 + crypto_blkcipher_encrypt(&desc, sg, sg, len) : 390 + crypto_blkcipher_decrypt(&desc, sg, sg, len); 339 391 340 392 if (ret) { 341 - printk("%s () failed flags=%x\n", e, tfm->crt_flags); 393 + printk("%s () failed flags=%x\n", e, 394 + desc.flags); 342 395 goto out; 343 396 } 344 397 ··· 358 409 } 359 410 360 411 out: 361 - crypto_free_tfm(tfm); 412 + crypto_free_blkcipher(tfm); 362 413 } 363 414 364 - static int test_cipher_jiffies(struct crypto_tfm *tfm, int enc, char *p, 415 + static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p, 365 416 int blen, int sec) 366 417 { 367 418 struct scatterlist sg[1]; ··· 374 425 for (start = jiffies, end = start + sec * HZ, bcount = 0; 375 426 time_before(jiffies, end); bcount++) { 376 427 if (enc) 377 - ret = crypto_cipher_encrypt(tfm, sg, sg, blen); 428 + ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); 378 429 else 379 - ret = crypto_cipher_decrypt(tfm, sg, sg, blen); 430 + ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); 380 431 381 432 if (ret) 382 433 return ret; ··· 387 438 return 0; 388 439 } 389 440 390 - static int test_cipher_cycles(struct crypto_tfm *tfm, int enc, char *p, 441 + static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p, 391 442 int blen) 392 443 { 393 
444 struct scatterlist sg[1]; ··· 403 454 /* Warm-up run. */ 404 455 for (i = 0; i < 4; i++) { 405 456 if (enc) 406 - ret = crypto_cipher_encrypt(tfm, sg, sg, blen); 457 + ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); 407 458 else 408 - ret = crypto_cipher_decrypt(tfm, sg, sg, blen); 459 + ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); 409 460 410 461 if (ret) 411 462 goto out; ··· 417 468 418 469 start = get_cycles(); 419 470 if (enc) 420 - ret = crypto_cipher_encrypt(tfm, sg, sg, blen); 471 + ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); 421 472 else 422 - ret = crypto_cipher_decrypt(tfm, sg, sg, blen); 473 + ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); 423 474 end = get_cycles(); 424 475 425 476 if (ret) ··· 439 490 return ret; 440 491 } 441 492 442 - static void test_cipher_speed(char *algo, int mode, int enc, unsigned int sec, 493 + static void test_cipher_speed(char *algo, int enc, unsigned int sec, 443 494 struct cipher_testvec *template, 444 495 unsigned int tcount, struct cipher_speed *speed) 445 496 { 446 497 unsigned int ret, i, j, iv_len; 447 498 unsigned char *key, *p, iv[128]; 448 - struct crypto_tfm *tfm; 449 - const char *e, *m; 499 + struct crypto_blkcipher *tfm; 500 + struct blkcipher_desc desc; 501 + const char *e; 450 502 451 503 if (enc == ENCRYPT) 452 504 e = "encryption"; 453 505 else 454 506 e = "decryption"; 455 - if (mode == MODE_ECB) 456 - m = "ECB"; 457 - else 458 - m = "CBC"; 459 507 460 - printk("\ntesting speed of %s %s %s\n", algo, m, e); 508 + printk("\ntesting speed of %s %s\n", algo, e); 461 509 462 - if (mode) 463 - tfm = crypto_alloc_tfm(algo, 0); 464 - else 465 - tfm = crypto_alloc_tfm(algo, CRYPTO_TFM_MODE_CBC); 510 + tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); 466 511 467 - if (tfm == NULL) { 468 - printk("failed to load transform for %s %s\n", algo, m); 512 + if (IS_ERR(tfm)) { 513 + printk("failed to load transform for %s: %ld\n", algo, 514 + PTR_ERR(tfm)); 469 515 return; 470 516 } 517 
+ desc.tfm = tfm; 518 + desc.flags = 0; 471 519 472 520 for (i = 0; speed[i].klen != 0; i++) { 473 521 if ((speed[i].blen + speed[i].klen) > TVMEMSIZE) { ··· 488 542 } 489 543 p = (unsigned char *)tvmem + speed[i].klen; 490 544 491 - ret = crypto_cipher_setkey(tfm, key, speed[i].klen); 545 + ret = crypto_blkcipher_setkey(tfm, key, speed[i].klen); 492 546 if (ret) { 493 - printk("setkey() failed flags=%x\n", tfm->crt_flags); 547 + printk("setkey() failed flags=%x\n", 548 + crypto_blkcipher_get_flags(tfm)); 494 549 goto out; 495 550 } 496 551 497 - if (!mode) { 498 - iv_len = crypto_tfm_alg_ivsize(tfm); 552 + iv_len = crypto_blkcipher_ivsize(tfm); 553 + if (iv_len) { 499 554 memset(&iv, 0xff, iv_len); 500 - crypto_cipher_set_iv(tfm, iv, iv_len); 555 + crypto_blkcipher_set_iv(tfm, iv, iv_len); 501 556 } 502 557 503 558 if (sec) 504 - ret = test_cipher_jiffies(tfm, enc, p, speed[i].blen, 559 + ret = test_cipher_jiffies(&desc, enc, p, speed[i].blen, 505 560 sec); 506 561 else 507 - ret = test_cipher_cycles(tfm, enc, p, speed[i].blen); 562 + ret = test_cipher_cycles(&desc, enc, p, speed[i].blen); 508 563 509 564 if (ret) { 510 - printk("%s() failed flags=%x\n", e, tfm->crt_flags); 565 + printk("%s() failed flags=%x\n", e, desc.flags); 511 566 break; 512 567 } 513 568 } 514 569 515 570 out: 516 - crypto_free_tfm(tfm); 571 + crypto_free_blkcipher(tfm); 517 572 } 518 573 519 - static void test_digest_jiffies(struct crypto_tfm *tfm, char *p, int blen, 520 - int plen, char *out, int sec) 574 + static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen, 575 + char *out, int sec) 521 576 { 522 577 struct scatterlist sg[1]; 523 578 unsigned long start, end; 524 - int bcount, pcount; 579 + int bcount; 580 + int ret; 525 581 526 582 for (start = jiffies, end = start + sec * HZ, bcount = 0; 527 583 time_before(jiffies, end); bcount++) { 528 - crypto_digest_init(tfm); 529 - for (pcount = 0; pcount < blen; pcount += plen) { 530 - sg_set_buf(sg, p + pcount, plen); 
531 - crypto_digest_update(tfm, sg, 1); 532 - } 533 - /* we assume there is enough space in 'out' for the result */ 534 - crypto_digest_final(tfm, out); 584 + sg_set_buf(sg, p, blen); 585 + ret = crypto_hash_digest(desc, sg, blen, out); 586 + if (ret) 587 + return ret; 535 588 } 536 589 537 590 printk("%6u opers/sec, %9lu bytes/sec\n", 538 591 bcount / sec, ((long)bcount * blen) / sec); 539 592 540 - return; 593 + return 0; 541 594 } 542 595 543 - static void test_digest_cycles(struct crypto_tfm *tfm, char *p, int blen, 544 - int plen, char *out) 596 + static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen, 597 + int plen, char *out, int sec) 598 + { 599 + struct scatterlist sg[1]; 600 + unsigned long start, end; 601 + int bcount, pcount; 602 + int ret; 603 + 604 + if (plen == blen) 605 + return test_hash_jiffies_digest(desc, p, blen, out, sec); 606 + 607 + for (start = jiffies, end = start + sec * HZ, bcount = 0; 608 + time_before(jiffies, end); bcount++) { 609 + ret = crypto_hash_init(desc); 610 + if (ret) 611 + return ret; 612 + for (pcount = 0; pcount < blen; pcount += plen) { 613 + sg_set_buf(sg, p + pcount, plen); 614 + ret = crypto_hash_update(desc, sg, plen); 615 + if (ret) 616 + return ret; 617 + } 618 + /* we assume there is enough space in 'out' for the result */ 619 + ret = crypto_hash_final(desc, out); 620 + if (ret) 621 + return ret; 622 + } 623 + 624 + printk("%6u opers/sec, %9lu bytes/sec\n", 625 + bcount / sec, ((long)bcount * blen) / sec); 626 + 627 + return 0; 628 + } 629 + 630 + static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen, 631 + char *out) 545 632 { 546 633 struct scatterlist sg[1]; 547 634 unsigned long cycles = 0; 548 - int i, pcount; 635 + int i; 636 + int ret; 549 637 550 638 local_bh_disable(); 551 639 local_irq_disable(); 552 640 553 641 /* Warm-up run. 
*/ 554 642 for (i = 0; i < 4; i++) { 555 - crypto_digest_init(tfm); 556 - for (pcount = 0; pcount < blen; pcount += plen) { 557 - sg_set_buf(sg, p + pcount, plen); 558 - crypto_digest_update(tfm, sg, 1); 559 - } 560 - crypto_digest_final(tfm, out); 643 + sg_set_buf(sg, p, blen); 644 + ret = crypto_hash_digest(desc, sg, blen, out); 645 + if (ret) 646 + goto out; 561 647 } 562 648 563 649 /* The real thing. */ 564 650 for (i = 0; i < 8; i++) { 565 651 cycles_t start, end; 566 652 567 - crypto_digest_init(tfm); 568 - 569 653 start = get_cycles(); 570 654 571 - for (pcount = 0; pcount < blen; pcount += plen) { 572 - sg_set_buf(sg, p + pcount, plen); 573 - crypto_digest_update(tfm, sg, 1); 574 - } 575 - crypto_digest_final(tfm, out); 655 + sg_set_buf(sg, p, blen); 656 + ret = crypto_hash_digest(desc, sg, blen, out); 657 + if (ret) 658 + goto out; 576 659 577 660 end = get_cycles(); 578 661 579 662 cycles += end - start; 580 663 } 581 664 665 + out: 582 666 local_irq_enable(); 583 667 local_bh_enable(); 668 + 669 + if (ret) 670 + return ret; 584 671 585 672 printk("%6lu cycles/operation, %4lu cycles/byte\n", 586 673 cycles / 8, cycles / (8 * blen)); 587 674 588 - return; 675 + return 0; 589 676 } 590 677 591 - static void test_digest_speed(char *algo, unsigned int sec, 592 - struct digest_speed *speed) 678 + static int test_hash_cycles(struct hash_desc *desc, char *p, int blen, 679 + int plen, char *out) 593 680 { 594 - struct crypto_tfm *tfm; 681 + struct scatterlist sg[1]; 682 + unsigned long cycles = 0; 683 + int i, pcount; 684 + int ret; 685 + 686 + if (plen == blen) 687 + return test_hash_cycles_digest(desc, p, blen, out); 688 + 689 + local_bh_disable(); 690 + local_irq_disable(); 691 + 692 + /* Warm-up run. 
*/ 693 + for (i = 0; i < 4; i++) { 694 + ret = crypto_hash_init(desc); 695 + if (ret) 696 + goto out; 697 + for (pcount = 0; pcount < blen; pcount += plen) { 698 + sg_set_buf(sg, p + pcount, plen); 699 + ret = crypto_hash_update(desc, sg, plen); 700 + if (ret) 701 + goto out; 702 + } 703 + crypto_hash_final(desc, out); 704 + if (ret) 705 + goto out; 706 + } 707 + 708 + /* The real thing. */ 709 + for (i = 0; i < 8; i++) { 710 + cycles_t start, end; 711 + 712 + start = get_cycles(); 713 + 714 + ret = crypto_hash_init(desc); 715 + if (ret) 716 + goto out; 717 + for (pcount = 0; pcount < blen; pcount += plen) { 718 + sg_set_buf(sg, p + pcount, plen); 719 + ret = crypto_hash_update(desc, sg, plen); 720 + if (ret) 721 + goto out; 722 + } 723 + ret = crypto_hash_final(desc, out); 724 + if (ret) 725 + goto out; 726 + 727 + end = get_cycles(); 728 + 729 + cycles += end - start; 730 + } 731 + 732 + out: 733 + local_irq_enable(); 734 + local_bh_enable(); 735 + 736 + if (ret) 737 + return ret; 738 + 739 + printk("%6lu cycles/operation, %4lu cycles/byte\n", 740 + cycles / 8, cycles / (8 * blen)); 741 + 742 + return 0; 743 + } 744 + 745 + static void test_hash_speed(char *algo, unsigned int sec, 746 + struct hash_speed *speed) 747 + { 748 + struct crypto_hash *tfm; 749 + struct hash_desc desc; 595 750 char output[1024]; 596 751 int i; 752 + int ret; 597 753 598 754 printk("\ntesting speed of %s\n", algo); 599 755 600 - tfm = crypto_alloc_tfm(algo, 0); 756 + tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); 601 757 602 - if (tfm == NULL) { 603 - printk("failed to load transform for %s\n", algo); 758 + if (IS_ERR(tfm)) { 759 + printk("failed to load transform for %s: %ld\n", algo, 760 + PTR_ERR(tfm)); 604 761 return; 605 762 } 606 763 607 - if (crypto_tfm_alg_digestsize(tfm) > sizeof(output)) { 764 + desc.tfm = tfm; 765 + desc.flags = 0; 766 + 767 + if (crypto_hash_digestsize(tfm) > sizeof(output)) { 608 768 printk("digestsize(%u) > outputbuffer(%zu)\n", 609 - 
crypto_tfm_alg_digestsize(tfm), sizeof(output)); 769 + crypto_hash_digestsize(tfm), sizeof(output)); 610 770 goto out; 611 771 } 612 772 ··· 729 677 memset(tvmem, 0xff, speed[i].blen); 730 678 731 679 if (sec) 732 - test_digest_jiffies(tfm, tvmem, speed[i].blen, speed[i].plen, output, sec); 680 + ret = test_hash_jiffies(&desc, tvmem, speed[i].blen, 681 + speed[i].plen, output, sec); 733 682 else 734 - test_digest_cycles(tfm, tvmem, speed[i].blen, speed[i].plen, output); 683 + ret = test_hash_cycles(&desc, tvmem, speed[i].blen, 684 + speed[i].plen, output); 685 + 686 + if (ret) { 687 + printk("hashing failed ret=%d\n", ret); 688 + break; 689 + } 735 690 } 736 691 737 692 out: 738 - crypto_free_tfm(tfm); 693 + crypto_free_hash(tfm); 739 694 } 740 695 741 696 static void test_deflate(void) 742 697 { 743 698 unsigned int i; 744 699 char result[COMP_BUF_SIZE]; 745 - struct crypto_tfm *tfm; 700 + struct crypto_comp *tfm; 746 701 struct comp_testvec *tv; 747 702 unsigned int tsize; 748 703 ··· 821 762 ilen, dlen); 822 763 } 823 764 out: 824 - crypto_free_tfm(tfm); 825 - } 826 - 827 - static void test_crc32c(void) 828 - { 829 - #define NUMVEC 6 830 - #define VECSIZE 40 831 - 832 - int i, j, pass; 833 - u32 crc; 834 - u8 b, test_vec[NUMVEC][VECSIZE]; 835 - static u32 vec_results[NUMVEC] = { 836 - 0x0e2c157f, 0xe980ebf6, 0xde74bded, 837 - 0xd579c862, 0xba979ad0, 0x2b29d913 838 - }; 839 - static u32 tot_vec_results = 0x24c5d375; 840 - 841 - struct scatterlist sg[NUMVEC]; 842 - struct crypto_tfm *tfm; 843 - char *fmtdata = "testing crc32c initialized to %08x: %s\n"; 844 - #define SEEDTESTVAL 0xedcba987 845 - u32 seed; 846 - 847 - printk("\ntesting crc32c\n"); 848 - 849 - tfm = crypto_alloc_tfm("crc32c", 0); 850 - if (tfm == NULL) { 851 - printk("failed to load transform for crc32c\n"); 852 - return; 853 - } 854 - 855 - crypto_digest_init(tfm); 856 - crypto_digest_final(tfm, (u8*)&crc); 857 - printk(fmtdata, crc, (crc == 0) ? 
"pass" : "ERROR"); 858 - 859 - /* 860 - * stuff test_vec with known values, simple incrementing 861 - * byte values. 862 - */ 863 - b = 0; 864 - for (i = 0; i < NUMVEC; i++) { 865 - for (j = 0; j < VECSIZE; j++) 866 - test_vec[i][j] = ++b; 867 - sg_set_buf(&sg[i], test_vec[i], VECSIZE); 868 - } 869 - 870 - seed = SEEDTESTVAL; 871 - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); 872 - crypto_digest_final(tfm, (u8*)&crc); 873 - printk("testing crc32c setkey returns %08x : %s\n", crc, (crc == (SEEDTESTVAL ^ ~(u32)0)) ? 874 - "pass" : "ERROR"); 875 - 876 - printk("testing crc32c using update/final:\n"); 877 - 878 - pass = 1; /* assume all is well */ 879 - 880 - for (i = 0; i < NUMVEC; i++) { 881 - seed = ~(u32)0; 882 - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); 883 - crypto_digest_update(tfm, &sg[i], 1); 884 - crypto_digest_final(tfm, (u8*)&crc); 885 - if (crc == vec_results[i]) { 886 - printk(" %08x:OK", crc); 887 - } else { 888 - printk(" %08x:BAD, wanted %08x\n", crc, vec_results[i]); 889 - pass = 0; 890 - } 891 - } 892 - 893 - printk("\ntesting crc32c using incremental accumulator:\n"); 894 - crc = 0; 895 - for (i = 0; i < NUMVEC; i++) { 896 - seed = (crc ^ ~(u32)0); 897 - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); 898 - crypto_digest_update(tfm, &sg[i], 1); 899 - crypto_digest_final(tfm, (u8*)&crc); 900 - } 901 - if (crc == tot_vec_results) { 902 - printk(" %08x:OK", crc); 903 - } else { 904 - printk(" %08x:BAD, wanted %08x\n", crc, tot_vec_results); 905 - pass = 0; 906 - } 907 - 908 - printk("\ntesting crc32c using digest:\n"); 909 - seed = ~(u32)0; 910 - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); 911 - crypto_digest_digest(tfm, sg, NUMVEC, (u8*)&crc); 912 - if (crc == tot_vec_results) { 913 - printk(" %08x:OK", crc); 914 - } else { 915 - printk(" %08x:BAD, wanted %08x\n", crc, tot_vec_results); 916 - pass = 0; 917 - } 918 - 919 - printk("\n%s\n", pass ? 
"pass" : "ERROR"); 920 - 921 - crypto_free_tfm(tfm); 922 - printk("crc32c test complete\n"); 765 + crypto_free_comp(tfm); 923 766 } 924 767 925 768 static void test_available(void) ··· 830 869 831 870 while (*name) { 832 871 printk("alg %s ", *name); 833 - printk((crypto_alg_available(*name, 0)) ? 834 - "found\n" : "not found\n"); 872 + printk(crypto_has_alg(*name, 0, CRYPTO_ALG_ASYNC) ? 873 + "found\n" : "not found\n"); 835 874 name++; 836 875 } 837 876 } ··· 846 885 test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS); 847 886 848 887 //DES 849 - test_cipher ("des", MODE_ECB, ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); 850 - test_cipher ("des", MODE_ECB, DECRYPT, des_dec_tv_template, DES_DEC_TEST_VECTORS); 851 - test_cipher ("des", MODE_CBC, ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); 852 - test_cipher ("des", MODE_CBC, DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); 888 + test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template, 889 + DES_ENC_TEST_VECTORS); 890 + test_cipher("ecb(des)", DECRYPT, des_dec_tv_template, 891 + DES_DEC_TEST_VECTORS); 892 + test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template, 893 + DES_CBC_ENC_TEST_VECTORS); 894 + test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template, 895 + DES_CBC_DEC_TEST_VECTORS); 853 896 854 897 //DES3_EDE 855 - test_cipher ("des3_ede", MODE_ECB, ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); 856 - test_cipher ("des3_ede", MODE_ECB, DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); 898 + test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template, 899 + DES3_EDE_ENC_TEST_VECTORS); 900 + test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, 901 + DES3_EDE_DEC_TEST_VECTORS); 857 902 858 903 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); 859 904 860 905 test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS); 861 906 862 907 //BLOWFISH 863 - test_cipher ("blowfish", MODE_ECB, ENCRYPT, bf_enc_tv_template, 
BF_ENC_TEST_VECTORS); 864 - test_cipher ("blowfish", MODE_ECB, DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); 865 - test_cipher ("blowfish", MODE_CBC, ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); 866 - test_cipher ("blowfish", MODE_CBC, DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); 908 + test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template, 909 + BF_ENC_TEST_VECTORS); 910 + test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template, 911 + BF_DEC_TEST_VECTORS); 912 + test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template, 913 + BF_CBC_ENC_TEST_VECTORS); 914 + test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template, 915 + BF_CBC_DEC_TEST_VECTORS); 867 916 868 917 //TWOFISH 869 - test_cipher ("twofish", MODE_ECB, ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); 870 - test_cipher ("twofish", MODE_ECB, DECRYPT, tf_dec_tv_template, TF_DEC_TEST_VECTORS); 871 - test_cipher ("twofish", MODE_CBC, ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); 872 - test_cipher ("twofish", MODE_CBC, DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); 918 + test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template, 919 + TF_ENC_TEST_VECTORS); 920 + test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template, 921 + TF_DEC_TEST_VECTORS); 922 + test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template, 923 + TF_CBC_ENC_TEST_VECTORS); 924 + test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template, 925 + TF_CBC_DEC_TEST_VECTORS); 873 926 874 927 //SERPENT 875 - test_cipher ("serpent", MODE_ECB, ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); 876 - test_cipher ("serpent", MODE_ECB, DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); 928 + test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template, 929 + SERPENT_ENC_TEST_VECTORS); 930 + test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template, 931 + SERPENT_DEC_TEST_VECTORS); 877 932 878 933 //TNEPRES 879 - test_cipher ("tnepres", MODE_ECB, ENCRYPT, 
tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); 880 - test_cipher ("tnepres", MODE_ECB, DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); 934 + test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template, 935 + TNEPRES_ENC_TEST_VECTORS); 936 + test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template, 937 + TNEPRES_DEC_TEST_VECTORS); 881 938 882 939 //AES 883 - test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); 884 - test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); 885 - test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); 886 - test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); 940 + test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template, 941 + AES_ENC_TEST_VECTORS); 942 + test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template, 943 + AES_DEC_TEST_VECTORS); 944 + test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template, 945 + AES_CBC_ENC_TEST_VECTORS); 946 + test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template, 947 + AES_CBC_DEC_TEST_VECTORS); 887 948 888 949 //CAST5 889 - test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); 890 - test_cipher ("cast5", MODE_ECB, DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); 950 + test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, 951 + CAST5_ENC_TEST_VECTORS); 952 + test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template, 953 + CAST5_DEC_TEST_VECTORS); 891 954 892 955 //CAST6 893 - test_cipher ("cast6", MODE_ECB, ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); 894 - test_cipher ("cast6", MODE_ECB, DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); 956 + test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template, 957 + CAST6_ENC_TEST_VECTORS); 958 + test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template, 959 + CAST6_DEC_TEST_VECTORS); 895 960 896 961 //ARC4 897 - test_cipher ("arc4", MODE_ECB, ENCRYPT, 
arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); 898 - test_cipher ("arc4", MODE_ECB, DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); 962 + test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template, 963 + ARC4_ENC_TEST_VECTORS); 964 + test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template, 965 + ARC4_DEC_TEST_VECTORS); 899 966 900 967 //TEA 901 - test_cipher ("tea", MODE_ECB, ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); 902 - test_cipher ("tea", MODE_ECB, DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); 968 + test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template, 969 + TEA_ENC_TEST_VECTORS); 970 + test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template, 971 + TEA_DEC_TEST_VECTORS); 903 972 904 973 905 974 //XTEA 906 - test_cipher ("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); 907 - test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS); 975 + test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template, 976 + XTEA_ENC_TEST_VECTORS); 977 + test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template, 978 + XTEA_DEC_TEST_VECTORS); 908 979 909 980 //KHAZAD 910 - test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); 911 - test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); 981 + test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template, 982 + KHAZAD_ENC_TEST_VECTORS); 983 + test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template, 984 + KHAZAD_DEC_TEST_VECTORS); 912 985 913 986 //ANUBIS 914 - test_cipher ("anubis", MODE_ECB, ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); 915 - test_cipher ("anubis", MODE_ECB, DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS); 916 - test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 917 - test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 987 + test_cipher("ecb(anubis)", ENCRYPT, 
anubis_enc_tv_template, 988 + ANUBIS_ENC_TEST_VECTORS); 989 + test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template, 990 + ANUBIS_DEC_TEST_VECTORS); 991 + test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template, 992 + ANUBIS_CBC_ENC_TEST_VECTORS); 993 + test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template, 994 + ANUBIS_CBC_ENC_TEST_VECTORS); 918 995 919 996 //XETA 920 - test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); 921 - test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); 997 + test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, 998 + XETA_ENC_TEST_VECTORS); 999 + test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template, 1000 + XETA_DEC_TEST_VECTORS); 922 1001 923 1002 test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); 924 1003 test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); ··· 969 968 test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS); 970 969 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); 971 970 test_deflate(); 972 - test_crc32c(); 973 - #ifdef CONFIG_CRYPTO_HMAC 974 - test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); 975 - test_hmac("sha1", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); 976 - test_hmac("sha256", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); 977 - #endif 971 + test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); 972 + test_hash("hmac(md5)", hmac_md5_tv_template, 973 + HMAC_MD5_TEST_VECTORS); 974 + test_hash("hmac(sha1)", hmac_sha1_tv_template, 975 + HMAC_SHA1_TEST_VECTORS); 976 + test_hash("hmac(sha256)", hmac_sha256_tv_template, 977 + HMAC_SHA256_TEST_VECTORS); 978 978 979 979 test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS); 980 980 break; ··· 989 987 break; 990 988 991 989 case 3: 992 - test_cipher ("des", MODE_ECB, ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); 993 - test_cipher ("des", MODE_ECB, DECRYPT, des_dec_tv_template, 
DES_DEC_TEST_VECTORS); 994 - test_cipher ("des", MODE_CBC, ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); 995 - test_cipher ("des", MODE_CBC, DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); 990 + test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template, 991 + DES_ENC_TEST_VECTORS); 992 + test_cipher("ecb(des)", DECRYPT, des_dec_tv_template, 993 + DES_DEC_TEST_VECTORS); 994 + test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template, 995 + DES_CBC_ENC_TEST_VECTORS); 996 + test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template, 997 + DES_CBC_DEC_TEST_VECTORS); 996 998 break; 997 999 998 1000 case 4: 999 - test_cipher ("des3_ede", MODE_ECB, ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); 1000 - test_cipher ("des3_ede", MODE_ECB, DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); 1001 + test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template, 1002 + DES3_EDE_ENC_TEST_VECTORS); 1003 + test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, 1004 + DES3_EDE_DEC_TEST_VECTORS); 1001 1005 break; 1002 1006 1003 1007 case 5: ··· 1015 1007 break; 1016 1008 1017 1009 case 7: 1018 - test_cipher ("blowfish", MODE_ECB, ENCRYPT, bf_enc_tv_template, BF_ENC_TEST_VECTORS); 1019 - test_cipher ("blowfish", MODE_ECB, DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); 1020 - test_cipher ("blowfish", MODE_CBC, ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); 1021 - test_cipher ("blowfish", MODE_CBC, DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); 1010 + test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template, 1011 + BF_ENC_TEST_VECTORS); 1012 + test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template, 1013 + BF_DEC_TEST_VECTORS); 1014 + test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template, 1015 + BF_CBC_ENC_TEST_VECTORS); 1016 + test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template, 1017 + BF_CBC_DEC_TEST_VECTORS); 1022 1018 break; 1023 1019 1024 1020 case 8: 1025 - test_cipher ("twofish", 
MODE_ECB, ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); 1026 - test_cipher ("twofish", MODE_ECB, DECRYPT, tf_dec_tv_template, TF_DEC_TEST_VECTORS); 1027 - test_cipher ("twofish", MODE_CBC, ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); 1028 - test_cipher ("twofish", MODE_CBC, DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); 1021 + test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template, 1022 + TF_ENC_TEST_VECTORS); 1023 + test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template, 1024 + TF_DEC_TEST_VECTORS); 1025 + test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template, 1026 + TF_CBC_ENC_TEST_VECTORS); 1027 + test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template, 1028 + TF_CBC_DEC_TEST_VECTORS); 1029 1029 break; 1030 1030 1031 1031 case 9: 1032 - test_cipher ("serpent", MODE_ECB, ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); 1033 - test_cipher ("serpent", MODE_ECB, DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); 1032 + test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template, 1033 + SERPENT_ENC_TEST_VECTORS); 1034 + test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template, 1035 + SERPENT_DEC_TEST_VECTORS); 1034 1036 break; 1035 1037 1036 1038 case 10: 1037 - test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); 1038 - test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); 1039 - test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); 1040 - test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); 1039 + test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template, 1040 + AES_ENC_TEST_VECTORS); 1041 + test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template, 1042 + AES_DEC_TEST_VECTORS); 1043 + test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template, 1044 + AES_CBC_ENC_TEST_VECTORS); 1045 + test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template, 1046 + AES_CBC_DEC_TEST_VECTORS); 
1041 1047 break; 1042 1048 1043 1049 case 11: ··· 1067 1045 break; 1068 1046 1069 1047 case 14: 1070 - test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); 1071 - test_cipher ("cast5", MODE_ECB, DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); 1048 + test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, 1049 + CAST5_ENC_TEST_VECTORS); 1050 + test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template, 1051 + CAST5_DEC_TEST_VECTORS); 1072 1052 break; 1073 1053 1074 1054 case 15: 1075 - test_cipher ("cast6", MODE_ECB, ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); 1076 - test_cipher ("cast6", MODE_ECB, DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); 1055 + test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template, 1056 + CAST6_ENC_TEST_VECTORS); 1057 + test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template, 1058 + CAST6_DEC_TEST_VECTORS); 1077 1059 break; 1078 1060 1079 1061 case 16: 1080 - test_cipher ("arc4", MODE_ECB, ENCRYPT, arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); 1081 - test_cipher ("arc4", MODE_ECB, DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); 1062 + test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template, 1063 + ARC4_ENC_TEST_VECTORS); 1064 + test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template, 1065 + ARC4_DEC_TEST_VECTORS); 1082 1066 break; 1083 1067 1084 1068 case 17: ··· 1092 1064 break; 1093 1065 1094 1066 case 18: 1095 - test_crc32c(); 1067 + test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); 1096 1068 break; 1097 1069 1098 1070 case 19: 1099 - test_cipher ("tea", MODE_ECB, ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); 1100 - test_cipher ("tea", MODE_ECB, DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); 1071 + test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template, 1072 + TEA_ENC_TEST_VECTORS); 1073 + test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template, 1074 + TEA_DEC_TEST_VECTORS); 1101 1075 break; 1102 1076 1103 1077 case 20: 1104 - test_cipher 
("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); 1105 - test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS); 1078 + test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template, 1079 + XTEA_ENC_TEST_VECTORS); 1080 + test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template, 1081 + XTEA_DEC_TEST_VECTORS); 1106 1082 break; 1107 1083 1108 1084 case 21: 1109 - test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); 1110 - test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); 1085 + test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template, 1086 + KHAZAD_ENC_TEST_VECTORS); 1087 + test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template, 1088 + KHAZAD_DEC_TEST_VECTORS); 1111 1089 break; 1112 1090 1113 1091 case 22: ··· 1129 1095 break; 1130 1096 1131 1097 case 25: 1132 - test_cipher ("tnepres", MODE_ECB, ENCRYPT, tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); 1133 - test_cipher ("tnepres", MODE_ECB, DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); 1098 + test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template, 1099 + TNEPRES_ENC_TEST_VECTORS); 1100 + test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template, 1101 + TNEPRES_DEC_TEST_VECTORS); 1134 1102 break; 1135 1103 1136 1104 case 26: 1137 - test_cipher ("anubis", MODE_ECB, ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); 1138 - test_cipher ("anubis", MODE_ECB, DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS); 1139 - test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 1140 - test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); 1105 + test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template, 1106 + ANUBIS_ENC_TEST_VECTORS); 1107 + test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template, 1108 + ANUBIS_DEC_TEST_VECTORS); 1109 + test_cipher("cbc(anubis)", 
ENCRYPT, anubis_cbc_enc_tv_template, 1110 + ANUBIS_CBC_ENC_TEST_VECTORS); 1111 + test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template, 1112 + ANUBIS_CBC_ENC_TEST_VECTORS); 1141 1113 break; 1142 1114 1143 1115 case 27: ··· 1160 1120 break; 1161 1121 1162 1122 case 30: 1163 - test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); 1164 - test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); 1123 + test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, 1124 + XETA_ENC_TEST_VECTORS); 1125 + test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template, 1126 + XETA_DEC_TEST_VECTORS); 1165 1127 break; 1166 1128 1167 - #ifdef CONFIG_CRYPTO_HMAC 1168 1129 case 100: 1169 - test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); 1130 + test_hash("hmac(md5)", hmac_md5_tv_template, 1131 + HMAC_MD5_TEST_VECTORS); 1170 1132 break; 1171 1133 1172 1134 case 101: 1173 - test_hmac("sha1", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); 1135 + test_hash("hmac(sha1)", hmac_sha1_tv_template, 1136 + HMAC_SHA1_TEST_VECTORS); 1174 1137 break; 1175 1138 1176 1139 case 102: 1177 - test_hmac("sha256", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); 1140 + test_hash("hmac(sha256)", hmac_sha256_tv_template, 1141 + HMAC_SHA256_TEST_VECTORS); 1178 1142 break; 1179 1143 1180 - #endif 1181 1144 1182 1145 case 200: 1183 - test_cipher_speed("aes", MODE_ECB, ENCRYPT, sec, NULL, 0, 1146 + test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 1184 1147 aes_speed_template); 1185 - test_cipher_speed("aes", MODE_ECB, DECRYPT, sec, NULL, 0, 1148 + test_cipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0, 1186 1149 aes_speed_template); 1187 - test_cipher_speed("aes", MODE_CBC, ENCRYPT, sec, NULL, 0, 1150 + test_cipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0, 1188 1151 aes_speed_template); 1189 - test_cipher_speed("aes", MODE_CBC, DECRYPT, sec, NULL, 0, 1152 + test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0, 1190 1153 
aes_speed_template); 1191 1154 break; 1192 1155 1193 1156 case 201: 1194 - test_cipher_speed("des3_ede", MODE_ECB, ENCRYPT, sec, 1157 + test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec, 1195 1158 des3_ede_enc_tv_template, 1196 1159 DES3_EDE_ENC_TEST_VECTORS, 1197 1160 des3_ede_speed_template); 1198 - test_cipher_speed("des3_ede", MODE_ECB, DECRYPT, sec, 1161 + test_cipher_speed("ecb(des3_ede)", DECRYPT, sec, 1199 1162 des3_ede_dec_tv_template, 1200 1163 DES3_EDE_DEC_TEST_VECTORS, 1201 1164 des3_ede_speed_template); 1202 - test_cipher_speed("des3_ede", MODE_CBC, ENCRYPT, sec, 1165 + test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec, 1203 1166 des3_ede_enc_tv_template, 1204 1167 DES3_EDE_ENC_TEST_VECTORS, 1205 1168 des3_ede_speed_template); 1206 - test_cipher_speed("des3_ede", MODE_CBC, DECRYPT, sec, 1169 + test_cipher_speed("cbc(des3_ede)", DECRYPT, sec, 1207 1170 des3_ede_dec_tv_template, 1208 1171 DES3_EDE_DEC_TEST_VECTORS, 1209 1172 des3_ede_speed_template); 1210 1173 break; 1211 1174 1212 1175 case 202: 1213 - test_cipher_speed("twofish", MODE_ECB, ENCRYPT, sec, NULL, 0, 1176 + test_cipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0, 1214 1177 twofish_speed_template); 1215 - test_cipher_speed("twofish", MODE_ECB, DECRYPT, sec, NULL, 0, 1178 + test_cipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0, 1216 1179 twofish_speed_template); 1217 - test_cipher_speed("twofish", MODE_CBC, ENCRYPT, sec, NULL, 0, 1180 + test_cipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0, 1218 1181 twofish_speed_template); 1219 - test_cipher_speed("twofish", MODE_CBC, DECRYPT, sec, NULL, 0, 1182 + test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, 1220 1183 twofish_speed_template); 1221 1184 break; 1222 1185 1223 1186 case 203: 1224 - test_cipher_speed("blowfish", MODE_ECB, ENCRYPT, sec, NULL, 0, 1187 + test_cipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0, 1225 1188 blowfish_speed_template); 1226 - test_cipher_speed("blowfish", MODE_ECB, DECRYPT, sec, NULL, 0, 1189 + 
test_cipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0, 1227 1190 blowfish_speed_template); 1228 - test_cipher_speed("blowfish", MODE_CBC, ENCRYPT, sec, NULL, 0, 1191 + test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0, 1229 1192 blowfish_speed_template); 1230 - test_cipher_speed("blowfish", MODE_CBC, DECRYPT, sec, NULL, 0, 1193 + test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0, 1231 1194 blowfish_speed_template); 1232 1195 break; 1233 1196 1234 1197 case 204: 1235 - test_cipher_speed("des", MODE_ECB, ENCRYPT, sec, NULL, 0, 1198 + test_cipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0, 1236 1199 des_speed_template); 1237 - test_cipher_speed("des", MODE_ECB, DECRYPT, sec, NULL, 0, 1200 + test_cipher_speed("ecb(des)", DECRYPT, sec, NULL, 0, 1238 1201 des_speed_template); 1239 - test_cipher_speed("des", MODE_CBC, ENCRYPT, sec, NULL, 0, 1202 + test_cipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0, 1240 1203 des_speed_template); 1241 - test_cipher_speed("des", MODE_CBC, DECRYPT, sec, NULL, 0, 1204 + test_cipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, 1242 1205 des_speed_template); 1243 1206 break; 1244 1207 ··· 1249 1206 /* fall through */ 1250 1207 1251 1208 case 301: 1252 - test_digest_speed("md4", sec, generic_digest_speed_template); 1209 + test_hash_speed("md4", sec, generic_hash_speed_template); 1253 1210 if (mode > 300 && mode < 400) break; 1254 1211 1255 1212 case 302: 1256 - test_digest_speed("md5", sec, generic_digest_speed_template); 1213 + test_hash_speed("md5", sec, generic_hash_speed_template); 1257 1214 if (mode > 300 && mode < 400) break; 1258 1215 1259 1216 case 303: 1260 - test_digest_speed("sha1", sec, generic_digest_speed_template); 1217 + test_hash_speed("sha1", sec, generic_hash_speed_template); 1261 1218 if (mode > 300 && mode < 400) break; 1262 1219 1263 1220 case 304: 1264 - test_digest_speed("sha256", sec, generic_digest_speed_template); 1221 + test_hash_speed("sha256", sec, generic_hash_speed_template); 1265 1222 if (mode > 300 && 
mode < 400) break; 1266 1223 1267 1224 case 305: 1268 - test_digest_speed("sha384", sec, generic_digest_speed_template); 1225 + test_hash_speed("sha384", sec, generic_hash_speed_template); 1269 1226 if (mode > 300 && mode < 400) break; 1270 1227 1271 1228 case 306: 1272 - test_digest_speed("sha512", sec, generic_digest_speed_template); 1229 + test_hash_speed("sha512", sec, generic_hash_speed_template); 1273 1230 if (mode > 300 && mode < 400) break; 1274 1231 1275 1232 case 307: 1276 - test_digest_speed("wp256", sec, generic_digest_speed_template); 1233 + test_hash_speed("wp256", sec, generic_hash_speed_template); 1277 1234 if (mode > 300 && mode < 400) break; 1278 1235 1279 1236 case 308: 1280 - test_digest_speed("wp384", sec, generic_digest_speed_template); 1237 + test_hash_speed("wp384", sec, generic_hash_speed_template); 1281 1238 if (mode > 300 && mode < 400) break; 1282 1239 1283 1240 case 309: 1284 - test_digest_speed("wp512", sec, generic_digest_speed_template); 1241 + test_hash_speed("wp512", sec, generic_hash_speed_template); 1285 1242 if (mode > 300 && mode < 400) break; 1286 1243 1287 1244 case 310: 1288 - test_digest_speed("tgr128", sec, generic_digest_speed_template); 1245 + test_hash_speed("tgr128", sec, generic_hash_speed_template); 1289 1246 if (mode > 300 && mode < 400) break; 1290 1247 1291 1248 case 311: 1292 - test_digest_speed("tgr160", sec, generic_digest_speed_template); 1249 + test_hash_speed("tgr160", sec, generic_hash_speed_template); 1293 1250 if (mode > 300 && mode < 400) break; 1294 1251 1295 1252 case 312: 1296 - test_digest_speed("tgr192", sec, generic_digest_speed_template); 1253 + test_hash_speed("tgr192", sec, generic_hash_speed_template); 1297 1254 if (mode > 300 && mode < 400) break; 1298 1255 1299 1256 case 399:
+183 -19
crypto/tcrypt.h
··· 28 28 struct hash_testvec { 29 29 /* only used with keyed hash algorithms */ 30 30 char key[128] __attribute__ ((__aligned__(4))); 31 - char plaintext[128]; 31 + char plaintext[240]; 32 32 char digest[MAX_DIGEST_SIZE]; 33 33 unsigned char tap[MAX_TAP]; 34 34 unsigned char psize; 35 35 unsigned char np; 36 36 unsigned char ksize; 37 - }; 38 - 39 - struct hmac_testvec { 40 - char key[128]; 41 - char plaintext[128]; 42 - char digest[MAX_DIGEST_SIZE]; 43 - unsigned char tap[MAX_TAP]; 44 - unsigned char ksize; 45 - unsigned char psize; 46 - unsigned char np; 47 37 }; 48 38 49 39 struct cipher_testvec { ··· 55 65 unsigned int blen; 56 66 }; 57 67 58 - struct digest_speed { 68 + struct hash_speed { 59 69 unsigned int blen; /* buffer length */ 60 70 unsigned int plen; /* per-update length */ 61 71 }; ··· 687 697 }, 688 698 }; 689 699 690 - #ifdef CONFIG_CRYPTO_HMAC 691 700 /* 692 701 * HMAC-MD5 test vectors from RFC2202 693 702 * (These need to be fixed to not use strlen). 694 703 */ 695 704 #define HMAC_MD5_TEST_VECTORS 7 696 705 697 - static struct hmac_testvec hmac_md5_tv_template[] = 706 + static struct hash_testvec hmac_md5_tv_template[] = 698 707 { 699 708 { 700 709 .key = { [0 ... 15] = 0x0b }, ··· 757 768 */ 758 769 #define HMAC_SHA1_TEST_VECTORS 7 759 770 760 - static struct hmac_testvec hmac_sha1_tv_template[] = { 771 + static struct hash_testvec hmac_sha1_tv_template[] = { 761 772 { 762 773 .key = { [0 ... 19] = 0x0b }, 763 774 .ksize = 20, ··· 822 833 */ 823 834 #define HMAC_SHA256_TEST_VECTORS 10 824 835 825 - static struct hmac_testvec hmac_sha256_tv_template[] = { 836 + static struct hash_testvec hmac_sha256_tv_template[] = { 826 837 { 827 838 .key = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 828 839 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, ··· 932 943 0x2a, 0xc7, 0xd8, 0xe0, 0x64, 0xc3, 0xb2, 0xe6 }, 933 944 }, 934 945 }; 935 - 936 - #endif /* CONFIG_CRYPTO_HMAC */ 937 946 938 947 /* 939 948 * DES test vectors. 
··· 2884 2897 }; 2885 2898 2886 2899 /* 2900 + * CRC32C test vectors 2901 + */ 2902 + #define CRC32C_TEST_VECTORS 14 2903 + 2904 + static struct hash_testvec crc32c_tv_template[] = { 2905 + { 2906 + .psize = 0, 2907 + .digest = { 0x00, 0x00, 0x00, 0x00 } 2908 + }, 2909 + { 2910 + .key = { 0x87, 0xa9, 0xcb, 0xed }, 2911 + .ksize = 4, 2912 + .psize = 0, 2913 + .digest = { 0x78, 0x56, 0x34, 0x12 }, 2914 + }, 2915 + { 2916 + .key = { 0xff, 0xff, 0xff, 0xff }, 2917 + .ksize = 4, 2918 + .plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 2919 + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 2920 + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 2921 + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 2922 + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28 }, 2923 + .psize = 40, 2924 + .digest = { 0x7f, 0x15, 0x2c, 0x0e } 2925 + }, 2926 + { 2927 + .key = { 0xff, 0xff, 0xff, 0xff }, 2928 + .ksize = 4, 2929 + .plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 2930 + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 2931 + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 2932 + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 2933 + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 }, 2934 + .psize = 40, 2935 + .digest = { 0xf6, 0xeb, 0x80, 0xe9 } 2936 + }, 2937 + { 2938 + .key = { 0xff, 0xff, 0xff, 0xff }, 2939 + .ksize = 4, 2940 + .plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 2941 + 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 2942 + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 2943 + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 2944 + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 }, 2945 + .psize = 40, 2946 + .digest = { 0xed, 0xbd, 0x74, 0xde } 2947 + }, 2948 + { 2949 + .key = { 0xff, 0xff, 0xff, 0xff }, 2950 + .ksize = 4, 2951 + .plaintext = { 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 2952 + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 2953 + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 2954 + 0x91, 0x92, 0x93, 0x94, 
0x95, 0x96, 0x97, 0x98, 2955 + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 }, 2956 + .psize = 40, 2957 + .digest = { 0x62, 0xc8, 0x79, 0xd5 } 2958 + }, 2959 + { 2960 + .key = { 0xff, 0xff, 0xff, 0xff }, 2961 + .ksize = 4, 2962 + .plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 2963 + 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 2964 + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 2965 + 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 2966 + 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 }, 2967 + .psize = 40, 2968 + .digest = { 0xd0, 0x9a, 0x97, 0xba } 2969 + }, 2970 + { 2971 + .key = { 0xff, 0xff, 0xff, 0xff }, 2972 + .ksize = 4, 2973 + .plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 2974 + 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 2975 + 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 2976 + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 2977 + 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 }, 2978 + .psize = 40, 2979 + .digest = { 0x13, 0xd9, 0x29, 0x2b } 2980 + }, 2981 + { 2982 + .key = { 0x80, 0xea, 0xd3, 0xf1 }, 2983 + .ksize = 4, 2984 + .plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 2985 + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 2986 + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 2987 + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 2988 + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 }, 2989 + .psize = 40, 2990 + .digest = { 0x0c, 0xb5, 0xe2, 0xa2 } 2991 + }, 2992 + { 2993 + .key = { 0xf3, 0x4a, 0x1d, 0x5d }, 2994 + .ksize = 4, 2995 + .plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 2996 + 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 2997 + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 2998 + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 2999 + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 }, 3000 + .psize = 40, 3001 + .digest = { 0xd1, 0x7f, 0xfb, 0xa6 } 3002 + }, 3003 + { 3004 + .key = { 0x2e, 0x80, 0x04, 0x59 }, 3005 + .ksize = 4, 3006 + .plaintext = { 0x79, 
0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 3007 + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 3008 + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 3009 + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 3010 + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 }, 3011 + .psize = 40, 3012 + .digest = { 0x59, 0x33, 0xe6, 0x7a } 3013 + }, 3014 + { 3015 + .key = { 0xa6, 0xcc, 0x19, 0x85 }, 3016 + .ksize = 4, 3017 + .plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 3018 + 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 3019 + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 3020 + 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 3021 + 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 }, 3022 + .psize = 40, 3023 + .digest = { 0xbe, 0x03, 0x01, 0xd2 } 3024 + }, 3025 + { 3026 + .key = { 0x41, 0xfc, 0xfe, 0x2d }, 3027 + .ksize = 4, 3028 + .plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 3029 + 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 3030 + 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 3031 + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 3032 + 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 }, 3033 + .psize = 40, 3034 + .digest = { 0x75, 0xd3, 0xc5, 0x24 } 3035 + }, 3036 + { 3037 + .key = { 0xff, 0xff, 0xff, 0xff }, 3038 + .ksize = 4, 3039 + .plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 3040 + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 3041 + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 3042 + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 3043 + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 3044 + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 3045 + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 3046 + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 3047 + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 3048 + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 3049 + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 3050 + 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 3051 + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 
0x67, 0x68, 3052 + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 3053 + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 3054 + 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 3055 + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 3056 + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 3057 + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 3058 + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 3059 + 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 3060 + 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 3061 + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 3062 + 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 3063 + 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 3064 + 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 3065 + 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 3066 + 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 3067 + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 3068 + 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 }, 3069 + .psize = 240, 3070 + .digest = { 0x75, 0xd3, 0xc5, 0x24 }, 3071 + .np = 2, 3072 + .tap = { 31, 209 } 3073 + }, 3074 + }; 3075 + 3076 + /* 2887 3077 * Cipher speed tests 2888 3078 */ 2889 3079 static struct cipher_speed aes_speed_template[] = { ··· 3147 2983 /* 3148 2984 * Digest speed tests 3149 2985 */ 3150 - static struct digest_speed generic_digest_speed_template[] = { 2986 + static struct hash_speed generic_hash_speed_template[] = { 3151 2987 { .blen = 16, .plen = 16, }, 3152 2988 { .blen = 64, .plen = 16, }, 3153 2989 { .blen = 64, .plen = 64, },
+2 -14
crypto/tea.c
··· 46 46 }; 47 47 48 48 static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, 49 - unsigned int key_len, u32 *flags) 49 + unsigned int key_len) 50 50 { 51 51 struct tea_ctx *ctx = crypto_tfm_ctx(tfm); 52 52 const __le32 *key = (const __le32 *)in_key; 53 - 54 - if (key_len != 16) 55 - { 56 - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 57 - return -EINVAL; 58 - } 59 53 60 54 ctx->KEY[0] = le32_to_cpu(key[0]); 61 55 ctx->KEY[1] = le32_to_cpu(key[1]); ··· 119 125 } 120 126 121 127 static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, 122 - unsigned int key_len, u32 *flags) 128 + unsigned int key_len) 123 129 { 124 130 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); 125 131 const __le32 *key = (const __le32 *)in_key; 126 - 127 - if (key_len != 16) 128 - { 129 - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 130 - return -EINVAL; 131 - } 132 132 133 133 ctx->KEY[0] = le32_to_cpu(key[0]); 134 134 ctx->KEY[1] = le32_to_cpu(key[1]);
+3 -697
crypto/twofish.c
··· 39 39 */ 40 40 41 41 #include <asm/byteorder.h> 42 + #include <crypto/twofish.h> 42 43 #include <linux/module.h> 43 44 #include <linux/init.h> 44 45 #include <linux/types.h> 45 46 #include <linux/errno.h> 46 47 #include <linux/crypto.h> 47 48 #include <linux/bitops.h> 48 - 49 - 50 - /* The large precomputed tables for the Twofish cipher (twofish.c) 51 - * Taken from the same source as twofish.c 52 - * Marc Mutz <Marc@Mutz.com> 53 - */ 54 - 55 - /* These two tables are the q0 and q1 permutations, exactly as described in 56 - * the Twofish paper. */ 57 - 58 - static const u8 q0[256] = { 59 - 0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78, 60 - 0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C, 61 - 0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30, 62 - 0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82, 63 - 0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE, 64 - 0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B, 65 - 0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45, 66 - 0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7, 67 - 0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF, 68 - 0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8, 69 - 0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED, 70 - 0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90, 71 - 0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B, 72 - 0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B, 73 - 0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F, 74 - 0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A, 75 - 0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17, 76 - 0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72, 77 - 0x7E, 0x15, 
0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68, 78 - 0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4, 79 - 0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42, 80 - 0x4A, 0x5E, 0xC1, 0xE0 81 - }; 82 - 83 - static const u8 q1[256] = { 84 - 0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B, 85 - 0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1, 86 - 0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B, 87 - 0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5, 88 - 0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54, 89 - 0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96, 90 - 0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7, 91 - 0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8, 92 - 0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF, 93 - 0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9, 94 - 0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D, 95 - 0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E, 96 - 0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21, 97 - 0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01, 98 - 0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E, 99 - 0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64, 100 - 0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44, 101 - 0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E, 102 - 0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B, 103 - 0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9, 104 - 0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56, 105 - 0x55, 0x09, 0xBE, 0x91 106 - }; 107 - 108 - /* These MDS tables are actually tables 
of MDS composed with q0 and q1, 109 - * because it is only ever used that way and we can save some time by 110 - * precomputing. Of course the main saving comes from precomputing the 111 - * GF(2^8) multiplication involved in the MDS matrix multiply; by looking 112 - * things up in these tables we reduce the matrix multiply to four lookups 113 - * and three XORs. Semi-formally, the definition of these tables is: 114 - * mds[0][i] = MDS (q1[i] 0 0 0)^T mds[1][i] = MDS (0 q0[i] 0 0)^T 115 - * mds[2][i] = MDS (0 0 q1[i] 0)^T mds[3][i] = MDS (0 0 0 q0[i])^T 116 - * where ^T means "transpose", the matrix multiply is performed in GF(2^8) 117 - * represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described 118 - * by Schneier et al, and I'm casually glossing over the byte/word 119 - * conversion issues. */ 120 - 121 - static const u32 mds[4][256] = { 122 - {0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B, 123 - 0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B, 124 - 0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32, 125 - 0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1, 126 - 0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA, 127 - 0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B, 128 - 0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1, 129 - 0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5, 130 - 0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490, 131 - 0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154, 132 - 0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0, 133 - 0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796, 134 - 0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228, 135 - 0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7, 136 - 0x3F3F2D24, 0xC0C0E346, 
0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3, 137 - 0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8, 138 - 0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477, 139 - 0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF, 140 - 0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C, 141 - 0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9, 142 - 0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA, 143 - 0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D, 144 - 0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72, 145 - 0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E, 146 - 0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76, 147 - 0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321, 148 - 0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39, 149 - 0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01, 150 - 0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D, 151 - 0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E, 152 - 0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5, 153 - 0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64, 154 - 0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7, 155 - 0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544, 156 - 0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E, 157 - 0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E, 158 - 0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A, 159 - 0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B, 160 - 0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2, 161 - 0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9, 
162 - 0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504, 163 - 0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756, 164 - 0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91}, 165 - 166 - {0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252, 167 - 0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A, 168 - 0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020, 169 - 0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141, 170 - 0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444, 171 - 0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424, 172 - 0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A, 173 - 0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757, 174 - 0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383, 175 - 0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A, 176 - 0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9, 177 - 0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656, 178 - 0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1, 179 - 0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898, 180 - 0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414, 181 - 0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3, 182 - 0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1, 183 - 0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989, 184 - 0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5, 185 - 0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282, 186 - 0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E, 187 - 0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E, 188 - 0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 
0x7B8B0202, 189 - 0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC, 190 - 0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565, 191 - 0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A, 192 - 0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808, 193 - 0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272, 194 - 0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A, 195 - 0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969, 196 - 0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505, 197 - 0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5, 198 - 0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D, 199 - 0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343, 200 - 0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF, 201 - 0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3, 202 - 0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F, 203 - 0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646, 204 - 0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6, 205 - 0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF, 206 - 0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A, 207 - 0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7, 208 - 0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8}, 209 - 210 - {0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B, 211 - 0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F, 212 - 0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A, 213 - 0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783, 214 - 0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70, 215 - 0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 
0x66AE6631, 0x3A5B3AA3, 216 - 0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB, 217 - 0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA, 218 - 0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4, 219 - 0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41, 220 - 0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C, 221 - 0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07, 222 - 0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622, 223 - 0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18, 224 - 0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035, 225 - 0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96, 226 - 0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84, 227 - 0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E, 228 - 0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F, 229 - 0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD, 230 - 0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558, 231 - 0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40, 232 - 0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA, 233 - 0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85, 234 - 0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF, 235 - 0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773, 236 - 0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D, 237 - 0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B, 238 - 0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C, 239 - 0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19, 240 - 0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086, 241 - 0xF298F2BE, 
0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D, 242 - 0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74, 243 - 0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755, 244 - 0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691, 245 - 0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D, 246 - 0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4, 247 - 0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53, 248 - 0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E, 249 - 0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9, 250 - 0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705, 251 - 0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7, 252 - 0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF}, 253 - 254 - {0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98, 255 - 0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866, 256 - 0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643, 257 - 0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77, 258 - 0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9, 259 - 0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C, 260 - 0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3, 261 - 0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216, 262 - 0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F, 263 - 0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25, 264 - 0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF, 265 - 0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7, 266 - 0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4, 267 - 0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E, 268 - 
0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA, 269 - 0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C, 270 - 0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12, 271 - 0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A, 272 - 0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D, 273 - 0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE, 274 - 0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A, 275 - 0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C, 276 - 0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B, 277 - 0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4, 278 - 0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B, 279 - 0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3, 280 - 0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE, 281 - 0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB, 282 - 0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85, 283 - 0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA, 284 - 0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E, 285 - 0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8, 286 - 0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33, 287 - 0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC, 288 - 0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718, 289 - 0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA, 290 - 0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8, 291 - 0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872, 292 - 0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882, 293 - 0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 
0x8A93328A, 0x8DDFA48D, 294 - 0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10, 295 - 0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6, 296 - 0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8} 297 - }; 298 - 299 - /* The exp_to_poly and poly_to_exp tables are used to perform efficient 300 - * operations in GF(2^8) represented as GF(2)[x]/w(x) where 301 - * w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the 302 - * definition of the RS matrix in the key schedule. Elements of that field 303 - * are polynomials of degree not greater than 7 and all coefficients 0 or 1, 304 - * which can be represented naturally by bytes (just substitute x=2). In that 305 - * form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8) 306 - * multiplication is inefficient without hardware support. To multiply 307 - * faster, I make use of the fact x is a generator for the nonzero elements, 308 - * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for 309 - * some n in 0..254. Note that that caret is exponentiation in GF(2^8), 310 - * *not* polynomial notation. So if I want to compute pq where p and q are 311 - * in GF(2^8), I can just say: 312 - * 1. if p=0 or q=0 then pq=0 313 - * 2. otherwise, find m and n such that p=x^m and q=x^n 314 - * 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq 315 - * The translations in steps 2 and 3 are looked up in the tables 316 - * poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this 317 - * in action, look at the CALC_S macro. As additional wrinkles, note that 318 - * one of my operands is always a constant, so the poly_to_exp lookup on it 319 - * is done in advance; I included the original values in the comments so 320 - * readers can have some chance of recognizing that this *is* the RS matrix 321 - * from the Twofish paper. 
I've only included the table entries I actually 322 - * need; I never do a lookup on a variable input of zero and the biggest 323 - * exponents I'll ever see are 254 (variable) and 237 (constant), so they'll 324 - * never sum to more than 491. I'm repeating part of the exp_to_poly table 325 - * so that I don't have to do mod-255 reduction in the exponent arithmetic. 326 - * Since I know my constant operands are never zero, I only have to worry 327 - * about zero values in the variable operand, and I do it with a simple 328 - * conditional branch. I know conditionals are expensive, but I couldn't 329 - * see a non-horrible way of avoiding them, and I did manage to group the 330 - * statements so that each if covers four group multiplications. */ 331 - 332 - static const u8 poly_to_exp[255] = { 333 - 0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19, 334 - 0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A, 335 - 0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C, 336 - 0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B, 337 - 0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47, 338 - 0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D, 339 - 0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8, 340 - 0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C, 341 - 0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83, 342 - 0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48, 343 - 0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26, 344 - 0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E, 345 - 0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3, 346 - 0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9, 347 - 0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A, 348 - 0xC0, 0x23, 0x86, 
0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D, 349 - 0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75, 350 - 0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84, 351 - 0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64, 352 - 0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49, 353 - 0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF, 354 - 0x85, 0xC8, 0xA1 355 - }; 356 - 357 - static const u8 exp_to_poly[492] = { 358 - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2, 359 - 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03, 360 - 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6, 361 - 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A, 362 - 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63, 363 - 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C, 364 - 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07, 365 - 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88, 366 - 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12, 367 - 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7, 368 - 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C, 369 - 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8, 370 - 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25, 371 - 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A, 372 - 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE, 373 - 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC, 374 - 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E, 375 - 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92, 376 - 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 
0xE2, 0x89, 377 - 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB, 378 - 0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1, 379 - 0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 380 - 0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 381 - 0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 382 - 0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 383 - 0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 384 - 0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 385 - 0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 386 - 0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 387 - 0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 388 - 0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 389 - 0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 390 - 0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 391 - 0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 392 - 0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 393 - 0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 394 - 0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 395 - 0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 396 - 0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 397 - 0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 398 - 0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB 399 - }; 400 - 401 - 402 - /* The table constants are indices of 403 - * S-box entries, preprocessed through q0 and q1. 
*/ 404 - static const u8 calc_sb_tbl[512] = { 405 - 0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4, 406 - 0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8, 407 - 0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B, 408 - 0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B, 409 - 0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD, 410 - 0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1, 411 - 0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B, 412 - 0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F, 413 - 0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B, 414 - 0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D, 415 - 0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E, 416 - 0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5, 417 - 0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14, 418 - 0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3, 419 - 0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54, 420 - 0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51, 421 - 0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A, 422 - 0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96, 423 - 0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10, 424 - 0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C, 425 - 0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7, 426 - 0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70, 427 - 0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB, 428 - 0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8, 429 - 0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF, 430 - 0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC, 431 - 0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF, 432 - 0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2, 433 - 0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82, 434 - 0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9, 435 - 0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97, 436 - 0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17, 437 - 0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D, 438 - 0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3, 439 - 0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C, 440 - 0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E, 441 - 
0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F, 442 - 0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49, 443 - 0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21, 444 - 0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9, 445 - 0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD, 446 - 0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01, 447 - 0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F, 448 - 0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48, 449 - 0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E, 450 - 0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19, 451 - 0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57, 452 - 0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64, 453 - 0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE, 454 - 0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5, 455 - 0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44, 456 - 0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69, 457 - 0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15, 458 - 0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E, 459 - 0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34, 460 - 0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC, 461 - 0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B, 462 - 0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB, 463 - 0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52, 464 - 0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9, 465 - 0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4, 466 - 0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2, 467 - 0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56, 468 - 0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91 469 - }; 470 - 471 - /* Macro to perform one column of the RS matrix multiplication. The 472 - * parameters a, b, c, and d are the four bytes of output; i is the index 473 - * of the key bytes, and w, x, y, and z, are the column of constants from 474 - * the RS matrix, preprocessed through the poly_to_exp table. 
*/ 475 - 476 - #define CALC_S(a, b, c, d, i, w, x, y, z) \ 477 - if (key[i]) { \ 478 - tmp = poly_to_exp[key[i] - 1]; \ 479 - (a) ^= exp_to_poly[tmp + (w)]; \ 480 - (b) ^= exp_to_poly[tmp + (x)]; \ 481 - (c) ^= exp_to_poly[tmp + (y)]; \ 482 - (d) ^= exp_to_poly[tmp + (z)]; \ 483 - } 484 - 485 - /* Macros to calculate the key-dependent S-boxes for a 128-bit key using 486 - * the S vector from CALC_S. CALC_SB_2 computes a single entry in all 487 - * four S-boxes, where i is the index of the entry to compute, and a and b 488 - * are the index numbers preprocessed through the q0 and q1 tables 489 - * respectively. */ 490 - 491 - #define CALC_SB_2(i, a, b) \ 492 - ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \ 493 - ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \ 494 - ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \ 495 - ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh] 496 - 497 - /* Macro exactly like CALC_SB_2, but for 192-bit keys. */ 498 - 499 - #define CALC_SB192_2(i, a, b) \ 500 - ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \ 501 - ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \ 502 - ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \ 503 - ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl]; 504 - 505 - /* Macro exactly like CALC_SB_2, but for 256-bit keys. */ 506 - 507 - #define CALC_SB256_2(i, a, b) \ 508 - ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \ 509 - ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \ 510 - ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \ 511 - ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp]; 512 - 513 - /* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the 514 - * last two stages of the h() function for a given index (either 2i or 2i+1). 515 - * a, b, c, and d are the four bytes going into the last two stages. 
For 516 - * 128-bit keys, this is the entire h() function and a and c are the index 517 - * preprocessed through q0 and q1 respectively; for longer keys they are the 518 - * output of previous stages. j is the index of the first key byte to use. 519 - * CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2 520 - * twice, doing the Pseudo-Hadamard Transform, and doing the necessary 521 - * rotations. Its parameters are: a, the array to write the results into, 522 - * j, the index of the first output entry, k and l, the preprocessed indices 523 - * for index 2i, and m and n, the preprocessed indices for index 2i+1. 524 - * CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an 525 - * additional lookup-and-XOR stage. The parameters a, b, c and d are the 526 - * four bytes going into the last three stages. For 192-bit keys, c = d 527 - * are the index preprocessed through q0, and a = b are the index 528 - * preprocessed through q1; j is the index of the first key byte to use. 529 - * CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro 530 - * instead of CALC_K_2. 531 - * CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an 532 - * additional lookup-and-XOR stage. The parameters a and b are the index 533 - * preprocessed through q0 and q1 respectively; j is the index of the first 534 - * key byte to use. CALC_K256 is identical to CALC_K but for using the 535 - * CALC_K256_2 macro instead of CALC_K_2. 
*/ 536 - 537 - #define CALC_K_2(a, b, c, d, j) \ 538 - mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \ 539 - ^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \ 540 - ^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \ 541 - ^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]] 542 - 543 - #define CALC_K(a, j, k, l, m, n) \ 544 - x = CALC_K_2 (k, l, k, l, 0); \ 545 - y = CALC_K_2 (m, n, m, n, 4); \ 546 - y = rol32(y, 8); \ 547 - x += y; y += x; ctx->a[j] = x; \ 548 - ctx->a[(j) + 1] = rol32(y, 9) 549 - 550 - #define CALC_K192_2(a, b, c, d, j) \ 551 - CALC_K_2 (q0[a ^ key[(j) + 16]], \ 552 - q1[b ^ key[(j) + 17]], \ 553 - q0[c ^ key[(j) + 18]], \ 554 - q1[d ^ key[(j) + 19]], j) 555 - 556 - #define CALC_K192(a, j, k, l, m, n) \ 557 - x = CALC_K192_2 (l, l, k, k, 0); \ 558 - y = CALC_K192_2 (n, n, m, m, 4); \ 559 - y = rol32(y, 8); \ 560 - x += y; y += x; ctx->a[j] = x; \ 561 - ctx->a[(j) + 1] = rol32(y, 9) 562 - 563 - #define CALC_K256_2(a, b, j) \ 564 - CALC_K192_2 (q1[b ^ key[(j) + 24]], \ 565 - q1[a ^ key[(j) + 25]], \ 566 - q0[a ^ key[(j) + 26]], \ 567 - q0[b ^ key[(j) + 27]], j) 568 - 569 - #define CALC_K256(a, j, k, l, m, n) \ 570 - x = CALC_K256_2 (k, l, 0); \ 571 - y = CALC_K256_2 (m, n, 4); \ 572 - y = rol32(y, 8); \ 573 - x += y; y += x; ctx->a[j] = x; \ 574 - ctx->a[(j) + 1] = rol32(y, 9) 575 - 576 49 577 50 /* Macros to compute the g() function in the encryption and decryption 578 51 * rounds. G1 is the straight g() function; G2 includes the 8-bit ··· 103 630 x ^= ctx->w[m]; \ 104 631 dst[n] = cpu_to_le32(x) 105 632 106 - #define TF_MIN_KEY_SIZE 16 107 - #define TF_MAX_KEY_SIZE 32 108 - #define TF_BLOCK_SIZE 16 109 633 110 - /* Structure for an expanded Twofish key. s contains the key-dependent 111 - * S-boxes composed with the MDS matrix; w contains the eight "whitening" 112 - * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note 113 - * that k[i] corresponds to what the Twofish paper calls K[i+8]. 
*/ 114 - struct twofish_ctx { 115 - u32 s[4][256], w[8], k[32]; 116 - }; 117 - 118 - /* Perform the key setup. */ 119 - static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, 120 - unsigned int key_len, u32 *flags) 121 - { 122 - 123 - struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); 124 - 125 - int i, j, k; 126 - 127 - /* Temporaries for CALC_K. */ 128 - u32 x, y; 129 - 130 - /* The S vector used to key the S-boxes, split up into individual bytes. 131 - * 128-bit keys use only sa through sh; 256-bit use all of them. */ 132 - u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0; 133 - u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0; 134 - 135 - /* Temporary for CALC_S. */ 136 - u8 tmp; 137 - 138 - /* Check key length. */ 139 - if (key_len != 16 && key_len != 24 && key_len != 32) 140 - { 141 - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 142 - return -EINVAL; /* unsupported key length */ 143 - } 144 - 145 - /* Compute the first two words of the S vector. The magic numbers are 146 - * the entries of the RS matrix, preprocessed through poly_to_exp. The 147 - * numbers in the comments are the original (polynomial form) matrix 148 - * entries. 
*/ 149 - CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 150 - CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 151 - CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ 152 - CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 153 - CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 154 - CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 155 - CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 156 - CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 157 - CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 158 - CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 159 - CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ 160 - CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 161 - CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 162 - CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 163 - CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 164 - CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 165 - 166 - if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */ 167 - /* Calculate the third word of the S vector */ 168 - CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 169 - CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 170 - CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ 171 - CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 172 - CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 173 - CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 174 - CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 175 - CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 176 - } 177 - 178 - if 
(key_len == 32) { /* 256-bit key */ 179 - /* Calculate the fourth word of the S vector */ 180 - CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 181 - CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 182 - CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ 183 - CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 184 - CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 185 - CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 186 - CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 187 - CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 188 - 189 - /* Compute the S-boxes. */ 190 - for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { 191 - CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); 192 - } 193 - 194 - /* Calculate whitening and round subkeys. The constants are 195 - * indices of subkeys, preprocessed through q0 and q1. 
*/ 196 - CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3); 197 - CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); 198 - CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); 199 - CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); 200 - CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); 201 - CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B); 202 - CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); 203 - CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); 204 - CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); 205 - CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD); 206 - CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71); 207 - CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); 208 - CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F); 209 - CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B); 210 - CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA); 211 - CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F); 212 - CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); 213 - CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); 214 - CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00); 215 - CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); 216 - } else if (key_len == 24) { /* 192-bit key */ 217 - /* Compute the S-boxes. */ 218 - for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { 219 - CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); 220 - } 221 - 222 - /* Calculate whitening and round subkeys. The constants are 223 - * indices of subkeys, preprocessed through q0 and q1. 
*/ 224 - CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3); 225 - CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); 226 - CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); 227 - CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); 228 - CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); 229 - CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B); 230 - CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); 231 - CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); 232 - CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); 233 - CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD); 234 - CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71); 235 - CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); 236 - CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F); 237 - CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B); 238 - CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA); 239 - CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F); 240 - CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); 241 - CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); 242 - CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00); 243 - CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); 244 - } else { /* 128-bit key */ 245 - /* Compute the S-boxes. */ 246 - for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { 247 - CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); 248 - } 249 - 250 - /* Calculate whitening and round subkeys. The constants are 251 - * indices of subkeys, preprocessed through q0 and q1. 
*/ 252 - CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3); 253 - CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); 254 - CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B); 255 - CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8); 256 - CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3); 257 - CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B); 258 - CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D); 259 - CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B); 260 - CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32); 261 - CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD); 262 - CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71); 263 - CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); 264 - CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F); 265 - CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B); 266 - CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA); 267 - CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F); 268 - CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); 269 - CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B); 270 - CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00); 271 - CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D); 272 - } 273 - 274 - return 0; 275 - } 276 634 277 635 /* Encrypt one block. in and out may be the same. */ 278 636 static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ··· 181 877 182 878 static struct crypto_alg alg = { 183 879 .cra_name = "twofish", 880 + .cra_driver_name = "twofish-generic", 881 + .cra_priority = 100, 184 882 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 185 883 .cra_blocksize = TF_BLOCK_SIZE, 186 884 .cra_ctxsize = sizeof(struct twofish_ctx),
+744
crypto/twofish_common.c
··· 1 + /* 2 + * Common Twofish algorithm parts shared between the c and assembler 3 + * implementations 4 + * 5 + * Originally Twofish for GPG 6 + * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998 7 + * 256-bit key length added March 20, 1999 8 + * Some modifications to reduce the text size by Werner Koch, April, 1998 9 + * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com> 10 + * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net> 11 + * 12 + * The original author has disclaimed all copyright interest in this 13 + * code and thus put it in the public domain. The subsequent authors 14 + * have put this under the GNU General Public License. 15 + * 16 + * This program is free software; you can redistribute it and/or modify 17 + * it under the terms of the GNU General Public License as published by 18 + * the Free Software Foundation; either version 2 of the License, or 19 + * (at your option) any later version. 20 + * 21 + * This program is distributed in the hope that it will be useful, 22 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 23 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 24 + * GNU General Public License for more details. 25 + * 26 + * You should have received a copy of the GNU General Public License 27 + * along with this program; if not, write to the Free Software 28 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 29 + * USA 30 + * 31 + * This code is a "clean room" implementation, written from the paper 32 + * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, 33 + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available 34 + * through http://www.counterpane.com/twofish.html 35 + * 36 + * For background information on multiplication in finite fields, used for 37 + * the matrix operations in the key schedule, see the book _Contemporary 38 + * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the 39 + * Third Edition. 
 */

#include <crypto/twofish.h>
#include <linux/bitops.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>


/* The large precomputed tables for the Twofish cipher (twofish.c)
 * Taken from the same source as twofish.c
 * Marc Mutz <Marc@Mutz.com>
 */

/* These two tables are the q0 and q1 permutations, exactly as described in
 * the Twofish paper.  Each is a fixed (key-independent) bijective byte
 * substitution over 0..255; the key-dependent S-boxes and the h() function
 * below are built by composing these with key material. */

static const u8 q0[256] = {
	0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78,
	0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C,
	0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30,
	0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82,
	0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE,
	0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B,
	0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45,
	0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7,
	0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF,
	0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8,
	0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED,
	0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90,
	0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B,
	0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B,
	0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F,
	0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A,
	0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17,
	0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72,
	0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68,
	0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4,
	0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42,
	0x4A, 0x5E, 0xC1, 0xE0
};

static const u8 q1[256] = {
	0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B,
	0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1,
	0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B,
	0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5,
	0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54,
	0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96,
	0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7,
	0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8,
	0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF,
	0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9,
	0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D,
	0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E,
	0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21,
	0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01,
	0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E,
	0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64,
	0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44,
	0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E,
	0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B,
	0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9,
	0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56,
	0x55, 0x09, 0xBE, 0x91
};

/* These MDS tables are actually tables of MDS
 * composed with q0 and q1,
 * because it is only ever used that way and we can save some time by
 * precomputing.  Of course the main saving comes from precomputing the
 * GF(2^8) multiplication involved in the MDS matrix multiply; by looking
 * things up in these tables we reduce the matrix multiply to four lookups
 * and three XORs.  Semi-formally, the definition of these tables is:
 * mds[0][i] = MDS (q1[i] 0 0 0)^T  mds[1][i] = MDS (0 q0[i] 0 0)^T
 * mds[2][i] = MDS (0 0 q1[i] 0)^T  mds[3][i] = MDS (0 0 0 q0[i])^T
 * where ^T means "transpose", the matrix multiply is performed in GF(2^8)
 * represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described
 * by Schneier et al, and I'm casually glossing over the byte/word
 * conversion issues. */

static const u32 mds[4][256] = {
	/* mds[0][i] = MDS (q1[i] 0 0 0)^T */
	{
	0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B,
	0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B,
	0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32,
	0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1,
	0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA,
	0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B,
	0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1,
	0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5,
	0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490,
	0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154,
	0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0,
	0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796,
	0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228,
	0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7,
	0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3,
	0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8,
	0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477,
	0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF,
	0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C,
	0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9,
	0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA,
	0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D,
	0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72,
	0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E,
	0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76,
	0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321,
	0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39,
	0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01,
	0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D,
	0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E,
	0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5,
	0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64,
	0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7,
	0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544,
	0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E,
	0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E,
	0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A,
	0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B,
	0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2,
	0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9,
	0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504,
	0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756,
	0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91},

	/* mds[1][i] = MDS (0 q0[i] 0 0)^T */
	{
	0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252,
	0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A,
	0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020,
	0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141,
	0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444,
	0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424,
	0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A,
	0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757,
	0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383,
	0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A,
	0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9,
	0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656,
	0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1,
	0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898,
	0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414,
	0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3,
	0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1,
	0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989,
	0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5,
	0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282,
	0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E,
	0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E,
	0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202,
	0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC,
	0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565,
	0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A,
	0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808,
	0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272,
	0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A,
	0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969,
	0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505,
	0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5,
	0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D,
	0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343,
	0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF,
	0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3,
	0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F,
	0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646,
	0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6,
	0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF,
	0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A,
	0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7,
	0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8},

	/* mds[2][i] = MDS (0 0 q1[i] 0)^T */
	{
	0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B,
	0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F,
	0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A,
	0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783,
	0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70,
	0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3,
	0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB,
	0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA,
	0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4,
	0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41,
	0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C,
	0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07,
	0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622,
	0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18,
	0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035,
	0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96,
	0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84,
	0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E,
	0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F,
	0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD,
	0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558,
	0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40,
	0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA,
	0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85,
	0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF,
	0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773,
	0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D,
	0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B,
	0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C,
	0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19,
	0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086,
	0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D,
	0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74,
	0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755,
	0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691,
	0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D,
	0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4,
	0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53,
	0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E,
	0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9,
	0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705,
	0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7,
	0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF},

	/* mds[3][i] = MDS (0 0 0 q0[i])^T */
	{
	0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98,
	0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866,
	0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643,
	0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77,
	0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9,
	0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C,
	0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3,
	0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216,
	0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F,
	0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25,
	0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF,
	0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7,
	0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4,
	0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E,
	0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA,
	0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C,
	0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12,
	0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A,
	0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D,
	0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE,
	0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A,
	0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C,
	0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B,
	0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4,
	0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B,
	0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3,
	0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE,
	0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB,
	0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85,
	0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA,
	0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E,
	0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8,
	0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33,
	0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC,
	0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718,
	0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA,
	0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8,
	0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872,
	0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882,
	0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D,
	0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10,
	0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6,
	0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8}
};

/* The exp_to_poly and poly_to_exp tables are used to perform efficient
 * operations in GF(2^8) represented as GF(2)[x]/w(x) where
 * w(x)=x^8+x^6+x^3+x^2+1.  We care about doing that because it's part of the
 * definition of the RS matrix in the key schedule.  Elements of that field
 * are polynomials of degree not greater than 7 and all coefficients 0 or 1,
 * which can be represented naturally by bytes (just substitute x=2).  In that
 * form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8)
 * multiplication is inefficient without hardware support.  To multiply
 * faster, I make use of the fact x is a generator for the nonzero elements,
 * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for
 * some n in 0..254.  Note that that caret is exponentiation in GF(2^8),
 * *not* polynomial notation.  So if I want to compute pq where p and q are
 * in GF(2^8), I can just say:
 *    1. if p=0 or q=0 then pq=0
 *    2. otherwise, find m and n such that p=x^m and q=x^n
 *    3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq
 * The translations in steps 2 and 3 are looked up in the tables
 * poly_to_exp (for step 2) and exp_to_poly (for step 3).  To see this
 * in action, look at the CALC_S macro.  As additional wrinkles, note that
 * one of my operands is always a constant, so the poly_to_exp lookup on it
 * is done in advance; I included the original values in the comments so
 * readers can have some chance of recognizing that this *is* the RS matrix
 * from the Twofish paper.  I've only included the table entries I actually
 * need; I never do a lookup on a variable input of zero and the biggest
 * exponents I'll ever see are 254 (variable) and 237 (constant), so they'll
 * never sum to more than 491.  I'm repeating part of the exp_to_poly table
 * so that I don't have to do mod-255 reduction in the exponent arithmetic.
 * Since I know my constant operands are never zero, I only have to worry
 * about zero values in the variable operand, and I do it with a simple
 * conditional branch.  I know conditionals are expensive, but I couldn't
 * see a non-horrible way of avoiding them, and I did manage to group the
 * statements so that each if covers four group multiplications. */

/* "Log" table: poly_to_exp[p - 1] is the exponent n with x^n = p; entry for
 * p = 0 is omitted (zero has no discrete log), hence 255 entries and the
 * "- 1" indexing in CALC_S. */
static const u8 poly_to_exp[255] = {
	0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19,
	0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A,
	0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C,
	0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B,
	0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47,
	0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D,
	0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8,
	0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C,
	0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83,
	0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48,
	0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26,
	0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E,
	0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3,
	0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9,
	0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A,
	0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D,
	0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75,
	0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84,
	0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64,
	0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49,
	0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF,
	0x85, 0xC8, 0xA1
};

/* "Antilog" table: exp_to_poly[n] = x^n.  The first 255 entries repeat for
 * n = 255..491 so that exponent sums need no mod-255 reduction (see the
 * comment above). */
static const u8 exp_to_poly[492] = {
	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2,
	0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03,
	0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6,
	0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A,
	0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63,
	0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C,
	0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07,
	0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88,
	0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12,
	0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7,
	0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C,
	0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8,
	0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25,
	0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A,
	0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE,
	0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC,
	0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E,
	0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92,
	0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89,
	0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB,
	0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1,
	0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D,
	0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC,
	0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3,
	0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52,
	0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0,
	0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1,
	0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A,
	0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11,
	0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51,
	0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66,
	0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB,
	0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19,
	0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D,
	0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56,
	0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE,
	0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9,
	0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE,
	0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41,
	0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E,
	0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB
};


/* The table constants are indices of
 * S-box entries, preprocessed through q0 and q1.
 */
/* 512 = 2 x 256 interleaved bytes: per the CALC_SB*_2 call pattern in
 * twofish_setkey (calc_sb_tbl[j]/calc_sb_tbl[k] with j even, k odd), even
 * positions feed the q0-derived argument and odd positions the q1-derived
 * one. */
static const u8 calc_sb_tbl[512] = {
	0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4,
	0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8,
	0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B,
	0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B,
	0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD,
	0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1,
	0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B,
	0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F,
	0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B,
	0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D,
	0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E,
	0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5,
	0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14,
	0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3,
	0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54,
	0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51,
	0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A,
	0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96,
	0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10,
	0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C,
	0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7,
	0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70,
	0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB,
	0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8,
	0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF,
	0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC,
	0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF,
	0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2,
	0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82,
	0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9,
	0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97,
	0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17,
	0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D,
	0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3,
	0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C,
	0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E,
	0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F,
	0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49,
	0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21,
	0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9,
	0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD,
	0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01,
	0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F,
	0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48,
	0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E,
	0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19,
	0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57,
	0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64,
	0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE,
	0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5,
	0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44,
	0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69,
	0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15,
	0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E,
	0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34,
	0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC,
	0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B,
	0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB,
	0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52,
	0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9,
	0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4,
	0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2,
	0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56,
	0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91
};

/* Macro to perform one column of the RS matrix multiplication.  The
 * parameters a, b, c, and d are the four bytes of output; i is the index
 * of the key bytes, and w, x, y, and z, are the column of constants from
 * the RS matrix, preprocessed through the poly_to_exp table.
 */

/* NOTE(review): relies on file-local `key` (the u8 key bytes) and `tmp`
 * (u8 scratch) being in scope at the expansion site, as they are in
 * twofish_setkey.  The `if (key[i])` guard handles the zero operand, which
 * has no entry in poly_to_exp (see the GF(2^8) comment above). */
#define CALC_S(a, b, c, d, i, w, x, y, z) \
	if (key[i]) { \
		tmp = poly_to_exp[key[i] - 1]; \
		(a) ^= exp_to_poly[tmp + (w)]; \
		(b) ^= exp_to_poly[tmp + (x)]; \
		(c) ^= exp_to_poly[tmp + (y)]; \
		(d) ^= exp_to_poly[tmp + (z)]; \
	}

/* Macros to calculate the key-dependent S-boxes for a 128-bit key using
 * the S vector from CALC_S.  CALC_SB_2 computes a single entry in all
 * four S-boxes, where i is the index of the entry to compute, and a and b
 * are the index numbers preprocessed through the q0 and q1 tables
 * respectively. */

#define CALC_SB_2(i, a, b) \
	ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \
	ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
	ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \
	ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]

/* Macro exactly like CALC_SB_2, but for 192-bit keys (one extra q-table
 * pass keyed by si..sl).  Unlike CALC_SB_2 it carries its own trailing
 * semicolon, so an expansion site's `;` yields a harmless empty statement. */

#define CALC_SB192_2(i, a, b) \
	ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
	ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
	ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
	ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl];

/* Macro exactly like CALC_SB_2, but for 256-bit keys (two extra q-table
 * passes keyed by si..sl and sm..sp). */

#define CALC_SB256_2(i, a, b) \
	ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
	ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \
	ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
	ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp];

/* Macros to calculate the whitening and round subkeys.  CALC_K_2 computes the
 * last two stages of the h() function for a given index (either 2i or 2i+1).
 * a, b, c, and d are the four bytes going into the last two stages.  For
 * 128-bit keys, this is the entire h() function and a and c are the index
 * preprocessed through q0 and q1 respectively; for longer keys they are the
 * output of previous stages.  j is the index of the first key byte to use.
 * CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2
 * twice, doing the Pseudo-Hadamard Transform, and doing the necessary
 * rotations.  Its parameters are: a, the array to write the results into,
 * j, the index of the first output entry, k and l, the preprocessed indices
 * for index 2i, and m and n, the preprocessed indices for index 2i+1.
 * CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an
 * additional lookup-and-XOR stage.  The parameters a, b, c and d are the
 * four bytes going into the last three stages.  For 192-bit keys, c = d
 * are the index preprocessed through q0, and a = b are the index
 * preprocessed through q1; j is the index of the first key byte to use.
 * CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro
 * instead of CALC_K_2.
 * CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an
 * additional lookup-and-XOR stage.  The parameters a and b are the index
 * preprocessed through q0 and q1 respectively; j is the index of the first
 * key byte to use.  CALC_K256 is identical to CALC_K but for using the
 * CALC_K256_2 macro instead of CALC_K_2.
*/ 542 + 543 + #define CALC_K_2(a, b, c, d, j) \ 544 + mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \ 545 + ^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \ 546 + ^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \ 547 + ^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]] 548 + 549 + #define CALC_K(a, j, k, l, m, n) \ 550 + x = CALC_K_2 (k, l, k, l, 0); \ 551 + y = CALC_K_2 (m, n, m, n, 4); \ 552 + y = rol32(y, 8); \ 553 + x += y; y += x; ctx->a[j] = x; \ 554 + ctx->a[(j) + 1] = rol32(y, 9) 555 + 556 + #define CALC_K192_2(a, b, c, d, j) \ 557 + CALC_K_2 (q0[a ^ key[(j) + 16]], \ 558 + q1[b ^ key[(j) + 17]], \ 559 + q0[c ^ key[(j) + 18]], \ 560 + q1[d ^ key[(j) + 19]], j) 561 + 562 + #define CALC_K192(a, j, k, l, m, n) \ 563 + x = CALC_K192_2 (l, l, k, k, 0); \ 564 + y = CALC_K192_2 (n, n, m, m, 4); \ 565 + y = rol32(y, 8); \ 566 + x += y; y += x; ctx->a[j] = x; \ 567 + ctx->a[(j) + 1] = rol32(y, 9) 568 + 569 + #define CALC_K256_2(a, b, j) \ 570 + CALC_K192_2 (q1[b ^ key[(j) + 24]], \ 571 + q1[a ^ key[(j) + 25]], \ 572 + q0[a ^ key[(j) + 26]], \ 573 + q0[b ^ key[(j) + 27]], j) 574 + 575 + #define CALC_K256(a, j, k, l, m, n) \ 576 + x = CALC_K256_2 (k, l, 0); \ 577 + y = CALC_K256_2 (m, n, 4); \ 578 + y = rol32(y, 8); \ 579 + x += y; y += x; ctx->a[j] = x; \ 580 + ctx->a[(j) + 1] = rol32(y, 9) 581 + 582 + /* Perform the key setup. */ 583 + int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) 584 + { 585 + 586 + struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); 587 + u32 *flags = &tfm->crt_flags; 588 + 589 + int i, j, k; 590 + 591 + /* Temporaries for CALC_K. */ 592 + u32 x, y; 593 + 594 + /* The S vector used to key the S-boxes, split up into individual bytes. 595 + * 128-bit keys use only sa through sh; 256-bit use all of them. */ 596 + u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0; 597 + u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0; 598 + 599 + /* Temporary for CALC_S. 
*/ 600 + u8 tmp; 601 + 602 + /* Check key length. */ 603 + if (key_len % 8) 604 + { 605 + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 606 + return -EINVAL; /* unsupported key length */ 607 + } 608 + 609 + /* Compute the first two words of the S vector. The magic numbers are 610 + * the entries of the RS matrix, preprocessed through poly_to_exp. The 611 + * numbers in the comments are the original (polynomial form) matrix 612 + * entries. */ 613 + CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 614 + CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 615 + CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ 616 + CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 617 + CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 618 + CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 619 + CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 620 + CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 621 + CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 622 + CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 623 + CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ 624 + CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 625 + CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 626 + CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 627 + CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 628 + CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 629 + 630 + if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */ 631 + /* Calculate the third word of the S vector */ 632 + CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 633 + CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 634 + CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 
0xD1); /* 55 82 FC 87 */ 635 + CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 636 + CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 637 + CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 638 + CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 639 + CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 640 + } 641 + 642 + if (key_len == 32) { /* 256-bit key */ 643 + /* Calculate the fourth word of the S vector */ 644 + CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ 645 + CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ 646 + CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ 647 + CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ 648 + CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ 649 + CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ 650 + CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ 651 + CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ 652 + 653 + /* Compute the S-boxes. */ 654 + for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { 655 + CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); 656 + } 657 + 658 + /* Calculate whitening and round subkeys. The constants are 659 + * indices of subkeys, preprocessed through q0 and q1. 
*/ 660 + CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3); 661 + CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); 662 + CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); 663 + CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); 664 + CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); 665 + CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B); 666 + CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); 667 + CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); 668 + CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); 669 + CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD); 670 + CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71); 671 + CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); 672 + CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F); 673 + CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B); 674 + CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA); 675 + CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F); 676 + CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); 677 + CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); 678 + CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00); 679 + CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); 680 + } else if (key_len == 24) { /* 192-bit key */ 681 + /* Compute the S-boxes. */ 682 + for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { 683 + CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); 684 + } 685 + 686 + /* Calculate whitening and round subkeys. The constants are 687 + * indices of subkeys, preprocessed through q0 and q1. 
*/ 688 + CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3); 689 + CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); 690 + CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); 691 + CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); 692 + CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); 693 + CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B); 694 + CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); 695 + CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); 696 + CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); 697 + CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD); 698 + CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71); 699 + CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); 700 + CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F); 701 + CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B); 702 + CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA); 703 + CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F); 704 + CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); 705 + CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); 706 + CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00); 707 + CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); 708 + } else { /* 128-bit key */ 709 + /* Compute the S-boxes. */ 710 + for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { 711 + CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); 712 + } 713 + 714 + /* Calculate whitening and round subkeys. The constants are 715 + * indices of subkeys, preprocessed through q0 and q1. 
*/ 716 + CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3); 717 + CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); 718 + CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B); 719 + CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8); 720 + CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3); 721 + CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B); 722 + CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D); 723 + CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B); 724 + CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32); 725 + CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD); 726 + CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71); 727 + CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); 728 + CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F); 729 + CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B); 730 + CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA); 731 + CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F); 732 + CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); 733 + CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B); 734 + CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00); 735 + CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D); 736 + } 737 + 738 + return 0; 739 + } 740 + 741 + EXPORT_SYMBOL_GPL(twofish_setkey); 742 + 743 + MODULE_LICENSE("GPL"); 744 + MODULE_DESCRIPTION("Twofish cipher common functions");
+53 -105
drivers/block/cryptoloop.c
··· 40 40 cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) 41 41 { 42 42 int err = -EINVAL; 43 + int cipher_len; 44 + int mode_len; 43 45 char cms[LO_NAME_SIZE]; /* cipher-mode string */ 44 46 char *cipher; 45 47 char *mode; 46 48 char *cmsp = cms; /* c-m string pointer */ 47 - struct crypto_tfm *tfm = NULL; 49 + struct crypto_blkcipher *tfm; 48 50 49 51 /* encryption breaks for non sector aligned offsets */ 50 52 ··· 55 53 56 54 strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); 57 55 cms[LO_NAME_SIZE - 1] = 0; 58 - cipher = strsep(&cmsp, "-"); 59 - mode = strsep(&cmsp, "-"); 60 56 61 - if (mode == NULL || strcmp(mode, "cbc") == 0) 62 - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC | 63 - CRYPTO_TFM_REQ_MAY_SLEEP); 64 - else if (strcmp(mode, "ecb") == 0) 65 - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB | 66 - CRYPTO_TFM_REQ_MAY_SLEEP); 67 - if (tfm == NULL) 57 + cipher = cmsp; 58 + cipher_len = strcspn(cmsp, "-"); 59 + 60 + mode = cmsp + cipher_len; 61 + mode_len = 0; 62 + if (*mode) { 63 + mode++; 64 + mode_len = strcspn(mode, "-"); 65 + } 66 + 67 + if (!mode_len) { 68 + mode = "cbc"; 69 + mode_len = 3; 70 + } 71 + 72 + if (cipher_len + mode_len + 3 > LO_NAME_SIZE) 68 73 return -EINVAL; 69 74 70 - err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key, 71 - info->lo_encrypt_key_size); 75 + memmove(cms, mode, mode_len); 76 + cmsp = cms + mode_len; 77 + *cmsp++ = '('; 78 + memcpy(cmsp, info->lo_crypt_name, cipher_len); 79 + cmsp += cipher_len; 80 + *cmsp++ = ')'; 81 + *cmsp = 0; 82 + 83 + tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC); 84 + if (IS_ERR(tfm)) 85 + return PTR_ERR(tfm); 86 + 87 + err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key, 88 + info->lo_encrypt_key_size); 72 89 73 90 if (err != 0) 74 91 goto out_free_tfm; ··· 96 75 return 0; 97 76 98 77 out_free_tfm: 99 - crypto_free_tfm(tfm); 78 + crypto_free_blkcipher(tfm); 100 79 101 80 out: 102 81 return err; 103 82 } 104 83 105 84 106 - typedef int 
(*encdec_ecb_t)(struct crypto_tfm *tfm, 85 + typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc, 107 86 struct scatterlist *sg_out, 108 87 struct scatterlist *sg_in, 109 88 unsigned int nsg); 110 89 111 - 112 90 static int 113 - cryptoloop_transfer_ecb(struct loop_device *lo, int cmd, 114 - struct page *raw_page, unsigned raw_off, 115 - struct page *loop_page, unsigned loop_off, 116 - int size, sector_t IV) 91 + cryptoloop_transfer(struct loop_device *lo, int cmd, 92 + struct page *raw_page, unsigned raw_off, 93 + struct page *loop_page, unsigned loop_off, 94 + int size, sector_t IV) 117 95 { 118 - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; 119 - struct scatterlist sg_out = { NULL, }; 120 - struct scatterlist sg_in = { NULL, }; 121 - 122 - encdec_ecb_t encdecfunc; 123 - struct page *in_page, *out_page; 124 - unsigned in_offs, out_offs; 125 - 126 - if (cmd == READ) { 127 - in_page = raw_page; 128 - in_offs = raw_off; 129 - out_page = loop_page; 130 - out_offs = loop_off; 131 - encdecfunc = tfm->crt_u.cipher.cit_decrypt; 132 - } else { 133 - in_page = loop_page; 134 - in_offs = loop_off; 135 - out_page = raw_page; 136 - out_offs = raw_off; 137 - encdecfunc = tfm->crt_u.cipher.cit_encrypt; 138 - } 139 - 140 - while (size > 0) { 141 - const int sz = min(size, LOOP_IV_SECTOR_SIZE); 142 - 143 - sg_in.page = in_page; 144 - sg_in.offset = in_offs; 145 - sg_in.length = sz; 146 - 147 - sg_out.page = out_page; 148 - sg_out.offset = out_offs; 149 - sg_out.length = sz; 150 - 151 - encdecfunc(tfm, &sg_out, &sg_in, sz); 152 - 153 - size -= sz; 154 - in_offs += sz; 155 - out_offs += sz; 156 - } 157 - 158 - return 0; 159 - } 160 - 161 - typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm, 162 - struct scatterlist *sg_out, 163 - struct scatterlist *sg_in, 164 - unsigned int nsg, u8 *iv); 165 - 166 - static int 167 - cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, 168 - struct page *raw_page, unsigned raw_off, 169 - struct page *loop_page, unsigned 
loop_off, 170 - int size, sector_t IV) 171 - { 172 - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; 96 + struct crypto_blkcipher *tfm = lo->key_data; 97 + struct blkcipher_desc desc = { 98 + .tfm = tfm, 99 + .flags = CRYPTO_TFM_REQ_MAY_SLEEP, 100 + }; 173 101 struct scatterlist sg_out = { NULL, }; 174 102 struct scatterlist sg_in = { NULL, }; 175 103 176 104 encdec_cbc_t encdecfunc; 177 105 struct page *in_page, *out_page; 178 106 unsigned in_offs, out_offs; 107 + int err; 179 108 180 109 if (cmd == READ) { 181 110 in_page = raw_page; 182 111 in_offs = raw_off; 183 112 out_page = loop_page; 184 113 out_offs = loop_off; 185 - encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv; 114 + encdecfunc = crypto_blkcipher_crt(tfm)->decrypt; 186 115 } else { 187 116 in_page = loop_page; 188 117 in_offs = loop_off; 189 118 out_page = raw_page; 190 119 out_offs = raw_off; 191 - encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv; 120 + encdecfunc = crypto_blkcipher_crt(tfm)->encrypt; 192 121 } 193 122 194 123 while (size > 0) { ··· 154 183 sg_out.offset = out_offs; 155 184 sg_out.length = sz; 156 185 157 - encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv); 186 + desc.info = iv; 187 + err = encdecfunc(&desc, &sg_out, &sg_in, sz); 188 + if (err) 189 + return err; 158 190 159 191 IV++; 160 192 size -= sz; ··· 169 195 } 170 196 171 197 static int 172 - cryptoloop_transfer(struct loop_device *lo, int cmd, 173 - struct page *raw_page, unsigned raw_off, 174 - struct page *loop_page, unsigned loop_off, 175 - int size, sector_t IV) 176 - { 177 - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; 178 - if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB) 179 - { 180 - lo->transfer = cryptoloop_transfer_ecb; 181 - return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off, 182 - loop_page, loop_off, size, IV); 183 - } 184 - if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC) 185 - { 186 - lo->transfer = cryptoloop_transfer_cbc; 187 - return cryptoloop_transfer_cbc(lo, cmd, 
raw_page, raw_off, 188 - loop_page, loop_off, size, IV); 189 - } 190 - 191 - /* This is not supposed to happen */ 192 - 193 - printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n"); 194 - return -EINVAL; 195 - } 196 - 197 - static int 198 198 cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg) 199 199 { 200 200 return -EINVAL; ··· 177 229 static int 178 230 cryptoloop_release(struct loop_device *lo) 179 231 { 180 - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; 232 + struct crypto_blkcipher *tfm = lo->key_data; 181 233 if (tfm != NULL) { 182 - crypto_free_tfm(tfm); 234 + crypto_free_blkcipher(tfm); 183 235 lo->key_data = NULL; 184 236 return 0; 185 237 }
+38 -7
drivers/crypto/Kconfig
··· 2 2 3 3 config CRYPTO_DEV_PADLOCK 4 4 tristate "Support for VIA PadLock ACE" 5 - depends on CRYPTO && X86_32 5 + depends on X86_32 6 + select CRYPTO_ALGAPI 7 + default m 6 8 help 7 9 Some VIA processors come with an integrated crypto engine 8 10 (so called VIA PadLock ACE, Advanced Cryptography Engine) 9 - that provides instructions for very fast {en,de}cryption 10 - with some algorithms. 11 + that provides instructions for very fast cryptographic 12 + operations with supported algorithms. 11 13 12 14 The instructions are used only when the CPU supports them. 13 - Otherwise software encryption is used. If you are unsure, 14 - say Y. 15 + Otherwise software encryption is used. 16 + 17 + Selecting M for this option will compile a helper module 18 + padlock.ko that should autoload all below configured 19 + algorithms. Don't worry if your hardware does not support 20 + some or all of them. In such case padlock.ko will 21 + simply write a single line into the kernel log informing 22 + about its failure but everything will keep working fine. 23 + 24 + If you are unsure, say M. The compiled module will be 25 + called padlock.ko 15 26 16 27 config CRYPTO_DEV_PADLOCK_AES 17 - bool "Support for AES in VIA PadLock" 28 + tristate "PadLock driver for AES algorithm" 18 29 depends on CRYPTO_DEV_PADLOCK 19 - default y 30 + select CRYPTO_BLKCIPHER 31 + default m 20 32 help 21 33 Use VIA PadLock for AES algorithm. 34 + 35 + Available in VIA C3 and newer CPUs. 36 + 37 + If unsure say M. The compiled module will be 38 + called padlock-aes.ko 39 + 40 + config CRYPTO_DEV_PADLOCK_SHA 41 + tristate "PadLock driver for SHA1 and SHA256 algorithms" 42 + depends on CRYPTO_DEV_PADLOCK 43 + select CRYPTO_SHA1 44 + select CRYPTO_SHA256 45 + default m 46 + help 47 + Use VIA PadLock for SHA1/SHA256 algorithms. 48 + 49 + Available in VIA C7 and newer processors. 50 + 51 + If unsure say M. The compiled module will be 52 + called padlock-sha.ko 22 53 23 54 endmenu
+2 -6
drivers/crypto/Makefile
··· 1 - 2 1 obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o 3 - 4 - padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o 5 - 6 - padlock-objs := padlock-generic.o $(padlock-objs-y) 7 - 2 + obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o 3 + obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+208 -54
drivers/crypto/padlock-aes.c
··· 43 43 * --------------------------------------------------------------------------- 44 44 */ 45 45 46 + #include <crypto/algapi.h> 46 47 #include <linux/module.h> 47 48 #include <linux/init.h> 48 49 #include <linux/types.h> 49 50 #include <linux/errno.h> 50 - #include <linux/crypto.h> 51 51 #include <linux/interrupt.h> 52 52 #include <linux/kernel.h> 53 53 #include <asm/byteorder.h> ··· 58 58 #define AES_BLOCK_SIZE 16 /* ditto */ 59 59 #define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ 60 60 #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) 61 + 62 + /* Control word. */ 63 + struct cword { 64 + unsigned int __attribute__ ((__packed__)) 65 + rounds:4, 66 + algo:3, 67 + keygen:1, 68 + interm:1, 69 + encdec:1, 70 + ksize:2; 71 + } __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); 61 72 62 73 /* Whenever making any changes to the following 63 74 * structure *make sure* you keep E, d_data ··· 297 286 return 0; 298 287 } 299 288 300 - static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) 289 + static inline struct aes_ctx *aes_ctx_common(void *ctx) 301 290 { 302 - unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); 291 + unsigned long addr = (unsigned long)ctx; 303 292 unsigned long align = PADLOCK_ALIGNMENT; 304 293 305 294 if (align <= crypto_tfm_ctx_alignment()) ··· 307 296 return (struct aes_ctx *)ALIGN(addr, align); 308 297 } 309 298 299 + static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) 300 + { 301 + return aes_ctx_common(crypto_tfm_ctx(tfm)); 302 + } 303 + 304 + static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) 305 + { 306 + return aes_ctx_common(crypto_blkcipher_ctx(tfm)); 307 + } 308 + 310 309 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 311 - unsigned int key_len, u32 *flags) 310 + unsigned int key_len) 312 311 { 313 312 struct aes_ctx *ctx = aes_ctx(tfm); 314 313 const __le32 *key = (const __le32 *)in_key; 314 + u32 *flags = &tfm->crt_flags; 315 315 uint32_t 
i, t, u, v, w; 316 316 uint32_t P[AES_EXTENDED_KEY_SIZE]; 317 317 uint32_t rounds; 318 318 319 - if (key_len != 16 && key_len != 24 && key_len != 32) { 319 + if (key_len % 8) { 320 320 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 321 321 return -EINVAL; 322 322 } ··· 452 430 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); 453 431 } 454 432 455 - static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, 456 - const u8 *in, unsigned int nbytes) 457 - { 458 - struct aes_ctx *ctx = aes_ctx(desc->tfm); 459 - padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 460 - nbytes / AES_BLOCK_SIZE); 461 - return nbytes & ~(AES_BLOCK_SIZE - 1); 462 - } 463 - 464 - static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, 465 - const u8 *in, unsigned int nbytes) 466 - { 467 - struct aes_ctx *ctx = aes_ctx(desc->tfm); 468 - padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 469 - nbytes / AES_BLOCK_SIZE); 470 - return nbytes & ~(AES_BLOCK_SIZE - 1); 471 - } 472 - 473 - static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, 474 - const u8 *in, unsigned int nbytes) 475 - { 476 - struct aes_ctx *ctx = aes_ctx(desc->tfm); 477 - u8 *iv; 478 - 479 - iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info, 480 - &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE); 481 - memcpy(desc->info, iv, AES_BLOCK_SIZE); 482 - 483 - return nbytes & ~(AES_BLOCK_SIZE - 1); 484 - } 485 - 486 - static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, 487 - const u8 *in, unsigned int nbytes) 488 - { 489 - struct aes_ctx *ctx = aes_ctx(desc->tfm); 490 - padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt, 491 - nbytes / AES_BLOCK_SIZE); 492 - return nbytes & ~(AES_BLOCK_SIZE - 1); 493 - } 494 - 495 433 static struct crypto_alg aes_alg = { 496 434 .cra_name = "aes", 497 435 .cra_driver_name = "aes-padlock", 498 - .cra_priority = 300, 436 + .cra_priority = PADLOCK_CRA_PRIORITY, 499 437 .cra_flags = 
CRYPTO_ALG_TYPE_CIPHER, 500 438 .cra_blocksize = AES_BLOCK_SIZE, 501 439 .cra_ctxsize = sizeof(struct aes_ctx), ··· 469 487 .cia_setkey = aes_set_key, 470 488 .cia_encrypt = aes_encrypt, 471 489 .cia_decrypt = aes_decrypt, 472 - .cia_encrypt_ecb = aes_encrypt_ecb, 473 - .cia_decrypt_ecb = aes_decrypt_ecb, 474 - .cia_encrypt_cbc = aes_encrypt_cbc, 475 - .cia_decrypt_cbc = aes_decrypt_cbc, 476 490 } 477 491 } 478 492 }; 479 493 480 - int __init padlock_init_aes(void) 494 + static int ecb_aes_encrypt(struct blkcipher_desc *desc, 495 + struct scatterlist *dst, struct scatterlist *src, 496 + unsigned int nbytes) 481 497 { 482 - printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); 498 + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 499 + struct blkcipher_walk walk; 500 + int err; 501 + 502 + blkcipher_walk_init(&walk, dst, src, nbytes); 503 + err = blkcipher_walk_virt(desc, &walk); 504 + 505 + while ((nbytes = walk.nbytes)) { 506 + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, 507 + ctx->E, &ctx->cword.encrypt, 508 + nbytes / AES_BLOCK_SIZE); 509 + nbytes &= AES_BLOCK_SIZE - 1; 510 + err = blkcipher_walk_done(desc, &walk, nbytes); 511 + } 512 + 513 + return err; 514 + } 515 + 516 + static int ecb_aes_decrypt(struct blkcipher_desc *desc, 517 + struct scatterlist *dst, struct scatterlist *src, 518 + unsigned int nbytes) 519 + { 520 + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 521 + struct blkcipher_walk walk; 522 + int err; 523 + 524 + blkcipher_walk_init(&walk, dst, src, nbytes); 525 + err = blkcipher_walk_virt(desc, &walk); 526 + 527 + while ((nbytes = walk.nbytes)) { 528 + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, 529 + ctx->D, &ctx->cword.decrypt, 530 + nbytes / AES_BLOCK_SIZE); 531 + nbytes &= AES_BLOCK_SIZE - 1; 532 + err = blkcipher_walk_done(desc, &walk, nbytes); 533 + } 534 + 535 + return err; 536 + } 537 + 538 + static struct crypto_alg ecb_aes_alg = { 539 + .cra_name = "ecb(aes)", 540 + .cra_driver_name = 
"ecb-aes-padlock", 541 + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, 542 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 543 + .cra_blocksize = AES_BLOCK_SIZE, 544 + .cra_ctxsize = sizeof(struct aes_ctx), 545 + .cra_alignmask = PADLOCK_ALIGNMENT - 1, 546 + .cra_type = &crypto_blkcipher_type, 547 + .cra_module = THIS_MODULE, 548 + .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), 549 + .cra_u = { 550 + .blkcipher = { 551 + .min_keysize = AES_MIN_KEY_SIZE, 552 + .max_keysize = AES_MAX_KEY_SIZE, 553 + .setkey = aes_set_key, 554 + .encrypt = ecb_aes_encrypt, 555 + .decrypt = ecb_aes_decrypt, 556 + } 557 + } 558 + }; 559 + 560 + static int cbc_aes_encrypt(struct blkcipher_desc *desc, 561 + struct scatterlist *dst, struct scatterlist *src, 562 + unsigned int nbytes) 563 + { 564 + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 565 + struct blkcipher_walk walk; 566 + int err; 567 + 568 + blkcipher_walk_init(&walk, dst, src, nbytes); 569 + err = blkcipher_walk_virt(desc, &walk); 570 + 571 + while ((nbytes = walk.nbytes)) { 572 + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, 573 + walk.dst.virt.addr, ctx->E, 574 + walk.iv, &ctx->cword.encrypt, 575 + nbytes / AES_BLOCK_SIZE); 576 + memcpy(walk.iv, iv, AES_BLOCK_SIZE); 577 + nbytes &= AES_BLOCK_SIZE - 1; 578 + err = blkcipher_walk_done(desc, &walk, nbytes); 579 + } 580 + 581 + return err; 582 + } 583 + 584 + static int cbc_aes_decrypt(struct blkcipher_desc *desc, 585 + struct scatterlist *dst, struct scatterlist *src, 586 + unsigned int nbytes) 587 + { 588 + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 589 + struct blkcipher_walk walk; 590 + int err; 591 + 592 + blkcipher_walk_init(&walk, dst, src, nbytes); 593 + err = blkcipher_walk_virt(desc, &walk); 594 + 595 + while ((nbytes = walk.nbytes)) { 596 + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, 597 + ctx->D, walk.iv, &ctx->cword.decrypt, 598 + nbytes / AES_BLOCK_SIZE); 599 + nbytes &= AES_BLOCK_SIZE - 1; 600 + err = blkcipher_walk_done(desc, &walk, nbytes); 601 + } 
602 + 603 + return err; 604 + } 605 + 606 + static struct crypto_alg cbc_aes_alg = { 607 + .cra_name = "cbc(aes)", 608 + .cra_driver_name = "cbc-aes-padlock", 609 + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, 610 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 611 + .cra_blocksize = AES_BLOCK_SIZE, 612 + .cra_ctxsize = sizeof(struct aes_ctx), 613 + .cra_alignmask = PADLOCK_ALIGNMENT - 1, 614 + .cra_type = &crypto_blkcipher_type, 615 + .cra_module = THIS_MODULE, 616 + .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), 617 + .cra_u = { 618 + .blkcipher = { 619 + .min_keysize = AES_MIN_KEY_SIZE, 620 + .max_keysize = AES_MAX_KEY_SIZE, 621 + .ivsize = AES_BLOCK_SIZE, 622 + .setkey = aes_set_key, 623 + .encrypt = cbc_aes_encrypt, 624 + .decrypt = cbc_aes_decrypt, 625 + } 626 + } 627 + }; 628 + 629 + static int __init padlock_init(void) 630 + { 631 + int ret; 632 + 633 + if (!cpu_has_xcrypt) { 634 + printk(KERN_ERR PFX "VIA PadLock not detected.\n"); 635 + return -ENODEV; 636 + } 637 + 638 + if (!cpu_has_xcrypt_enabled) { 639 + printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. 
Hmm, strange...\n"); 640 + return -ENODEV; 641 + } 483 642 484 643 gen_tabs(); 485 - return crypto_register_alg(&aes_alg); 644 + if ((ret = crypto_register_alg(&aes_alg))) 645 + goto aes_err; 646 + 647 + if ((ret = crypto_register_alg(&ecb_aes_alg))) 648 + goto ecb_aes_err; 649 + 650 + if ((ret = crypto_register_alg(&cbc_aes_alg))) 651 + goto cbc_aes_err; 652 + 653 + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); 654 + 655 + out: 656 + return ret; 657 + 658 + cbc_aes_err: 659 + crypto_unregister_alg(&ecb_aes_alg); 660 + ecb_aes_err: 661 + crypto_unregister_alg(&aes_alg); 662 + aes_err: 663 + printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); 664 + goto out; 486 665 } 487 666 488 - void __exit padlock_fini_aes(void) 667 + static void __exit padlock_fini(void) 489 668 { 669 + crypto_unregister_alg(&cbc_aes_alg); 670 + crypto_unregister_alg(&ecb_aes_alg); 490 671 crypto_unregister_alg(&aes_alg); 491 672 } 673 + 674 + module_init(padlock_init); 675 + module_exit(padlock_fini); 676 + 677 + MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); 678 + MODULE_LICENSE("GPL"); 679 + MODULE_AUTHOR("Michal Ludvig"); 680 + 681 + MODULE_ALIAS("aes-padlock");
-63
drivers/crypto/padlock-generic.c
··· 1 - /* 2 - * Cryptographic API. 3 - * 4 - * Support for VIA PadLock hardware crypto engine. 5 - * 6 - * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> 7 - * 8 - * This program is free software; you can redistribute it and/or modify 9 - * it under the terms of the GNU General Public License as published by 10 - * the Free Software Foundation; either version 2 of the License, or 11 - * (at your option) any later version. 12 - */ 13 - 14 - #include <linux/module.h> 15 - #include <linux/init.h> 16 - #include <linux/types.h> 17 - #include <linux/errno.h> 18 - #include <linux/crypto.h> 19 - #include <asm/byteorder.h> 20 - #include "padlock.h" 21 - 22 - static int __init 23 - padlock_init(void) 24 - { 25 - int ret = -ENOSYS; 26 - 27 - if (!cpu_has_xcrypt) { 28 - printk(KERN_ERR PFX "VIA PadLock not detected.\n"); 29 - return -ENODEV; 30 - } 31 - 32 - if (!cpu_has_xcrypt_enabled) { 33 - printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); 34 - return -ENODEV; 35 - } 36 - 37 - #ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES 38 - if ((ret = padlock_init_aes())) { 39 - printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); 40 - return ret; 41 - } 42 - #endif 43 - 44 - if (ret == -ENOSYS) 45 - printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n"); 46 - 47 - return ret; 48 - } 49 - 50 - static void __exit 51 - padlock_fini(void) 52 - { 53 - #ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES 54 - padlock_fini_aes(); 55 - #endif 56 - } 57 - 58 - module_init(padlock_init); 59 - module_exit(padlock_fini); 60 - 61 - MODULE_DESCRIPTION("VIA PadLock crypto engine support."); 62 - MODULE_LICENSE("Dual BSD/GPL"); 63 - MODULE_AUTHOR("Michal Ludvig");
+318
drivers/crypto/padlock-sha.c
··· 1 + /* 2 + * Cryptographic API. 3 + * 4 + * Support for VIA PadLock hardware crypto engine. 5 + * 6 + * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + */ 14 + 15 + #include <crypto/algapi.h> 16 + #include <linux/err.h> 17 + #include <linux/module.h> 18 + #include <linux/init.h> 19 + #include <linux/errno.h> 20 + #include <linux/cryptohash.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/kernel.h> 23 + #include <linux/scatterlist.h> 24 + #include "padlock.h" 25 + 26 + #define SHA1_DEFAULT_FALLBACK "sha1-generic" 27 + #define SHA1_DIGEST_SIZE 20 28 + #define SHA1_HMAC_BLOCK_SIZE 64 29 + 30 + #define SHA256_DEFAULT_FALLBACK "sha256-generic" 31 + #define SHA256_DIGEST_SIZE 32 32 + #define SHA256_HMAC_BLOCK_SIZE 64 33 + 34 + struct padlock_sha_ctx { 35 + char *data; 36 + size_t used; 37 + int bypass; 38 + void (*f_sha_padlock)(const char *in, char *out, int count); 39 + struct hash_desc fallback; 40 + }; 41 + 42 + static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) 43 + { 44 + return crypto_tfm_ctx(tfm); 45 + } 46 + 47 + /* We'll need aligned address on the stack */ 48 + #define NEAREST_ALIGNED(ptr) \ 49 + ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT)) 50 + 51 + static struct crypto_alg sha1_alg, sha256_alg; 52 + 53 + static void padlock_sha_bypass(struct crypto_tfm *tfm) 54 + { 55 + if (ctx(tfm)->bypass) 56 + return; 57 + 58 + crypto_hash_init(&ctx(tfm)->fallback); 59 + if (ctx(tfm)->data && ctx(tfm)->used) { 60 + struct scatterlist sg; 61 + 62 + sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used); 63 + crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); 64 + } 65 + 66 + ctx(tfm)->used = 0; 67 + ctx(tfm)->bypass = 1; 68 + } 69 + 70 + static void 
padlock_sha_init(struct crypto_tfm *tfm) 71 + { 72 + ctx(tfm)->used = 0; 73 + ctx(tfm)->bypass = 0; 74 + } 75 + 76 + static void padlock_sha_update(struct crypto_tfm *tfm, 77 + const uint8_t *data, unsigned int length) 78 + { 79 + /* Our buffer is always one page. */ 80 + if (unlikely(!ctx(tfm)->bypass && 81 + (ctx(tfm)->used + length > PAGE_SIZE))) 82 + padlock_sha_bypass(tfm); 83 + 84 + if (unlikely(ctx(tfm)->bypass)) { 85 + struct scatterlist sg; 86 + sg_set_buf(&sg, (uint8_t *)data, length); 87 + crypto_hash_update(&ctx(tfm)->fallback, &sg, length); 88 + return; 89 + } 90 + 91 + memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); 92 + ctx(tfm)->used += length; 93 + } 94 + 95 + static inline void padlock_output_block(uint32_t *src, 96 + uint32_t *dst, size_t count) 97 + { 98 + while (count--) 99 + *dst++ = swab32(*src++); 100 + } 101 + 102 + static void padlock_do_sha1(const char *in, char *out, int count) 103 + { 104 + /* We can't store directly to *out as it may be unaligned. */ 105 + /* BTW Don't reduce the buffer size below 128 Bytes! 106 + * PadLock microcode needs it that big. */ 107 + char buf[128+16]; 108 + char *result = NEAREST_ALIGNED(buf); 109 + 110 + ((uint32_t *)result)[0] = 0x67452301; 111 + ((uint32_t *)result)[1] = 0xEFCDAB89; 112 + ((uint32_t *)result)[2] = 0x98BADCFE; 113 + ((uint32_t *)result)[3] = 0x10325476; 114 + ((uint32_t *)result)[4] = 0xC3D2E1F0; 115 + 116 + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ 117 + : "+S"(in), "+D"(result) 118 + : "c"(count), "a"(0)); 119 + 120 + padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); 121 + } 122 + 123 + static void padlock_do_sha256(const char *in, char *out, int count) 124 + { 125 + /* We can't store directly to *out as it may be unaligned. */ 126 + /* BTW Don't reduce the buffer size below 128 Bytes! 127 + * PadLock microcode needs it that big. 
*/ 128 + char buf[128+16]; 129 + char *result = NEAREST_ALIGNED(buf); 130 + 131 + ((uint32_t *)result)[0] = 0x6A09E667; 132 + ((uint32_t *)result)[1] = 0xBB67AE85; 133 + ((uint32_t *)result)[2] = 0x3C6EF372; 134 + ((uint32_t *)result)[3] = 0xA54FF53A; 135 + ((uint32_t *)result)[4] = 0x510E527F; 136 + ((uint32_t *)result)[5] = 0x9B05688C; 137 + ((uint32_t *)result)[6] = 0x1F83D9AB; 138 + ((uint32_t *)result)[7] = 0x5BE0CD19; 139 + 140 + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ 141 + : "+S"(in), "+D"(result) 142 + : "c"(count), "a"(0)); 143 + 144 + padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); 145 + } 146 + 147 + static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) 148 + { 149 + if (unlikely(ctx(tfm)->bypass)) { 150 + crypto_hash_final(&ctx(tfm)->fallback, out); 151 + ctx(tfm)->bypass = 0; 152 + return; 153 + } 154 + 155 + /* Pass the input buffer to PadLock microcode... */ 156 + ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used); 157 + 158 + ctx(tfm)->used = 0; 159 + } 160 + 161 + static int padlock_cra_init(struct crypto_tfm *tfm) 162 + { 163 + const char *fallback_driver_name = tfm->__crt_alg->cra_name; 164 + struct crypto_hash *fallback_tfm; 165 + 166 + /* For now we'll allocate one page. This 167 + * could eventually be configurable one day. */ 168 + ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); 169 + if (!ctx(tfm)->data) 170 + return -ENOMEM; 171 + 172 + /* Allocate a fallback and abort if it failed. 
*/ 173 + fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, 174 + CRYPTO_ALG_ASYNC | 175 + CRYPTO_ALG_NEED_FALLBACK); 176 + if (IS_ERR(fallback_tfm)) { 177 + printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", 178 + fallback_driver_name); 179 + free_page((unsigned long)(ctx(tfm)->data)); 180 + return PTR_ERR(fallback_tfm); 181 + } 182 + 183 + ctx(tfm)->fallback.tfm = fallback_tfm; 184 + return 0; 185 + } 186 + 187 + static int padlock_sha1_cra_init(struct crypto_tfm *tfm) 188 + { 189 + ctx(tfm)->f_sha_padlock = padlock_do_sha1; 190 + 191 + return padlock_cra_init(tfm); 192 + } 193 + 194 + static int padlock_sha256_cra_init(struct crypto_tfm *tfm) 195 + { 196 + ctx(tfm)->f_sha_padlock = padlock_do_sha256; 197 + 198 + return padlock_cra_init(tfm); 199 + } 200 + 201 + static void padlock_cra_exit(struct crypto_tfm *tfm) 202 + { 203 + if (ctx(tfm)->data) { 204 + free_page((unsigned long)(ctx(tfm)->data)); 205 + ctx(tfm)->data = NULL; 206 + } 207 + 208 + crypto_free_hash(ctx(tfm)->fallback.tfm); 209 + ctx(tfm)->fallback.tfm = NULL; 210 + } 211 + 212 + static struct crypto_alg sha1_alg = { 213 + .cra_name = "sha1", 214 + .cra_driver_name = "sha1-padlock", 215 + .cra_priority = PADLOCK_CRA_PRIORITY, 216 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 217 + CRYPTO_ALG_NEED_FALLBACK, 218 + .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, 219 + .cra_ctxsize = sizeof(struct padlock_sha_ctx), 220 + .cra_module = THIS_MODULE, 221 + .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), 222 + .cra_init = padlock_sha1_cra_init, 223 + .cra_exit = padlock_cra_exit, 224 + .cra_u = { 225 + .digest = { 226 + .dia_digestsize = SHA1_DIGEST_SIZE, 227 + .dia_init = padlock_sha_init, 228 + .dia_update = padlock_sha_update, 229 + .dia_final = padlock_sha_final, 230 + } 231 + } 232 + }; 233 + 234 + static struct crypto_alg sha256_alg = { 235 + .cra_name = "sha256", 236 + .cra_driver_name = "sha256-padlock", 237 + .cra_priority = PADLOCK_CRA_PRIORITY, 238 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST 
| 239 + CRYPTO_ALG_NEED_FALLBACK, 240 + .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, 241 + .cra_ctxsize = sizeof(struct padlock_sha_ctx), 242 + .cra_module = THIS_MODULE, 243 + .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), 244 + .cra_init = padlock_sha256_cra_init, 245 + .cra_exit = padlock_cra_exit, 246 + .cra_u = { 247 + .digest = { 248 + .dia_digestsize = SHA256_DIGEST_SIZE, 249 + .dia_init = padlock_sha_init, 250 + .dia_update = padlock_sha_update, 251 + .dia_final = padlock_sha_final, 252 + } 253 + } 254 + }; 255 + 256 + static void __init padlock_sha_check_fallbacks(void) 257 + { 258 + if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC | 259 + CRYPTO_ALG_NEED_FALLBACK)) 260 + printk(KERN_WARNING PFX 261 + "Couldn't load fallback module for sha1.\n"); 262 + 263 + if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC | 264 + CRYPTO_ALG_NEED_FALLBACK)) 265 + printk(KERN_WARNING PFX 266 + "Couldn't load fallback module for sha256.\n"); 267 + } 268 + 269 + static int __init padlock_init(void) 270 + { 271 + int rc = -ENODEV; 272 + 273 + if (!cpu_has_phe) { 274 + printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n"); 275 + return -ENODEV; 276 + } 277 + 278 + if (!cpu_has_phe_enabled) { 279 + printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. 
Hmm, strange...\n"); 280 + return -ENODEV; 281 + } 282 + 283 + padlock_sha_check_fallbacks(); 284 + 285 + rc = crypto_register_alg(&sha1_alg); 286 + if (rc) 287 + goto out; 288 + 289 + rc = crypto_register_alg(&sha256_alg); 290 + if (rc) 291 + goto out_unreg1; 292 + 293 + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n"); 294 + 295 + return 0; 296 + 297 + out_unreg1: 298 + crypto_unregister_alg(&sha1_alg); 299 + out: 300 + printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); 301 + return rc; 302 + } 303 + 304 + static void __exit padlock_fini(void) 305 + { 306 + crypto_unregister_alg(&sha1_alg); 307 + crypto_unregister_alg(&sha256_alg); 308 + } 309 + 310 + module_init(padlock_init); 311 + module_exit(padlock_fini); 312 + 313 + MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); 314 + MODULE_LICENSE("GPL"); 315 + MODULE_AUTHOR("Michal Ludvig"); 316 + 317 + MODULE_ALIAS("sha1-padlock"); 318 + MODULE_ALIAS("sha256-padlock");
+58
drivers/crypto/padlock.c
··· 1 + /* 2 + * Cryptographic API. 3 + * 4 + * Support for VIA PadLock hardware crypto engine. 5 + * 6 + * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + */ 14 + 15 + #include <linux/module.h> 16 + #include <linux/init.h> 17 + #include <linux/errno.h> 18 + #include <linux/crypto.h> 19 + #include <linux/cryptohash.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/kernel.h> 22 + #include <linux/scatterlist.h> 23 + #include "padlock.h" 24 + 25 + static int __init padlock_init(void) 26 + { 27 + int success = 0; 28 + 29 + if (crypto_has_cipher("aes-padlock", 0, 0)) 30 + success++; 31 + 32 + if (crypto_has_hash("sha1-padlock", 0, 0)) 33 + success++; 34 + 35 + if (crypto_has_hash("sha256-padlock", 0, 0)) 36 + success++; 37 + 38 + if (!success) { 39 + printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n"); 40 + return -ENODEV; 41 + } 42 + 43 + printk(KERN_NOTICE PFX "%d drivers are available.\n", success); 44 + 45 + return 0; 46 + } 47 + 48 + static void __exit padlock_fini(void) 49 + { 50 + } 51 + 52 + module_init(padlock_init); 53 + module_exit(padlock_fini); 54 + 55 + MODULE_DESCRIPTION("Load all configured PadLock algorithms."); 56 + MODULE_LICENSE("GPL"); 57 + MODULE_AUTHOR("Michal Ludvig"); 58 +
+2 -15
drivers/crypto/padlock.h
··· 15 15 16 16 #define PADLOCK_ALIGNMENT 16 17 17 18 - /* Control word. */ 19 - struct cword { 20 - unsigned int __attribute__ ((__packed__)) 21 - rounds:4, 22 - algo:3, 23 - keygen:1, 24 - interm:1, 25 - encdec:1, 26 - ksize:2; 27 - } __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); 28 - 29 18 #define PFX "padlock: " 30 19 31 - #ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES 32 - int padlock_init_aes(void); 33 - void padlock_fini_aes(void); 34 - #endif 20 + #define PADLOCK_CRA_PRIORITY 300 21 + #define PADLOCK_COMPOSITE_PRIORITY 400 35 22 36 23 #endif /* _CRYPTO_PADLOCK_H */
+69 -81
drivers/md/dm-crypt.c
··· 5 5 * This file is released under the GPL. 6 6 */ 7 7 8 + #include <linux/err.h> 8 9 #include <linux/module.h> 9 10 #include <linux/init.h> 10 11 #include <linux/kernel.h> ··· 79 78 */ 80 79 struct crypt_iv_operations *iv_gen_ops; 81 80 char *iv_mode; 82 - void *iv_gen_private; 81 + struct crypto_cipher *iv_gen_private; 83 82 sector_t iv_offset; 84 83 unsigned int iv_size; 85 84 86 - struct crypto_tfm *tfm; 85 + char cipher[CRYPTO_MAX_ALG_NAME]; 86 + char chainmode[CRYPTO_MAX_ALG_NAME]; 87 + struct crypto_blkcipher *tfm; 87 88 unsigned int key_size; 88 89 u8 key[0]; 89 90 }; ··· 99 96 /* 100 97 * Different IV generation algorithms: 101 98 * 102 - * plain: the initial vector is the 32-bit low-endian version of the sector 99 + * plain: the initial vector is the 32-bit little-endian version of the sector 103 100 * number, padded with zeros if neccessary. 104 101 * 105 - * ess_iv: "encrypted sector|salt initial vector", the sector number is 106 - * encrypted with the bulk cipher using a salt as key. The salt 107 - * should be derived from the bulk cipher's key via hashing. 102 + * essiv: "encrypted sector|salt initial vector", the sector number is 103 + * encrypted with the bulk cipher using a salt as key. The salt 104 + * should be derived from the bulk cipher's key via hashing. 
108 105 * 109 106 * plumb: unimplemented, see: 110 107 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 ··· 121 118 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, 122 119 const char *opts) 123 120 { 124 - struct crypto_tfm *essiv_tfm; 125 - struct crypto_tfm *hash_tfm; 121 + struct crypto_cipher *essiv_tfm; 122 + struct crypto_hash *hash_tfm; 123 + struct hash_desc desc; 126 124 struct scatterlist sg; 127 125 unsigned int saltsize; 128 126 u8 *salt; 127 + int err; 129 128 130 129 if (opts == NULL) { 131 130 ti->error = "Digest algorithm missing for ESSIV mode"; ··· 135 130 } 136 131 137 132 /* Hash the cipher key with the given hash algorithm */ 138 - hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP); 139 - if (hash_tfm == NULL) { 133 + hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); 134 + if (IS_ERR(hash_tfm)) { 140 135 ti->error = "Error initializing ESSIV hash"; 141 - return -EINVAL; 136 + return PTR_ERR(hash_tfm); 142 137 } 143 138 144 - if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) { 145 - ti->error = "Expected digest algorithm for ESSIV hash"; 146 - crypto_free_tfm(hash_tfm); 147 - return -EINVAL; 148 - } 149 - 150 - saltsize = crypto_tfm_alg_digestsize(hash_tfm); 139 + saltsize = crypto_hash_digestsize(hash_tfm); 151 140 salt = kmalloc(saltsize, GFP_KERNEL); 152 141 if (salt == NULL) { 153 142 ti->error = "Error kmallocing salt storage in ESSIV"; 154 - crypto_free_tfm(hash_tfm); 143 + crypto_free_hash(hash_tfm); 155 144 return -ENOMEM; 156 145 } 157 146 158 147 sg_set_buf(&sg, cc->key, cc->key_size); 159 - crypto_digest_digest(hash_tfm, &sg, 1, salt); 160 - crypto_free_tfm(hash_tfm); 148 + desc.tfm = hash_tfm; 149 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 150 + err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); 151 + crypto_free_hash(hash_tfm); 152 + 153 + if (err) { 154 + ti->error = "Error calculating hash in ESSIV"; 155 + return err; 156 + } 161 157 162 158 /* 
Setup the essiv_tfm with the given salt */ 163 - essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), 164 - CRYPTO_TFM_MODE_ECB | 165 - CRYPTO_TFM_REQ_MAY_SLEEP); 166 - if (essiv_tfm == NULL) { 159 + essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); 160 + if (IS_ERR(essiv_tfm)) { 167 161 ti->error = "Error allocating crypto tfm for ESSIV"; 168 162 kfree(salt); 169 - return -EINVAL; 163 + return PTR_ERR(essiv_tfm); 170 164 } 171 - if (crypto_tfm_alg_blocksize(essiv_tfm) 172 - != crypto_tfm_alg_ivsize(cc->tfm)) { 165 + if (crypto_cipher_blocksize(essiv_tfm) != 166 + crypto_blkcipher_ivsize(cc->tfm)) { 173 167 ti->error = "Block size of ESSIV cipher does " 174 168 "not match IV size of block cipher"; 175 - crypto_free_tfm(essiv_tfm); 169 + crypto_free_cipher(essiv_tfm); 176 170 kfree(salt); 177 171 return -EINVAL; 178 172 } 179 - if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) { 173 + err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); 174 + if (err) { 180 175 ti->error = "Failed to set key for ESSIV cipher"; 181 - crypto_free_tfm(essiv_tfm); 176 + crypto_free_cipher(essiv_tfm); 182 177 kfree(salt); 183 - return -EINVAL; 178 + return err; 184 179 } 185 180 kfree(salt); 186 181 187 - cc->iv_gen_private = (void *)essiv_tfm; 182 + cc->iv_gen_private = essiv_tfm; 188 183 return 0; 189 184 } 190 185 191 186 static void crypt_iv_essiv_dtr(struct crypt_config *cc) 192 187 { 193 - crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private); 188 + crypto_free_cipher(cc->iv_gen_private); 194 189 cc->iv_gen_private = NULL; 195 190 } 196 191 197 192 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) 198 193 { 199 - struct scatterlist sg; 200 - 201 194 memset(iv, 0, cc->iv_size); 202 195 *(u64 *)iv = cpu_to_le64(sector); 203 - 204 - sg_set_buf(&sg, iv, cc->iv_size); 205 - crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private, 206 - &sg, &sg, cc->iv_size); 207 - 196 + crypto_cipher_encrypt_one(cc->iv_gen_private, 
iv, iv); 208 197 return 0; 209 198 } 210 199 ··· 219 220 int write, sector_t sector) 220 221 { 221 222 u8 iv[cc->iv_size]; 223 + struct blkcipher_desc desc = { 224 + .tfm = cc->tfm, 225 + .info = iv, 226 + .flags = CRYPTO_TFM_REQ_MAY_SLEEP, 227 + }; 222 228 int r; 223 229 224 230 if (cc->iv_gen_ops) { ··· 232 228 return r; 233 229 234 230 if (write) 235 - r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); 231 + r = crypto_blkcipher_encrypt_iv(&desc, out, in, length); 236 232 else 237 - r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); 233 + r = crypto_blkcipher_decrypt_iv(&desc, out, in, length); 238 234 } else { 239 235 if (write) 240 - r = crypto_cipher_encrypt(cc->tfm, out, in, length); 236 + r = crypto_blkcipher_encrypt(&desc, out, in, length); 241 237 else 242 - r = crypto_cipher_decrypt(cc->tfm, out, in, length); 238 + r = crypto_blkcipher_decrypt(&desc, out, in, length); 243 239 } 244 240 245 241 return r; ··· 514 510 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 515 511 { 516 512 struct crypt_config *cc; 517 - struct crypto_tfm *tfm; 513 + struct crypto_blkcipher *tfm; 518 514 char *tmp; 519 515 char *cipher; 520 516 char *chainmode; 521 517 char *ivmode; 522 518 char *ivopts; 523 - unsigned int crypto_flags; 524 519 unsigned int key_size; 525 520 unsigned long long tmpll; 526 521 ··· 559 556 ivmode = "plain"; 560 557 } 561 558 562 - /* Choose crypto_flags according to chainmode */ 563 - if (strcmp(chainmode, "cbc") == 0) 564 - crypto_flags = CRYPTO_TFM_MODE_CBC; 565 - else if (strcmp(chainmode, "ecb") == 0) 566 - crypto_flags = CRYPTO_TFM_MODE_ECB; 567 - else { 568 - ti->error = "Unknown chaining mode"; 569 - goto bad1; 570 - } 571 - 572 - if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) { 559 + if (strcmp(chainmode, "ecb") && !ivmode) { 573 560 ti->error = "This chaining mode requires an IV mechanism"; 574 561 goto bad1; 575 562 } 576 563 577 - tfm = crypto_alloc_tfm(cipher, crypto_flags | 
CRYPTO_TFM_REQ_MAY_SLEEP); 578 - if (!tfm) { 564 + if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, 565 + cipher) >= CRYPTO_MAX_ALG_NAME) { 566 + ti->error = "Chain mode + cipher name is too long"; 567 + goto bad1; 568 + } 569 + 570 + tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); 571 + if (IS_ERR(tfm)) { 579 572 ti->error = "Error allocating crypto tfm"; 580 573 goto bad1; 581 574 } 582 - if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) { 583 - ti->error = "Expected cipher algorithm"; 584 - goto bad2; 585 - } 586 575 576 + strcpy(cc->cipher, cipher); 577 + strcpy(cc->chainmode, chainmode); 587 578 cc->tfm = tfm; 588 579 589 580 /* ··· 600 603 cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) 601 604 goto bad2; 602 605 603 - if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv) 606 + cc->iv_size = crypto_blkcipher_ivsize(tfm); 607 + if (cc->iv_size) 604 608 /* at least a 64 bit sector number should fit in our buffer */ 605 - cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), 609 + cc->iv_size = max(cc->iv_size, 606 610 (unsigned int)(sizeof(u64) / sizeof(u8))); 607 611 else { 608 - cc->iv_size = 0; 609 612 if (cc->iv_gen_ops) { 610 613 DMWARN("Selected cipher does not support IVs"); 611 614 if (cc->iv_gen_ops->dtr) ··· 626 629 goto bad4; 627 630 } 628 631 629 - if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) { 632 + if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) { 630 633 ti->error = "Error setting key"; 631 634 goto bad5; 632 635 } ··· 672 675 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 673 676 cc->iv_gen_ops->dtr(cc); 674 677 bad2: 675 - crypto_free_tfm(tfm); 678 + crypto_free_blkcipher(tfm); 676 679 bad1: 677 680 /* Must zero key material before freeing */ 678 681 memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); ··· 690 693 kfree(cc->iv_mode); 691 694 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 692 695 cc->iv_gen_ops->dtr(cc); 693 - crypto_free_tfm(cc->tfm); 696 + 
crypto_free_blkcipher(cc->tfm); 694 697 dm_put_device(ti, cc->dev); 695 698 696 699 /* Must zero key material before freeing */ ··· 855 858 break; 856 859 857 860 case STATUSTYPE_TABLE: 858 - cipher = crypto_tfm_alg_name(cc->tfm); 861 + cipher = crypto_blkcipher_name(cc->tfm); 859 862 860 - switch(cc->tfm->crt_cipher.cit_mode) { 861 - case CRYPTO_TFM_MODE_CBC: 862 - chainmode = "cbc"; 863 - break; 864 - case CRYPTO_TFM_MODE_ECB: 865 - chainmode = "ecb"; 866 - break; 867 - default: 868 - BUG(); 869 - } 863 + chainmode = cc->chainmode; 870 864 871 865 if (cc->iv_mode) 872 866 DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
+42 -26
drivers/net/ppp_mppe.c
··· 43 43 * deprecated in 2.6 44 44 */ 45 45 46 + #include <linux/err.h> 46 47 #include <linux/module.h> 47 48 #include <linux/kernel.h> 48 49 #include <linux/version.h> ··· 65 64 MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); 66 65 MODULE_VERSION("1.0.2"); 67 66 68 - static void 67 + static unsigned int 69 68 setup_sg(struct scatterlist *sg, const void *address, unsigned int length) 70 69 { 71 70 sg[0].page = virt_to_page(address); 72 71 sg[0].offset = offset_in_page(address); 73 72 sg[0].length = length; 73 + return length; 74 74 } 75 75 76 76 #define SHA1_PAD_SIZE 40 ··· 97 95 * State for an MPPE (de)compressor. 98 96 */ 99 97 struct ppp_mppe_state { 100 - struct crypto_tfm *arc4; 101 - struct crypto_tfm *sha1; 98 + struct crypto_blkcipher *arc4; 99 + struct crypto_hash *sha1; 102 100 unsigned char *sha1_digest; 103 101 unsigned char master_key[MPPE_MAX_KEY_LEN]; 104 102 unsigned char session_key[MPPE_MAX_KEY_LEN]; ··· 138 136 */ 139 137 static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) 140 138 { 139 + struct hash_desc desc; 141 140 struct scatterlist sg[4]; 141 + unsigned int nbytes; 142 142 143 - setup_sg(&sg[0], state->master_key, state->keylen); 144 - setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1)); 145 - setup_sg(&sg[2], state->session_key, state->keylen); 146 - setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2)); 143 + nbytes = setup_sg(&sg[0], state->master_key, state->keylen); 144 + nbytes += setup_sg(&sg[1], sha_pad->sha_pad1, 145 + sizeof(sha_pad->sha_pad1)); 146 + nbytes += setup_sg(&sg[2], state->session_key, state->keylen); 147 + nbytes += setup_sg(&sg[3], sha_pad->sha_pad2, 148 + sizeof(sha_pad->sha_pad2)); 147 149 148 - crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest); 150 + desc.tfm = state->sha1; 151 + desc.flags = 0; 152 + 153 + crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); 149 154 150 155 memcpy(InterimKey, state->sha1_digest, state->keylen); 151 
156 } ··· 165 156 { 166 157 unsigned char InterimKey[MPPE_MAX_KEY_LEN]; 167 158 struct scatterlist sg_in[1], sg_out[1]; 159 + struct blkcipher_desc desc = { .tfm = state->arc4 }; 168 160 169 161 get_new_key_from_sha(state, InterimKey); 170 162 if (!initial_key) { 171 - crypto_cipher_setkey(state->arc4, InterimKey, state->keylen); 163 + crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); 172 164 setup_sg(sg_in, InterimKey, state->keylen); 173 165 setup_sg(sg_out, state->session_key, state->keylen); 174 - if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, 175 - state->keylen) != 0) { 166 + if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, 167 + state->keylen) != 0) { 176 168 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); 177 169 } 178 170 } else { ··· 185 175 state->session_key[1] = 0x26; 186 176 state->session_key[2] = 0x9e; 187 177 } 188 - crypto_cipher_setkey(state->arc4, state->session_key, state->keylen); 178 + crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen); 189 179 } 190 180 191 181 /* ··· 206 196 207 197 memset(state, 0, sizeof(*state)); 208 198 209 - state->arc4 = crypto_alloc_tfm("arc4", 0); 210 - if (!state->arc4) 199 + state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 200 + if (IS_ERR(state->arc4)) { 201 + state->arc4 = NULL; 211 202 goto out_free; 203 + } 212 204 213 - state->sha1 = crypto_alloc_tfm("sha1", 0); 214 - if (!state->sha1) 205 + state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); 206 + if (IS_ERR(state->sha1)) { 207 + state->sha1 = NULL; 215 208 goto out_free; 209 + } 216 210 217 - digestsize = crypto_tfm_alg_digestsize(state->sha1); 211 + digestsize = crypto_hash_digestsize(state->sha1); 218 212 if (digestsize < MPPE_MAX_KEY_LEN) 219 213 goto out_free; 220 214 ··· 243 229 if (state->sha1_digest) 244 230 kfree(state->sha1_digest); 245 231 if (state->sha1) 246 - crypto_free_tfm(state->sha1); 232 + crypto_free_hash(state->sha1); 247 233 if (state->arc4) 248 - 
crypto_free_tfm(state->arc4); 234 + crypto_free_blkcipher(state->arc4); 249 235 kfree(state); 250 236 out: 251 237 return NULL; ··· 261 247 if (state->sha1_digest) 262 248 kfree(state->sha1_digest); 263 249 if (state->sha1) 264 - crypto_free_tfm(state->sha1); 250 + crypto_free_hash(state->sha1); 265 251 if (state->arc4) 266 - crypto_free_tfm(state->arc4); 252 + crypto_free_blkcipher(state->arc4); 267 253 kfree(state); 268 254 } 269 255 } ··· 370 356 int isize, int osize) 371 357 { 372 358 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 359 + struct blkcipher_desc desc = { .tfm = state->arc4 }; 373 360 int proto; 374 361 struct scatterlist sg_in[1], sg_out[1]; 375 362 ··· 428 413 /* Encrypt packet */ 429 414 setup_sg(sg_in, ibuf, isize); 430 415 setup_sg(sg_out, obuf, osize); 431 - if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) { 416 + if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { 432 417 printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); 433 418 return -1; 434 419 } ··· 477 462 int osize) 478 463 { 479 464 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 465 + struct blkcipher_desc desc = { .tfm = state->arc4 }; 480 466 unsigned ccount; 481 467 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; 482 468 int sanity = 0; ··· 615 599 */ 616 600 setup_sg(sg_in, ibuf, 1); 617 601 setup_sg(sg_out, obuf, 1); 618 - if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) { 602 + if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { 619 603 printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); 620 604 return DECOMP_ERROR; 621 605 } ··· 635 619 /* And finally, decrypt the rest of the packet. 
*/ 636 620 setup_sg(sg_in, ibuf + 1, isize - 1); 637 621 setup_sg(sg_out, obuf + 1, osize - 1); 638 - if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) { 622 + if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) { 639 623 printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); 640 624 return DECOMP_ERROR; 641 625 } ··· 710 694 static int __init ppp_mppe_init(void) 711 695 { 712 696 int answer; 713 - if (!(crypto_alg_available("arc4", 0) && 714 - crypto_alg_available("sha1", 0))) 697 + if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && 698 + crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) 715 699 return -ENODEV; 716 700 717 701 sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
+12 -10
drivers/net/wireless/airo.c
··· 19 19 20 20 ======================================================================*/ 21 21 22 + #include <linux/err.h> 22 23 #include <linux/init.h> 23 24 24 25 #include <linux/kernel.h> ··· 1204 1203 struct iw_spy_data spy_data; 1205 1204 struct iw_public_data wireless_data; 1206 1205 /* MIC stuff */ 1207 - struct crypto_tfm *tfm; 1206 + struct crypto_cipher *tfm; 1208 1207 mic_module mod[2]; 1209 1208 mic_statistics micstats; 1210 1209 HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors ··· 1272 1271 1273 1272 static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq); 1274 1273 static void MoveWindow(miccntx *context, u32 micSeq); 1275 - static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *); 1274 + static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, 1275 + struct crypto_cipher *tfm); 1276 1276 static void emmh32_init(emmh32_context *context); 1277 1277 static void emmh32_update(emmh32_context *context, u8 *pOctets, int len); 1278 1278 static void emmh32_final(emmh32_context *context, u8 digest[4]); ··· 1341 1339 int i; 1342 1340 1343 1341 if (ai->tfm == NULL) 1344 - ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); 1342 + ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 1345 1343 1346 - if (ai->tfm == NULL) { 1344 + if (IS_ERR(ai->tfm)) { 1347 1345 airo_print_err(ai->dev->name, "failed to load transform for AES"); 1346 + ai->tfm = NULL; 1348 1347 return ERROR; 1349 1348 } 1350 1349 ··· 1611 1608 static unsigned char aes_counter[16]; 1612 1609 1613 1610 /* expand the key to fill the MMH coefficient array */ 1614 - static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm) 1611 + static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, 1612 + struct crypto_cipher *tfm) 1615 1613 { 1616 1614 /* take the keying material, expand if necessary, truncate at 16-bytes */ 1617 1615 /* run 
through AES counter mode to generate context->coeff[] */ ··· 1620 1616 int i,j; 1621 1617 u32 counter; 1622 1618 u8 *cipher, plain[16]; 1623 - struct scatterlist sg[1]; 1624 1619 1625 1620 crypto_cipher_setkey(tfm, pkey, 16); 1626 1621 counter = 0; ··· 1630 1627 aes_counter[12] = (u8)(counter >> 24); 1631 1628 counter++; 1632 1629 memcpy (plain, aes_counter, 16); 1633 - sg_set_buf(sg, plain, 16); 1634 - crypto_cipher_encrypt(tfm, sg, sg, 16); 1635 - cipher = kmap(sg->page) + sg->offset; 1630 + crypto_cipher_encrypt_one(tfm, plain, plain); 1631 + cipher = plain; 1636 1632 for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { 1637 1633 context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); 1638 1634 j += 4; ··· 2434 2432 ai->shared, ai->shared_dma); 2435 2433 } 2436 2434 } 2437 - crypto_free_tfm(ai->tfm); 2435 + crypto_free_cipher(ai->tfm); 2438 2436 del_airo_dev( dev ); 2439 2437 free_netdev( dev ); 2440 2438 }
+73 -61
drivers/scsi/iscsi_tcp.c
··· 26 26 * Zhenyu Wang 27 27 */ 28 28 29 + #include <linux/err.h> 29 30 #include <linux/types.h> 30 31 #include <linux/list.h> 31 32 #include <linux/inet.h> ··· 108 107 u8* crc) 109 108 { 110 109 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 110 + struct hash_desc desc; 111 111 112 - crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc); 112 + desc.tfm = tcp_conn->tx_tfm; 113 + desc.flags = 0; 114 + crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc); 113 115 buf->sg.length += sizeof(uint32_t); 114 116 } 115 117 ··· 456 452 } 457 453 458 454 if (conn->hdrdgst_en) { 455 + struct hash_desc desc; 459 456 struct scatterlist sg; 460 457 461 458 sg_init_one(&sg, (u8 *)hdr, 462 459 sizeof(struct iscsi_hdr) + ahslen); 463 - crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst); 460 + desc.tfm = tcp_conn->rx_tfm; 461 + desc.flags = 0; 462 + crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst); 464 463 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + 465 464 ahslen); 466 465 if (cdgst != rdgst) { ··· 680 673 memcpy(&temp, sg, sizeof(struct scatterlist)); 681 674 temp.offset = offset; 682 675 temp.length = length; 683 - crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1); 676 + crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length); 684 677 } 685 678 686 679 static void ··· 689 682 struct scatterlist tmp; 690 683 691 684 sg_init_one(&tmp, buf, len); 692 - crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1); 685 + crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len); 693 686 } 694 687 695 688 static int iscsi_scsi_data_in(struct iscsi_conn *conn) ··· 743 736 if (!rc) { 744 737 if (conn->datadgst_en) { 745 738 if (!offset) 746 - crypto_digest_update( 747 - tcp_conn->data_rx_tfm, 748 - &sg[i], 1); 739 + crypto_hash_update( 740 + &tcp_conn->data_rx_hash, 741 + &sg[i], sg[i].length); 749 742 else 750 743 partial_sg_digest_update(tcp_conn, 751 744 &sg[i], ··· 884 877 rc = iscsi_tcp_hdr_recv(conn); 885 878 if (!rc && tcp_conn->in.datalen) { 
886 879 if (conn->datadgst_en) { 887 - BUG_ON(!tcp_conn->data_rx_tfm); 888 - crypto_digest_init(tcp_conn->data_rx_tfm); 880 + crypto_hash_init(&tcp_conn->data_rx_hash); 889 881 } 890 882 tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; 891 883 } else if (rc) { ··· 937 931 tcp_conn->in.padding); 938 932 memset(pad, 0, tcp_conn->in.padding); 939 933 sg_init_one(&sg, pad, tcp_conn->in.padding); 940 - crypto_digest_update(tcp_conn->data_rx_tfm, 941 - &sg, 1); 934 + crypto_hash_update(&tcp_conn->data_rx_hash, 935 + &sg, sg.length); 942 936 } 943 - crypto_digest_final(tcp_conn->data_rx_tfm, 944 - (u8 *) & tcp_conn->in.datadgst); 937 + crypto_hash_final(&tcp_conn->data_rx_hash, 938 + (u8 *)&tcp_conn->in.datadgst); 945 939 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); 946 940 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; 947 941 } else ··· 1187 1181 { 1188 1182 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1189 1183 1190 - BUG_ON(!tcp_conn->data_tx_tfm); 1191 - crypto_digest_init(tcp_conn->data_tx_tfm); 1184 + crypto_hash_init(&tcp_conn->data_tx_hash); 1192 1185 tcp_ctask->digest_count = 4; 1193 1186 } 1194 1187 ··· 1201 1196 int sent = 0; 1202 1197 1203 1198 if (final) 1204 - crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest); 1199 + crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest); 1205 1200 1206 1201 iscsi_buf_init_iov(buf, (char*)digest, 4); 1207 1202 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); ··· 1496 1491 if (rc) { 1497 1492 tcp_ctask->xmstate |= XMSTATE_IMM_DATA; 1498 1493 if (conn->datadgst_en) { 1499 - crypto_digest_final(tcp_conn->data_tx_tfm, 1500 - (u8*)&tcp_ctask->immdigest); 1494 + crypto_hash_final(&tcp_conn->data_tx_hash, 1495 + (u8 *)&tcp_ctask->immdigest); 1501 1496 debug_tcp("tx imm sendpage fail 0x%x\n", 1502 1497 tcp_ctask->datadigest); 1503 1498 } 1504 1499 return rc; 1505 1500 } 1506 1501 if (conn->datadgst_en) 1507 - crypto_digest_update(tcp_conn->data_tx_tfm, 1508 - &tcp_ctask->sendbuf.sg, 1); 
1502 + crypto_hash_update(&tcp_conn->data_tx_hash, 1503 + &tcp_ctask->sendbuf.sg, 1504 + tcp_ctask->sendbuf.sg.length); 1509 1505 1510 1506 if (!ctask->imm_count) 1511 1507 break; ··· 1583 1577 tcp_ctask->xmstate |= XMSTATE_UNS_DATA; 1584 1578 /* will continue with this ctask later.. */ 1585 1579 if (conn->datadgst_en) { 1586 - crypto_digest_final(tcp_conn->data_tx_tfm, 1587 - (u8 *)&dtask->digest); 1580 + crypto_hash_final(&tcp_conn->data_tx_hash, 1581 + (u8 *)&dtask->digest); 1588 1582 debug_tcp("tx uns data fail 0x%x\n", 1589 1583 dtask->digest); 1590 1584 } ··· 1599 1593 * so pass it 1600 1594 */ 1601 1595 if (conn->datadgst_en && tcp_ctask->sent - start > 0) 1602 - crypto_digest_update(tcp_conn->data_tx_tfm, 1603 - &tcp_ctask->sendbuf.sg, 1); 1596 + crypto_hash_update(&tcp_conn->data_tx_hash, 1597 + &tcp_ctask->sendbuf.sg, 1598 + tcp_ctask->sendbuf.sg.length); 1604 1599 1605 1600 if (!ctask->data_count) 1606 1601 break; ··· 1675 1668 tcp_ctask->xmstate |= XMSTATE_SOL_DATA; 1676 1669 /* will continue with this ctask later.. */ 1677 1670 if (conn->datadgst_en) { 1678 - crypto_digest_final(tcp_conn->data_tx_tfm, 1671 + crypto_hash_final(&tcp_conn->data_tx_hash, 1679 1672 (u8 *)&dtask->digest); 1680 1673 debug_tcp("r2t data send fail 0x%x\n", dtask->digest); 1681 1674 } ··· 1684 1677 1685 1678 BUG_ON(r2t->data_count < 0); 1686 1679 if (conn->datadgst_en) 1687 - crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg, 1688 - 1); 1680 + crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg, 1681 + r2t->sendbuf.sg.length); 1689 1682 1690 1683 if (r2t->data_count) { 1691 1684 BUG_ON(ctask->sc->use_sg == 0); ··· 1773 1766 } 1774 1767 1775 1768 if (conn->datadgst_en) { 1776 - crypto_digest_update(tcp_conn->data_tx_tfm, 1777 - &tcp_ctask->sendbuf.sg, 1); 1769 + crypto_hash_update(&tcp_conn->data_tx_hash, 1770 + &tcp_ctask->sendbuf.sg, 1771 + tcp_ctask->sendbuf.sg.length); 1778 1772 /* imm data? 
*/ 1779 1773 if (!dtask) { 1780 1774 rc = iscsi_digest_final_send(conn, ctask, ··· 1971 1963 /* now free tcp_conn */ 1972 1964 if (digest) { 1973 1965 if (tcp_conn->tx_tfm) 1974 - crypto_free_tfm(tcp_conn->tx_tfm); 1966 + crypto_free_hash(tcp_conn->tx_tfm); 1975 1967 if (tcp_conn->rx_tfm) 1976 - crypto_free_tfm(tcp_conn->rx_tfm); 1977 - if (tcp_conn->data_tx_tfm) 1978 - crypto_free_tfm(tcp_conn->data_tx_tfm); 1979 - if (tcp_conn->data_rx_tfm) 1980 - crypto_free_tfm(tcp_conn->data_rx_tfm); 1968 + crypto_free_hash(tcp_conn->rx_tfm); 1969 + if (tcp_conn->data_tx_hash.tfm) 1970 + crypto_free_hash(tcp_conn->data_tx_hash.tfm); 1971 + if (tcp_conn->data_rx_hash.tfm) 1972 + crypto_free_hash(tcp_conn->data_rx_hash.tfm); 1981 1973 } 1982 1974 1983 1975 kfree(tcp_conn); ··· 2138 2130 if (conn->hdrdgst_en) { 2139 2131 tcp_conn->hdr_size += sizeof(__u32); 2140 2132 if (!tcp_conn->tx_tfm) 2141 - tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c", 2142 - 0); 2143 - if (!tcp_conn->tx_tfm) 2144 - return -ENOMEM; 2133 + tcp_conn->tx_tfm = 2134 + crypto_alloc_hash("crc32c", 0, 2135 + CRYPTO_ALG_ASYNC); 2136 + if (IS_ERR(tcp_conn->tx_tfm)) 2137 + return PTR_ERR(tcp_conn->tx_tfm); 2145 2138 if (!tcp_conn->rx_tfm) 2146 - tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c", 2147 - 0); 2148 - if (!tcp_conn->rx_tfm) { 2149 - crypto_free_tfm(tcp_conn->tx_tfm); 2150 - return -ENOMEM; 2139 + tcp_conn->rx_tfm = 2140 + crypto_alloc_hash("crc32c", 0, 2141 + CRYPTO_ALG_ASYNC); 2142 + if (IS_ERR(tcp_conn->rx_tfm)) { 2143 + crypto_free_hash(tcp_conn->tx_tfm); 2144 + return PTR_ERR(tcp_conn->rx_tfm); 2151 2145 } 2152 2146 } else { 2153 2147 if (tcp_conn->tx_tfm) 2154 - crypto_free_tfm(tcp_conn->tx_tfm); 2148 + crypto_free_hash(tcp_conn->tx_tfm); 2155 2149 if (tcp_conn->rx_tfm) 2156 - crypto_free_tfm(tcp_conn->rx_tfm); 2150 + crypto_free_hash(tcp_conn->rx_tfm); 2157 2151 } 2158 2152 break; 2159 2153 case ISCSI_PARAM_DATADGST_EN: 2160 2154 iscsi_set_param(cls_conn, param, buf, buflen); 2161 2155 if 
(conn->datadgst_en) { 2162 - if (!tcp_conn->data_tx_tfm) 2163 - tcp_conn->data_tx_tfm = 2164 - crypto_alloc_tfm("crc32c", 0); 2165 - if (!tcp_conn->data_tx_tfm) 2166 - return -ENOMEM; 2167 - if (!tcp_conn->data_rx_tfm) 2168 - tcp_conn->data_rx_tfm = 2169 - crypto_alloc_tfm("crc32c", 0); 2170 - if (!tcp_conn->data_rx_tfm) { 2171 - crypto_free_tfm(tcp_conn->data_tx_tfm); 2172 - return -ENOMEM; 2156 + if (!tcp_conn->data_tx_hash.tfm) 2157 + tcp_conn->data_tx_hash.tfm = 2158 + crypto_alloc_hash("crc32c", 0, 2159 + CRYPTO_ALG_ASYNC); 2160 + if (IS_ERR(tcp_conn->data_tx_hash.tfm)) 2161 + return PTR_ERR(tcp_conn->data_tx_hash.tfm); 2162 + if (!tcp_conn->data_rx_hash.tfm) 2163 + tcp_conn->data_rx_hash.tfm = 2164 + crypto_alloc_hash("crc32c", 0, 2165 + CRYPTO_ALG_ASYNC); 2166 + if (IS_ERR(tcp_conn->data_rx_hash.tfm)) { 2167 + crypto_free_hash(tcp_conn->data_tx_hash.tfm); 2168 + return PTR_ERR(tcp_conn->data_rx_hash.tfm); 2173 2169 } 2174 2170 } else { 2175 - if (tcp_conn->data_tx_tfm) 2176 - crypto_free_tfm(tcp_conn->data_tx_tfm); 2177 - if (tcp_conn->data_rx_tfm) 2178 - crypto_free_tfm(tcp_conn->data_rx_tfm); 2171 + if (tcp_conn->data_tx_hash.tfm) 2172 + crypto_free_hash(tcp_conn->data_tx_hash.tfm); 2173 + if (tcp_conn->data_rx_hash.tfm) 2174 + crypto_free_hash(tcp_conn->data_rx_hash.tfm); 2179 2175 } 2180 2176 tcp_conn->sendpage = conn->datadgst_en ? 2181 2177 sock_no_sendpage : tcp_conn->sock->ops->sendpage;
+5 -4
drivers/scsi/iscsi_tcp.h
··· 51 51 #define ISCSI_SG_TABLESIZE SG_ALL 52 52 #define ISCSI_TCP_MAX_CMD_LEN 16 53 53 54 + struct crypto_hash; 54 55 struct socket; 55 56 56 57 /* Socket connection recieve helper */ ··· 85 84 /* iSCSI connection-wide sequencing */ 86 85 int hdr_size; /* PDU header size */ 87 86 88 - struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */ 89 - struct crypto_tfm *data_rx_tfm; /* CRC32C (Rx) for data */ 87 + struct crypto_hash *rx_tfm; /* CRC32C (Rx) */ 88 + struct hash_desc data_rx_hash; /* CRC32C (Rx) for data */ 90 89 91 90 /* control data */ 92 91 struct iscsi_tcp_recv in; /* TCP receive context */ ··· 98 97 void (*old_write_space)(struct sock *); 99 98 100 99 /* xmit */ 101 - struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */ 102 - struct crypto_tfm *data_tx_tfm; /* CRC32C (Tx) for data */ 100 + struct crypto_hash *tx_tfm; /* CRC32C (Tx) */ 101 + struct hash_desc data_tx_hash; /* CRC32C (Tx) for data */ 103 102 104 103 /* MIB custom statistics */ 105 104 uint32_t sendpage_failures_cnt;
+11 -10
fs/nfsd/nfs4recover.c
··· 33 33 * 34 34 */ 35 35 36 - 36 + #include <linux/err.h> 37 37 #include <linux/sunrpc/svc.h> 38 38 #include <linux/nfsd/nfsd.h> 39 39 #include <linux/nfs4.h> ··· 87 87 nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname) 88 88 { 89 89 struct xdr_netobj cksum; 90 - struct crypto_tfm *tfm; 90 + struct hash_desc desc; 91 91 struct scatterlist sg[1]; 92 92 int status = nfserr_resource; 93 93 94 94 dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n", 95 95 clname->len, clname->data); 96 - tfm = crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP); 97 - if (tfm == NULL) 98 - goto out; 99 - cksum.len = crypto_tfm_alg_digestsize(tfm); 96 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 97 + desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 98 + if (IS_ERR(desc.tfm)) 99 + goto out_no_tfm; 100 + cksum.len = crypto_hash_digestsize(desc.tfm); 100 101 cksum.data = kmalloc(cksum.len, GFP_KERNEL); 101 102 if (cksum.data == NULL) 102 103 goto out; 103 - crypto_digest_init(tfm); 104 104 105 105 sg[0].page = virt_to_page(clname->data); 106 106 sg[0].offset = offset_in_page(clname->data); 107 107 sg[0].length = clname->len; 108 108 109 - crypto_digest_update(tfm, sg, 1); 110 - crypto_digest_final(tfm, cksum.data); 109 + if (crypto_hash_digest(&desc, sg, sg->length, cksum.data)) 110 + goto out; 111 111 112 112 md5_to_hex(dname, cksum.data); 113 113 114 114 kfree(cksum.data); 115 115 status = nfs_ok; 116 116 out: 117 - crypto_free_tfm(tfm); 117 + crypto_free_hash(desc.tfm); 118 + out_no_tfm: 118 119 return status; 119 120 } 120 121
+156
include/crypto/algapi.h
··· 1 + /* 2 + * Cryptographic API for algorithms (i.e., low-level API). 3 + * 4 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + #ifndef _CRYPTO_ALGAPI_H 13 + #define _CRYPTO_ALGAPI_H 14 + 15 + #include <linux/crypto.h> 16 + 17 + struct module; 18 + struct seq_file; 19 + 20 + struct crypto_type { 21 + unsigned int (*ctxsize)(struct crypto_alg *alg); 22 + int (*init)(struct crypto_tfm *tfm); 23 + void (*exit)(struct crypto_tfm *tfm); 24 + void (*show)(struct seq_file *m, struct crypto_alg *alg); 25 + }; 26 + 27 + struct crypto_instance { 28 + struct crypto_alg alg; 29 + 30 + struct crypto_template *tmpl; 31 + struct hlist_node list; 32 + 33 + void *__ctx[] CRYPTO_MINALIGN_ATTR; 34 + }; 35 + 36 + struct crypto_template { 37 + struct list_head list; 38 + struct hlist_head instances; 39 + struct module *module; 40 + 41 + struct crypto_instance *(*alloc)(void *param, unsigned int len); 42 + void (*free)(struct crypto_instance *inst); 43 + 44 + char name[CRYPTO_MAX_ALG_NAME]; 45 + }; 46 + 47 + struct crypto_spawn { 48 + struct list_head list; 49 + struct crypto_alg *alg; 50 + struct crypto_instance *inst; 51 + }; 52 + 53 + struct scatter_walk { 54 + struct scatterlist *sg; 55 + unsigned int offset; 56 + }; 57 + 58 + struct blkcipher_walk { 59 + union { 60 + struct { 61 + struct page *page; 62 + unsigned long offset; 63 + } phys; 64 + 65 + struct { 66 + u8 *page; 67 + u8 *addr; 68 + } virt; 69 + } src, dst; 70 + 71 + struct scatter_walk in; 72 + unsigned int nbytes; 73 + 74 + struct scatter_walk out; 75 + unsigned int total; 76 + 77 + void *page; 78 + u8 *buffer; 79 + u8 *iv; 80 + 81 + int flags; 82 + }; 83 + 84 + extern const struct crypto_type crypto_blkcipher_type; 85 
+ extern const struct crypto_type crypto_hash_type; 86 + 87 + void crypto_mod_put(struct crypto_alg *alg); 88 + 89 + int crypto_register_template(struct crypto_template *tmpl); 90 + void crypto_unregister_template(struct crypto_template *tmpl); 91 + struct crypto_template *crypto_lookup_template(const char *name); 92 + 93 + int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, 94 + struct crypto_instance *inst); 95 + void crypto_drop_spawn(struct crypto_spawn *spawn); 96 + struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn); 97 + 98 + struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, 99 + u32 type, u32 mask); 100 + struct crypto_instance *crypto_alloc_instance(const char *name, 101 + struct crypto_alg *alg); 102 + 103 + int blkcipher_walk_done(struct blkcipher_desc *desc, 104 + struct blkcipher_walk *walk, int err); 105 + int blkcipher_walk_virt(struct blkcipher_desc *desc, 106 + struct blkcipher_walk *walk); 107 + int blkcipher_walk_phys(struct blkcipher_desc *desc, 108 + struct blkcipher_walk *walk); 109 + 110 + static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) 111 + { 112 + unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); 113 + unsigned long align = crypto_tfm_alg_alignmask(tfm); 114 + 115 + if (align <= crypto_tfm_ctx_alignment()) 116 + align = 1; 117 + return (void *)ALIGN(addr, align); 118 + } 119 + 120 + static inline void *crypto_instance_ctx(struct crypto_instance *inst) 121 + { 122 + return inst->__ctx; 123 + } 124 + 125 + static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) 126 + { 127 + return crypto_tfm_ctx(&tfm->base); 128 + } 129 + 130 + static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) 131 + { 132 + return crypto_tfm_ctx_aligned(&tfm->base); 133 + } 134 + 135 + static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) 136 + { 137 + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; 138 + } 139 + 140 + 
static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm) 141 + { 142 + return crypto_tfm_ctx_aligned(&tfm->base); 143 + } 144 + 145 + static inline void blkcipher_walk_init(struct blkcipher_walk *walk, 146 + struct scatterlist *dst, 147 + struct scatterlist *src, 148 + unsigned int nbytes) 149 + { 150 + walk->in.sg = src; 151 + walk->out.sg = dst; 152 + walk->total = nbytes; 153 + } 154 + 155 + #endif /* _CRYPTO_ALGAPI_H */ 156 +
+22
include/crypto/twofish.h
··· 1 + #ifndef _CRYPTO_TWOFISH_H 2 + #define _CRYPTO_TWOFISH_H 3 + 4 + #include <linux/types.h> 5 + 6 + #define TF_MIN_KEY_SIZE 16 7 + #define TF_MAX_KEY_SIZE 32 8 + #define TF_BLOCK_SIZE 16 9 + 10 + struct crypto_tfm; 11 + 12 + /* Structure for an expanded Twofish key. s contains the key-dependent 13 + * S-boxes composed with the MDS matrix; w contains the eight "whitening" 14 + * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note 15 + * that k[i] corresponds to what the Twofish paper calls K[i+8]. */ 16 + struct twofish_ctx { 17 + u32 s[4][256], w[8], k[32]; 18 + }; 19 + 20 + int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); 21 + 22 + #endif
+607 -82
include/linux/crypto.h
··· 17 17 #ifndef _LINUX_CRYPTO_H 18 18 #define _LINUX_CRYPTO_H 19 19 20 + #include <asm/atomic.h> 20 21 #include <linux/module.h> 21 22 #include <linux/kernel.h> 22 - #include <linux/types.h> 23 23 #include <linux/list.h> 24 + #include <linux/slab.h> 24 25 #include <linux/string.h> 25 - #include <asm/page.h> 26 + #include <linux/uaccess.h> 26 27 27 28 /* 28 29 * Algorithm masks and types. 29 30 */ 30 - #define CRYPTO_ALG_TYPE_MASK 0x000000ff 31 + #define CRYPTO_ALG_TYPE_MASK 0x0000000f 31 32 #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 32 33 #define CRYPTO_ALG_TYPE_DIGEST 0x00000002 33 - #define CRYPTO_ALG_TYPE_COMPRESS 0x00000004 34 + #define CRYPTO_ALG_TYPE_HASH 0x00000003 35 + #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 36 + #define CRYPTO_ALG_TYPE_COMPRESS 0x00000005 37 + 38 + #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 39 + 40 + #define CRYPTO_ALG_LARVAL 0x00000010 41 + #define CRYPTO_ALG_DEAD 0x00000020 42 + #define CRYPTO_ALG_DYING 0x00000040 43 + #define CRYPTO_ALG_ASYNC 0x00000080 44 + 45 + /* 46 + * Set this bit if and only if the algorithm requires another algorithm of 47 + * the same type to handle corner cases. 48 + */ 49 + #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 34 50 35 51 /* 36 52 * Transform masks and values (for crt_flags). ··· 77 61 #define CRYPTO_DIR_ENCRYPT 1 78 62 #define CRYPTO_DIR_DECRYPT 0 79 63 64 + /* 65 + * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual 66 + * declaration) is used to ensure that the crypto_tfm context structure is 67 + * aligned correctly for the given architecture so that there are no alignment 68 + * faults for C data types. In particular, this is required on platforms such 69 + * as arm where pointers are 32-bit aligned but there are data types such as 70 + * u64 which require 64-bit alignment. 
71 + */ 72 + #if defined(ARCH_KMALLOC_MINALIGN) 73 + #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN 74 + #elif defined(ARCH_SLAB_MINALIGN) 75 + #define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN 76 + #endif 77 + 78 + #ifdef CRYPTO_MINALIGN 79 + #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) 80 + #else 81 + #define CRYPTO_MINALIGN_ATTR 82 + #endif 83 + 80 84 struct scatterlist; 85 + struct crypto_blkcipher; 86 + struct crypto_hash; 81 87 struct crypto_tfm; 88 + struct crypto_type; 89 + 90 + struct blkcipher_desc { 91 + struct crypto_blkcipher *tfm; 92 + void *info; 93 + u32 flags; 94 + }; 82 95 83 96 struct cipher_desc { 84 97 struct crypto_tfm *tfm; ··· 117 72 void *info; 118 73 }; 119 74 75 + struct hash_desc { 76 + struct crypto_hash *tfm; 77 + u32 flags; 78 + }; 79 + 120 80 /* 121 81 * Algorithms: modular crypto algorithm implementations, managed 122 82 * via crypto_register_alg() and crypto_unregister_alg(). 123 83 */ 84 + struct blkcipher_alg { 85 + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, 86 + unsigned int keylen); 87 + int (*encrypt)(struct blkcipher_desc *desc, 88 + struct scatterlist *dst, struct scatterlist *src, 89 + unsigned int nbytes); 90 + int (*decrypt)(struct blkcipher_desc *desc, 91 + struct scatterlist *dst, struct scatterlist *src, 92 + unsigned int nbytes); 93 + 94 + unsigned int min_keysize; 95 + unsigned int max_keysize; 96 + unsigned int ivsize; 97 + }; 98 + 124 99 struct cipher_alg { 125 100 unsigned int cia_min_keysize; 126 101 unsigned int cia_max_keysize; 127 102 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, 128 - unsigned int keylen, u32 *flags); 103 + unsigned int keylen); 129 104 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 130 105 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 131 106 132 107 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, 133 108 u8 *dst, const u8 *src, 134 - unsigned int nbytes); 109 + unsigned int nbytes) 
__deprecated; 135 110 unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc, 136 111 u8 *dst, const u8 *src, 137 - unsigned int nbytes); 112 + unsigned int nbytes) __deprecated; 138 113 unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc, 139 114 u8 *dst, const u8 *src, 140 - unsigned int nbytes); 115 + unsigned int nbytes) __deprecated; 141 116 unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc, 142 117 u8 *dst, const u8 *src, 143 - unsigned int nbytes); 118 + unsigned int nbytes) __deprecated; 144 119 }; 145 120 146 121 struct digest_alg { ··· 170 105 unsigned int len); 171 106 void (*dia_final)(struct crypto_tfm *tfm, u8 *out); 172 107 int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key, 173 - unsigned int keylen, u32 *flags); 108 + unsigned int keylen); 109 + }; 110 + 111 + struct hash_alg { 112 + int (*init)(struct hash_desc *desc); 113 + int (*update)(struct hash_desc *desc, struct scatterlist *sg, 114 + unsigned int nbytes); 115 + int (*final)(struct hash_desc *desc, u8 *out); 116 + int (*digest)(struct hash_desc *desc, struct scatterlist *sg, 117 + unsigned int nbytes, u8 *out); 118 + int (*setkey)(struct crypto_hash *tfm, const u8 *key, 119 + unsigned int keylen); 120 + 121 + unsigned int digestsize; 174 122 }; 175 123 176 124 struct compress_alg { ··· 193 115 unsigned int slen, u8 *dst, unsigned int *dlen); 194 116 }; 195 117 118 + #define cra_blkcipher cra_u.blkcipher 196 119 #define cra_cipher cra_u.cipher 197 120 #define cra_digest cra_u.digest 121 + #define cra_hash cra_u.hash 198 122 #define cra_compress cra_u.compress 199 123 200 124 struct crypto_alg { 201 125 struct list_head cra_list; 126 + struct list_head cra_users; 127 + 202 128 u32 cra_flags; 203 129 unsigned int cra_blocksize; 204 130 unsigned int cra_ctxsize; 205 131 unsigned int cra_alignmask; 206 132 207 133 int cra_priority; 134 + atomic_t cra_refcnt; 208 135 209 136 char cra_name[CRYPTO_MAX_ALG_NAME]; 210 137 char cra_driver_name[CRYPTO_MAX_ALG_NAME]; 
211 138 139 + const struct crypto_type *cra_type; 140 + 212 141 union { 142 + struct blkcipher_alg blkcipher; 213 143 struct cipher_alg cipher; 214 144 struct digest_alg digest; 145 + struct hash_alg hash; 215 146 struct compress_alg compress; 216 147 } cra_u; 217 148 218 149 int (*cra_init)(struct crypto_tfm *tfm); 219 150 void (*cra_exit)(struct crypto_tfm *tfm); 151 + void (*cra_destroy)(struct crypto_alg *alg); 220 152 221 153 struct module *cra_module; 222 154 }; ··· 241 153 * Algorithm query interface. 242 154 */ 243 155 #ifdef CONFIG_CRYPTO 244 - int crypto_alg_available(const char *name, u32 flags); 156 + int crypto_alg_available(const char *name, u32 flags) 157 + __deprecated_for_modules; 158 + int crypto_has_alg(const char *name, u32 type, u32 mask); 245 159 #else 160 + static int crypto_alg_available(const char *name, u32 flags); 161 + __deprecated_for_modules; 246 162 static inline int crypto_alg_available(const char *name, u32 flags) 163 + { 164 + return 0; 165 + } 166 + 167 + static inline int crypto_has_alg(const char *name, u32 type, u32 mask) 247 168 { 248 169 return 0; 249 170 } ··· 260 163 261 164 /* 262 165 * Transforms: user-instantiated objects which encapsulate algorithms 263 - * and core processing logic. Managed via crypto_alloc_tfm() and 264 - * crypto_free_tfm(), as well as the various helpers below. 166 + * and core processing logic. Managed via crypto_alloc_*() and 167 + * crypto_free_*(), as well as the various helpers below. 
265 168 */ 169 + 170 + struct blkcipher_tfm { 171 + void *iv; 172 + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, 173 + unsigned int keylen); 174 + int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, 175 + struct scatterlist *src, unsigned int nbytes); 176 + int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, 177 + struct scatterlist *src, unsigned int nbytes); 178 + }; 266 179 267 180 struct cipher_tfm { 268 181 void *cit_iv; ··· 297 190 struct scatterlist *src, 298 191 unsigned int nbytes, u8 *iv); 299 192 void (*cit_xor_block)(u8 *dst, const u8 *src); 193 + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 194 + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 300 195 }; 301 196 302 - struct digest_tfm { 303 - void (*dit_init)(struct crypto_tfm *tfm); 304 - void (*dit_update)(struct crypto_tfm *tfm, 305 - struct scatterlist *sg, unsigned int nsg); 306 - void (*dit_final)(struct crypto_tfm *tfm, u8 *out); 307 - void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg, 308 - unsigned int nsg, u8 *out); 309 - int (*dit_setkey)(struct crypto_tfm *tfm, 310 - const u8 *key, unsigned int keylen); 311 - #ifdef CONFIG_CRYPTO_HMAC 312 - void *dit_hmac_block; 313 - #endif 197 + struct hash_tfm { 198 + int (*init)(struct hash_desc *desc); 199 + int (*update)(struct hash_desc *desc, 200 + struct scatterlist *sg, unsigned int nsg); 201 + int (*final)(struct hash_desc *desc, u8 *out); 202 + int (*digest)(struct hash_desc *desc, struct scatterlist *sg, 203 + unsigned int nsg, u8 *out); 204 + int (*setkey)(struct crypto_hash *tfm, const u8 *key, 205 + unsigned int keylen); 206 + unsigned int digestsize; 314 207 }; 315 208 316 209 struct compress_tfm { ··· 322 215 u8 *dst, unsigned int *dlen); 323 216 }; 324 217 218 + #define crt_blkcipher crt_u.blkcipher 325 219 #define crt_cipher crt_u.cipher 326 - #define crt_digest crt_u.digest 220 + #define crt_hash crt_u.hash 327 221 #define 
crt_compress crt_u.compress 328 222 329 223 struct crypto_tfm { ··· 332 224 u32 crt_flags; 333 225 334 226 union { 227 + struct blkcipher_tfm blkcipher; 335 228 struct cipher_tfm cipher; 336 - struct digest_tfm digest; 229 + struct hash_tfm hash; 337 230 struct compress_tfm compress; 338 231 } crt_u; 339 232 340 233 struct crypto_alg *__crt_alg; 341 234 342 - char __crt_ctx[] __attribute__ ((__aligned__)); 235 + void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; 236 + }; 237 + 238 + #define crypto_cipher crypto_tfm 239 + #define crypto_comp crypto_tfm 240 + 241 + struct crypto_blkcipher { 242 + struct crypto_tfm base; 243 + }; 244 + 245 + struct crypto_hash { 246 + struct crypto_tfm base; 247 + }; 248 + 249 + enum { 250 + CRYPTOA_UNSPEC, 251 + CRYPTOA_ALG, 252 + }; 253 + 254 + struct crypto_attr_alg { 255 + char name[CRYPTO_MAX_ALG_NAME]; 343 256 }; 344 257 345 258 /* 346 259 * Transform user interface. 347 260 */ 348 261 349 - /* 350 - * crypto_alloc_tfm() will first attempt to locate an already loaded algorithm. 351 - * If that fails and the kernel supports dynamically loadable modules, it 352 - * will then attempt to load a module of the same name or alias. A refcount 353 - * is grabbed on the algorithm which is then associated with the new transform. 354 - * 355 - * crypto_free_tfm() frees up the transform and any associated resources, 356 - * then drops the refcount on the associated algorithm. 
357 - */ 358 262 struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags); 263 + struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); 359 264 void crypto_free_tfm(struct crypto_tfm *tfm); 360 265 361 266 /* ··· 377 256 static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm) 378 257 { 379 258 return tfm->__crt_alg->cra_name; 259 + } 260 + 261 + static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm) 262 + { 263 + return tfm->__crt_alg->cra_driver_name; 264 + } 265 + 266 + static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm) 267 + { 268 + return tfm->__crt_alg->cra_priority; 380 269 } 381 270 382 271 static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm) ··· 399 268 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; 400 269 } 401 270 271 + static unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm) 272 + __deprecated; 402 273 static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm) 403 274 { 404 275 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 405 276 return tfm->__crt_alg->cra_cipher.cia_min_keysize; 406 277 } 407 278 279 + static unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm) 280 + __deprecated; 408 281 static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm) 409 282 { 410 283 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 411 284 return tfm->__crt_alg->cra_cipher.cia_max_keysize; 412 285 } 413 286 287 + static unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) __deprecated; 414 288 static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) 415 289 { 416 290 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); ··· 438 302 return tfm->__crt_alg->cra_alignmask; 439 303 } 440 304 305 + static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) 306 + { 307 + return tfm->crt_flags; 308 + } 309 + 310 + static inline void 
crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags) 311 + { 312 + tfm->crt_flags |= flags; 313 + } 314 + 315 + static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) 316 + { 317 + tfm->crt_flags &= ~flags; 318 + } 319 + 441 320 static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) 442 321 { 443 322 return tfm->__crt_ctx; ··· 467 316 /* 468 317 * API wrappers. 469 318 */ 470 - static inline void crypto_digest_init(struct crypto_tfm *tfm) 319 + static inline struct crypto_blkcipher *__crypto_blkcipher_cast( 320 + struct crypto_tfm *tfm) 471 321 { 472 - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 473 - tfm->crt_digest.dit_init(tfm); 322 + return (struct crypto_blkcipher *)tfm; 474 323 } 475 324 476 - static inline void crypto_digest_update(struct crypto_tfm *tfm, 477 - struct scatterlist *sg, 478 - unsigned int nsg) 325 + static inline struct crypto_blkcipher *crypto_blkcipher_cast( 326 + struct crypto_tfm *tfm) 479 327 { 480 - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 481 - tfm->crt_digest.dit_update(tfm, sg, nsg); 328 + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); 329 + return __crypto_blkcipher_cast(tfm); 482 330 } 483 331 484 - static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out) 332 + static inline struct crypto_blkcipher *crypto_alloc_blkcipher( 333 + const char *alg_name, u32 type, u32 mask) 485 334 { 486 - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 487 - tfm->crt_digest.dit_final(tfm, out); 335 + type &= ~CRYPTO_ALG_TYPE_MASK; 336 + type |= CRYPTO_ALG_TYPE_BLKCIPHER; 337 + mask |= CRYPTO_ALG_TYPE_MASK; 338 + 339 + return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); 488 340 } 489 341 490 - static inline void crypto_digest_digest(struct crypto_tfm *tfm, 491 - struct scatterlist *sg, 492 - unsigned int nsg, u8 *out) 342 + static inline struct crypto_tfm *crypto_blkcipher_tfm( 343 + struct crypto_blkcipher *tfm) 493 344 { 494 
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 495 - tfm->crt_digest.dit_digest(tfm, sg, nsg, out); 345 + return &tfm->base; 496 346 } 497 347 348 + static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) 349 + { 350 + crypto_free_tfm(crypto_blkcipher_tfm(tfm)); 351 + } 352 + 353 + static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) 354 + { 355 + type &= ~CRYPTO_ALG_TYPE_MASK; 356 + type |= CRYPTO_ALG_TYPE_BLKCIPHER; 357 + mask |= CRYPTO_ALG_TYPE_MASK; 358 + 359 + return crypto_has_alg(alg_name, type, mask); 360 + } 361 + 362 + static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) 363 + { 364 + return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); 365 + } 366 + 367 + static inline struct blkcipher_tfm *crypto_blkcipher_crt( 368 + struct crypto_blkcipher *tfm) 369 + { 370 + return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; 371 + } 372 + 373 + static inline struct blkcipher_alg *crypto_blkcipher_alg( 374 + struct crypto_blkcipher *tfm) 375 + { 376 + return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; 377 + } 378 + 379 + static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) 380 + { 381 + return crypto_blkcipher_alg(tfm)->ivsize; 382 + } 383 + 384 + static inline unsigned int crypto_blkcipher_blocksize( 385 + struct crypto_blkcipher *tfm) 386 + { 387 + return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); 388 + } 389 + 390 + static inline unsigned int crypto_blkcipher_alignmask( 391 + struct crypto_blkcipher *tfm) 392 + { 393 + return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); 394 + } 395 + 396 + static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) 397 + { 398 + return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); 399 + } 400 + 401 + static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, 402 + u32 flags) 403 + { 404 + crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); 405 + } 406 + 407 
+ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, 408 + u32 flags) 409 + { 410 + crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); 411 + } 412 + 413 + static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, 414 + const u8 *key, unsigned int keylen) 415 + { 416 + return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), 417 + key, keylen); 418 + } 419 + 420 + static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, 421 + struct scatterlist *dst, 422 + struct scatterlist *src, 423 + unsigned int nbytes) 424 + { 425 + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; 426 + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 427 + } 428 + 429 + static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, 430 + struct scatterlist *dst, 431 + struct scatterlist *src, 432 + unsigned int nbytes) 433 + { 434 + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 435 + } 436 + 437 + static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, 438 + struct scatterlist *dst, 439 + struct scatterlist *src, 440 + unsigned int nbytes) 441 + { 442 + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; 443 + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 444 + } 445 + 446 + static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, 447 + struct scatterlist *dst, 448 + struct scatterlist *src, 449 + unsigned int nbytes) 450 + { 451 + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 452 + } 453 + 454 + static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, 455 + const u8 *src, unsigned int len) 456 + { 457 + memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); 458 + } 459 + 460 + static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, 461 + u8 *dst, unsigned int len) 462 + { 463 + memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); 464 + } 465 + 466 + 
static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) 467 + { 468 + return (struct crypto_cipher *)tfm; 469 + } 470 + 471 + static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) 472 + { 473 + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 474 + return __crypto_cipher_cast(tfm); 475 + } 476 + 477 + static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, 478 + u32 type, u32 mask) 479 + { 480 + type &= ~CRYPTO_ALG_TYPE_MASK; 481 + type |= CRYPTO_ALG_TYPE_CIPHER; 482 + mask |= CRYPTO_ALG_TYPE_MASK; 483 + 484 + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); 485 + } 486 + 487 + static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) 488 + { 489 + return tfm; 490 + } 491 + 492 + static inline void crypto_free_cipher(struct crypto_cipher *tfm) 493 + { 494 + crypto_free_tfm(crypto_cipher_tfm(tfm)); 495 + } 496 + 497 + static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) 498 + { 499 + type &= ~CRYPTO_ALG_TYPE_MASK; 500 + type |= CRYPTO_ALG_TYPE_CIPHER; 501 + mask |= CRYPTO_ALG_TYPE_MASK; 502 + 503 + return crypto_has_alg(alg_name, type, mask); 504 + } 505 + 506 + static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) 507 + { 508 + return &crypto_cipher_tfm(tfm)->crt_cipher; 509 + } 510 + 511 + static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) 512 + { 513 + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); 514 + } 515 + 516 + static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) 517 + { 518 + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); 519 + } 520 + 521 + static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) 522 + { 523 + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); 524 + } 525 + 526 + static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, 527 + u32 flags) 528 + { 529 + 
crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); 530 + } 531 + 532 + static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, 533 + u32 flags) 534 + { 535 + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); 536 + } 537 + 538 + static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, 539 + const u8 *key, unsigned int keylen) 540 + { 541 + return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), 542 + key, keylen); 543 + } 544 + 545 + static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, 546 + u8 *dst, const u8 *src) 547 + { 548 + crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), 549 + dst, src); 550 + } 551 + 552 + static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, 553 + u8 *dst, const u8 *src) 554 + { 555 + crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), 556 + dst, src); 557 + } 558 + 559 + void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules; 560 + void crypto_digest_update(struct crypto_tfm *tfm, 561 + struct scatterlist *sg, unsigned int nsg) 562 + __deprecated_for_modules; 563 + void crypto_digest_final(struct crypto_tfm *tfm, u8 *out) 564 + __deprecated_for_modules; 565 + void crypto_digest_digest(struct crypto_tfm *tfm, 566 + struct scatterlist *sg, unsigned int nsg, u8 *out) 567 + __deprecated_for_modules; 568 + 569 + static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) 570 + { 571 + return (struct crypto_hash *)tfm; 572 + } 573 + 574 + static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) 575 + { 576 + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) & 577 + CRYPTO_ALG_TYPE_HASH_MASK); 578 + return __crypto_hash_cast(tfm); 579 + } 580 + 581 + static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key, 582 + unsigned int keylen) __deprecated; 498 583 static inline int crypto_digest_setkey(struct crypto_tfm *tfm, 499 584 const u8 *key, unsigned int keylen) 500 585 { 
501 - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); 502 - if (tfm->crt_digest.dit_setkey == NULL) 503 - return -ENOSYS; 504 - return tfm->crt_digest.dit_setkey(tfm, key, keylen); 586 + return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen); 505 587 } 506 588 507 - static inline int crypto_cipher_setkey(struct crypto_tfm *tfm, 508 - const u8 *key, unsigned int keylen) 589 + static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, 590 + u32 type, u32 mask) 509 591 { 510 - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 511 - return tfm->crt_cipher.cit_setkey(tfm, key, keylen); 592 + type &= ~CRYPTO_ALG_TYPE_MASK; 593 + type |= CRYPTO_ALG_TYPE_HASH; 594 + mask |= CRYPTO_ALG_TYPE_HASH_MASK; 595 + 596 + return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask)); 512 597 } 513 598 599 + static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) 600 + { 601 + return &tfm->base; 602 + } 603 + 604 + static inline void crypto_free_hash(struct crypto_hash *tfm) 605 + { 606 + crypto_free_tfm(crypto_hash_tfm(tfm)); 607 + } 608 + 609 + static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) 610 + { 611 + type &= ~CRYPTO_ALG_TYPE_MASK; 612 + type |= CRYPTO_ALG_TYPE_HASH; 613 + mask |= CRYPTO_ALG_TYPE_HASH_MASK; 614 + 615 + return crypto_has_alg(alg_name, type, mask); 616 + } 617 + 618 + static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) 619 + { 620 + return &crypto_hash_tfm(tfm)->crt_hash; 621 + } 622 + 623 + static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) 624 + { 625 + return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); 626 + } 627 + 628 + static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) 629 + { 630 + return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); 631 + } 632 + 633 + static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) 634 + { 635 + return crypto_hash_crt(tfm)->digestsize; 636 + } 
637 + 638 + static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm) 639 + { 640 + return crypto_tfm_get_flags(crypto_hash_tfm(tfm)); 641 + } 642 + 643 + static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags) 644 + { 645 + crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags); 646 + } 647 + 648 + static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) 649 + { 650 + crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); 651 + } 652 + 653 + static inline int crypto_hash_init(struct hash_desc *desc) 654 + { 655 + return crypto_hash_crt(desc->tfm)->init(desc); 656 + } 657 + 658 + static inline int crypto_hash_update(struct hash_desc *desc, 659 + struct scatterlist *sg, 660 + unsigned int nbytes) 661 + { 662 + return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); 663 + } 664 + 665 + static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) 666 + { 667 + return crypto_hash_crt(desc->tfm)->final(desc, out); 668 + } 669 + 670 + static inline int crypto_hash_digest(struct hash_desc *desc, 671 + struct scatterlist *sg, 672 + unsigned int nbytes, u8 *out) 673 + { 674 + return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); 675 + } 676 + 677 + static inline int crypto_hash_setkey(struct crypto_hash *hash, 678 + const u8 *key, unsigned int keylen) 679 + { 680 + return crypto_hash_crt(hash)->setkey(hash, key, keylen); 681 + } 682 + 683 + static int crypto_cipher_encrypt(struct crypto_tfm *tfm, 684 + struct scatterlist *dst, 685 + struct scatterlist *src, 686 + unsigned int nbytes) __deprecated; 514 687 static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm, 515 688 struct scatterlist *dst, 516 689 struct scatterlist *src, ··· 844 369 return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes); 845 370 } 846 371 372 + static int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm, 373 + struct scatterlist *dst, 374 + struct scatterlist *src, 375 + unsigned int nbytes, u8 *iv) __deprecated; 847 376 
static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm, 848 377 struct scatterlist *dst, 849 378 struct scatterlist *src, 850 379 unsigned int nbytes, u8 *iv) 851 380 { 852 381 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 853 - BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB); 854 382 return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv); 855 383 } 856 384 385 + static int crypto_cipher_decrypt(struct crypto_tfm *tfm, 386 + struct scatterlist *dst, 387 + struct scatterlist *src, 388 + unsigned int nbytes) __deprecated; 857 389 static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm, 858 390 struct scatterlist *dst, 859 391 struct scatterlist *src, ··· 870 388 return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes); 871 389 } 872 390 391 + static int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm, 392 + struct scatterlist *dst, 393 + struct scatterlist *src, 394 + unsigned int nbytes, u8 *iv) __deprecated; 873 395 static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm, 874 396 struct scatterlist *dst, 875 397 struct scatterlist *src, 876 398 unsigned int nbytes, u8 *iv) 877 399 { 878 400 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 879 - BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB); 880 401 return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv); 881 402 } 882 403 404 + static void crypto_cipher_set_iv(struct crypto_tfm *tfm, 405 + const u8 *src, unsigned int len) __deprecated; 883 406 static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm, 884 407 const u8 *src, unsigned int len) 885 408 { ··· 892 405 memcpy(tfm->crt_cipher.cit_iv, src, len); 893 406 } 894 407 408 + static void crypto_cipher_get_iv(struct crypto_tfm *tfm, 409 + u8 *dst, unsigned int len) __deprecated; 895 410 static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm, 896 411 u8 *dst, unsigned int len) 897 412 { ··· 901 412 memcpy(dst, tfm->crt_cipher.cit_iv, len); 902 413 } 903 414 904 - 
static inline int crypto_comp_compress(struct crypto_tfm *tfm, 415 + static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) 416 + { 417 + return (struct crypto_comp *)tfm; 418 + } 419 + 420 + static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) 421 + { 422 + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & 423 + CRYPTO_ALG_TYPE_MASK); 424 + return __crypto_comp_cast(tfm); 425 + } 426 + 427 + static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, 428 + u32 type, u32 mask) 429 + { 430 + type &= ~CRYPTO_ALG_TYPE_MASK; 431 + type |= CRYPTO_ALG_TYPE_COMPRESS; 432 + mask |= CRYPTO_ALG_TYPE_MASK; 433 + 434 + return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); 435 + } 436 + 437 + static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) 438 + { 439 + return tfm; 440 + } 441 + 442 + static inline void crypto_free_comp(struct crypto_comp *tfm) 443 + { 444 + crypto_free_tfm(crypto_comp_tfm(tfm)); 445 + } 446 + 447 + static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask) 448 + { 449 + type &= ~CRYPTO_ALG_TYPE_MASK; 450 + type |= CRYPTO_ALG_TYPE_COMPRESS; 451 + mask |= CRYPTO_ALG_TYPE_MASK; 452 + 453 + return crypto_has_alg(alg_name, type, mask); 454 + } 455 + 456 + static inline const char *crypto_comp_name(struct crypto_comp *tfm) 457 + { 458 + return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); 459 + } 460 + 461 + static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) 462 + { 463 + return &crypto_comp_tfm(tfm)->crt_compress; 464 + } 465 + 466 + static inline int crypto_comp_compress(struct crypto_comp *tfm, 905 467 const u8 *src, unsigned int slen, 906 468 u8 *dst, unsigned int *dlen) 907 469 { 908 - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS); 909 - return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen); 470 + return crypto_comp_crt(tfm)->cot_compress(tfm, src, slen, dst, dlen); 910 471 } 911 472 912 
- static inline int crypto_comp_decompress(struct crypto_tfm *tfm, 473 + static inline int crypto_comp_decompress(struct crypto_comp *tfm, 913 474 const u8 *src, unsigned int slen, 914 475 u8 *dst, unsigned int *dlen) 915 476 { 916 - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS); 917 - return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen); 477 + return crypto_comp_crt(tfm)->cot_decompress(tfm, src, slen, dst, dlen); 918 478 } 919 - 920 - /* 921 - * HMAC support. 922 - */ 923 - #ifdef CONFIG_CRYPTO_HMAC 924 - void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen); 925 - void crypto_hmac_update(struct crypto_tfm *tfm, 926 - struct scatterlist *sg, unsigned int nsg); 927 - void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key, 928 - unsigned int *keylen, u8 *out); 929 - void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen, 930 - struct scatterlist *sg, unsigned int nsg, u8 *out); 931 - #endif /* CONFIG_CRYPTO_HMAC */ 932 479 933 480 #endif /* _LINUX_CRYPTO_H */ 934 481
+2 -2
include/linux/scatterlist.h
··· 5 5 #include <linux/mm.h> 6 6 #include <linux/string.h> 7 7 8 - static inline void sg_set_buf(struct scatterlist *sg, void *buf, 8 + static inline void sg_set_buf(struct scatterlist *sg, const void *buf, 9 9 unsigned int buflen) 10 10 { 11 11 sg->page = virt_to_page(buf); ··· 13 13 sg->length = buflen; 14 14 } 15 15 16 - static inline void sg_init_one(struct scatterlist *sg, void *buf, 16 + static inline void sg_init_one(struct scatterlist *sg, const void *buf, 17 17 unsigned int buflen) 18 18 { 19 19 memset(sg, 0, sizeof(*sg));
+10 -9
include/linux/sunrpc/gss_krb5.h
··· 46 46 unsigned char seed[16]; 47 47 int signalg; 48 48 int sealalg; 49 - struct crypto_tfm *enc; 50 - struct crypto_tfm *seq; 49 + struct crypto_blkcipher *enc; 50 + struct crypto_blkcipher *seq; 51 51 s32 endtime; 52 52 u32 seq_send; 53 53 struct xdr_netobj mech_used; ··· 136 136 137 137 138 138 u32 139 - krb5_encrypt(struct crypto_tfm * key, 139 + krb5_encrypt(struct crypto_blkcipher *key, 140 140 void *iv, void *in, void *out, int length); 141 141 142 142 u32 143 - krb5_decrypt(struct crypto_tfm * key, 143 + krb5_decrypt(struct crypto_blkcipher *key, 144 144 void *iv, void *in, void *out, int length); 145 145 146 146 int 147 - gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset, 148 - struct page **pages); 147 + gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf, 148 + int offset, struct page **pages); 149 149 150 150 int 151 - gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset); 151 + gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf, 152 + int offset); 152 153 153 154 s32 154 - krb5_make_seq_num(struct crypto_tfm * key, 155 + krb5_make_seq_num(struct crypto_blkcipher *key, 155 156 int direction, 156 157 s32 seqnum, unsigned char *cksum, unsigned char *buf); 157 158 158 159 s32 159 - krb5_get_seq_num(struct crypto_tfm * key, 160 + krb5_get_seq_num(struct crypto_blkcipher *key, 160 161 unsigned char *cksum, 161 162 unsigned char *buf, int *direction, s32 * seqnum);
+2 -2
include/linux/sunrpc/gss_spkm3.h
··· 19 19 unsigned int req_flags ; 20 20 struct xdr_netobj share_key; 21 21 int conf_alg; 22 - struct crypto_tfm* derived_conf_key; 22 + struct crypto_blkcipher *derived_conf_key; 23 23 int intg_alg; 24 - struct crypto_tfm* derived_integ_key; 24 + struct crypto_blkcipher *derived_integ_key; 25 25 int keyestb_alg; /* alg used to get share_key */ 26 26 int owf_alg; /* one way function */ 27 27 };
+19 -11
include/net/ah.h
··· 1 1 #ifndef _NET_AH_H 2 2 #define _NET_AH_H 3 3 4 + #include <linux/crypto.h> 4 5 #include <net/xfrm.h> 5 6 6 7 /* This is the maximum truncated ICV length that we know of. */ ··· 15 14 int icv_full_len; 16 15 int icv_trunc_len; 17 16 18 - void (*icv)(struct ah_data*, 19 - struct sk_buff *skb, u8 *icv); 20 - 21 - struct crypto_tfm *tfm; 17 + struct crypto_hash *tfm; 22 18 }; 23 19 24 - static inline void 25 - ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data) 20 + static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb, 21 + u8 *auth_data) 26 22 { 27 - struct crypto_tfm *tfm = ahp->tfm; 23 + struct hash_desc desc; 24 + int err; 25 + 26 + desc.tfm = ahp->tfm; 27 + desc.flags = 0; 28 28 29 29 memset(auth_data, 0, ahp->icv_trunc_len); 30 - crypto_hmac_init(tfm, ahp->key, &ahp->key_len); 31 - skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update); 32 - crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv); 33 - memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len); 30 + err = crypto_hash_init(&desc); 31 + if (unlikely(err)) 32 + goto out; 33 + err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update); 34 + if (unlikely(err)) 35 + goto out; 36 + err = crypto_hash_final(&desc, ahp->work_icv); 37 + 38 + out: 39 + return err; 34 40 } 35 41 36 42 #endif
+17 -12
include/net/esp.h
··· 1 1 #ifndef _NET_ESP_H 2 2 #define _NET_ESP_H 3 3 4 + #include <linux/crypto.h> 4 5 #include <net/xfrm.h> 5 6 #include <asm/scatterlist.h> 6 7 ··· 22 21 * >= crypto_tfm_alg_ivsize(tfm). */ 23 22 int ivlen; 24 23 int padlen; /* 0..255 */ 25 - struct crypto_tfm *tfm; /* crypto handle */ 24 + struct crypto_blkcipher *tfm; /* crypto handle */ 26 25 } conf; 27 26 28 27 /* Integrity. It is active when icv_full_len != 0 */ ··· 35 34 void (*icv)(struct esp_data*, 36 35 struct sk_buff *skb, 37 36 int offset, int len, u8 *icv); 38 - struct crypto_tfm *tfm; 37 + struct crypto_hash *tfm; 39 38 } auth; 40 39 }; 41 40 ··· 43 42 extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 44 43 extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); 45 44 46 - static inline void 47 - esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset, 48 - int len, u8 *auth_data) 45 + static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb, 46 + int offset, int len) 49 47 { 50 - struct crypto_tfm *tfm = esp->auth.tfm; 51 - char *icv = esp->auth.work_icv; 48 + struct hash_desc desc; 49 + int err; 52 50 53 - memset(auth_data, 0, esp->auth.icv_trunc_len); 54 - crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len); 55 - skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update); 56 - crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv); 57 - memcpy(auth_data, icv, esp->auth.icv_trunc_len); 51 + desc.tfm = esp->auth.tfm; 52 + desc.flags = 0; 53 + 54 + err = crypto_hash_init(&desc); 55 + if (unlikely(err)) 56 + return err; 57 + err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update); 58 + if (unlikely(err)) 59 + return err; 60 + return crypto_hash_final(&desc, esp->auth.work_icv); 58 61 } 59 62 60 63 #endif
+4 -1
include/net/ipcomp.h
··· 1 1 #ifndef _NET_IPCOMP_H 2 2 #define _NET_IPCOMP_H 3 3 4 + #include <linux/crypto.h> 5 + #include <linux/types.h> 6 + 4 7 #define IPCOMP_SCRATCH_SIZE 65400 5 8 6 9 struct ipcomp_data { 7 10 u16 threshold; 8 - struct crypto_tfm **tfms; 11 + struct crypto_comp **tfms; 9 12 }; 10 13 11 14 #endif
+2 -2
include/net/sctp/constants.h
··· 312 312 */ 313 313 314 314 #if defined (CONFIG_SCTP_HMAC_MD5) 315 - #define SCTP_COOKIE_HMAC_ALG "md5" 315 + #define SCTP_COOKIE_HMAC_ALG "hmac(md5)" 316 316 #elif defined (CONFIG_SCTP_HMAC_SHA1) 317 - #define SCTP_COOKIE_HMAC_ALG "sha1" 317 + #define SCTP_COOKIE_HMAC_ALG "hmac(sha1)" 318 318 #else 319 319 #define SCTP_COOKIE_HMAC_ALG NULL 320 320 #endif
-11
include/net/sctp/sctp.h
··· 330 330 331 331 #endif /* #if defined(CONFIG_IPV6) */ 332 332 333 - /* Some wrappers, in case crypto not available. */ 334 - #if defined (CONFIG_CRYPTO_HMAC) 335 - #define sctp_crypto_alloc_tfm crypto_alloc_tfm 336 - #define sctp_crypto_free_tfm crypto_free_tfm 337 - #define sctp_crypto_hmac crypto_hmac 338 - #else 339 - #define sctp_crypto_alloc_tfm(x...) NULL 340 - #define sctp_crypto_free_tfm(x...) 341 - #define sctp_crypto_hmac(x...) 342 - #endif 343 - 344 333 345 334 /* Map an association to an assoc_id. */ 346 335 static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
+2 -1
include/net/sctp/structs.h
··· 87 87 struct sctp_ulpq; 88 88 struct sctp_ep_common; 89 89 struct sctp_ssnmap; 90 + struct crypto_hash; 90 91 91 92 92 93 #include <net/sctp/tsnmap.h> ··· 265 264 struct sctp_pf *pf; 266 265 267 266 /* Access to HMAC transform. */ 268 - struct crypto_tfm *hmac; 267 + struct crypto_hash *hmac; 269 268 270 269 /* What is our base endpointer? */ 271 270 struct sctp_endpoint *ep;
+7 -5
include/net/xfrm.h
··· 8 8 #include <linux/list.h> 9 9 #include <linux/skbuff.h> 10 10 #include <linux/socket.h> 11 - #include <linux/crypto.h> 12 11 #include <linux/pfkeyv2.h> 13 12 #include <linux/in6.h> 14 13 #include <linux/mutex.h> ··· 854 855 855 856 struct xfrm_algo_desc { 856 857 char *name; 858 + char *compat; 857 859 u8 available:1; 858 860 union { 859 861 struct xfrm_algo_auth_info auth; ··· 984 984 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe); 985 985 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe); 986 986 987 - struct crypto_tfm; 988 - typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int); 987 + struct hash_desc; 988 + struct scatterlist; 989 + typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *, 990 + unsigned int); 989 991 990 - extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, 991 - int offset, int len, icv_update_fn_t icv_update); 992 + extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm, 993 + int offset, int len, icv_update_fn_t icv_update); 992 994 993 995 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b, 994 996 int family)
+12 -20
net/ieee80211/ieee80211_crypt_ccmp.c
··· 9 9 * more details. 10 10 */ 11 11 12 + #include <linux/err.h> 12 13 #include <linux/module.h> 13 14 #include <linux/init.h> 14 15 #include <linux/slab.h> ··· 49 48 50 49 int key_idx; 51 50 52 - struct crypto_tfm *tfm; 51 + struct crypto_cipher *tfm; 53 52 54 53 /* scratch buffers for virt_to_page() (crypto API) */ 55 54 u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], ··· 57 56 u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; 58 57 }; 59 58 60 - static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm, 61 - const u8 pt[16], u8 ct[16]) 59 + static inline void ieee80211_ccmp_aes_encrypt(struct crypto_cipher *tfm, 60 + const u8 pt[16], u8 ct[16]) 62 61 { 63 - struct scatterlist src, dst; 64 - 65 - src.page = virt_to_page(pt); 66 - src.offset = offset_in_page(pt); 67 - src.length = AES_BLOCK_LEN; 68 - 69 - dst.page = virt_to_page(ct); 70 - dst.offset = offset_in_page(ct); 71 - dst.length = AES_BLOCK_LEN; 72 - 73 - crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN); 62 + crypto_cipher_encrypt_one(tfm, ct, pt); 74 63 } 75 64 76 65 static void *ieee80211_ccmp_init(int key_idx) ··· 72 81 goto fail; 73 82 priv->key_idx = key_idx; 74 83 75 - priv->tfm = crypto_alloc_tfm("aes", 0); 76 - if (priv->tfm == NULL) { 84 + priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 85 + if (IS_ERR(priv->tfm)) { 77 86 printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate " 78 87 "crypto API aes\n"); 88 + priv->tfm = NULL; 79 89 goto fail; 80 90 } 81 91 ··· 85 93 fail: 86 94 if (priv) { 87 95 if (priv->tfm) 88 - crypto_free_tfm(priv->tfm); 96 + crypto_free_cipher(priv->tfm); 89 97 kfree(priv); 90 98 } 91 99 ··· 96 104 { 97 105 struct ieee80211_ccmp_data *_priv = priv; 98 106 if (_priv && _priv->tfm) 99 - crypto_free_tfm(_priv->tfm); 107 + crypto_free_cipher(_priv->tfm); 100 108 kfree(priv); 101 109 } 102 110 ··· 107 115 b[i] ^= a[i]; 108 116 } 109 117 110 - static void ccmp_init_blocks(struct crypto_tfm *tfm, 118 + static void ccmp_init_blocks(struct 
crypto_cipher *tfm, 111 119 struct ieee80211_hdr_4addr *hdr, 112 120 u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) 113 121 { ··· 369 377 { 370 378 struct ieee80211_ccmp_data *data = priv; 371 379 int keyidx; 372 - struct crypto_tfm *tfm = data->tfm; 380 + struct crypto_cipher *tfm = data->tfm; 373 381 374 382 keyidx = data->key_idx; 375 383 memset(data, 0, sizeof(*data));
+36 -23
net/ieee80211/ieee80211_crypt_tkip.c
··· 9 9 * more details. 10 10 */ 11 11 12 + #include <linux/err.h> 12 13 #include <linux/module.h> 13 14 #include <linux/init.h> 14 15 #include <linux/slab.h> ··· 53 52 54 53 int key_idx; 55 54 56 - struct crypto_tfm *tfm_arc4; 57 - struct crypto_tfm *tfm_michael; 55 + struct crypto_blkcipher *tfm_arc4; 56 + struct crypto_hash *tfm_michael; 58 57 59 58 /* scratch buffers for virt_to_page() (crypto API) */ 60 59 u8 rx_hdr[16], tx_hdr[16]; ··· 86 85 87 86 priv->key_idx = key_idx; 88 87 89 - priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); 90 - if (priv->tfm_arc4 == NULL) { 88 + priv->tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, 89 + CRYPTO_ALG_ASYNC); 90 + if (IS_ERR(priv->tfm_arc4)) { 91 91 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " 92 92 "crypto API arc4\n"); 93 + priv->tfm_arc4 = NULL; 93 94 goto fail; 94 95 } 95 96 96 - priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0); 97 - if (priv->tfm_michael == NULL) { 97 + priv->tfm_michael = crypto_alloc_hash("michael_mic", 0, 98 + CRYPTO_ALG_ASYNC); 99 + if (IS_ERR(priv->tfm_michael)) { 98 100 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " 99 101 "crypto API michael_mic\n"); 102 + priv->tfm_michael = NULL; 100 103 goto fail; 101 104 } 102 105 ··· 109 104 fail: 110 105 if (priv) { 111 106 if (priv->tfm_michael) 112 - crypto_free_tfm(priv->tfm_michael); 107 + crypto_free_hash(priv->tfm_michael); 113 108 if (priv->tfm_arc4) 114 - crypto_free_tfm(priv->tfm_arc4); 109 + crypto_free_blkcipher(priv->tfm_arc4); 115 110 kfree(priv); 116 111 } 117 112 ··· 122 117 { 123 118 struct ieee80211_tkip_data *_priv = priv; 124 119 if (_priv && _priv->tfm_michael) 125 - crypto_free_tfm(_priv->tfm_michael); 120 + crypto_free_hash(_priv->tfm_michael); 126 121 if (_priv && _priv->tfm_arc4) 127 - crypto_free_tfm(_priv->tfm_arc4); 122 + crypto_free_blkcipher(_priv->tfm_arc4); 128 123 kfree(priv); 129 124 } 130 125 ··· 323 318 static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 
324 319 { 325 320 struct ieee80211_tkip_data *tkey = priv; 321 + struct blkcipher_desc desc = { .tfm = tkey->tfm_arc4 }; 326 322 int len; 327 323 u8 rc4key[16], *pos, *icv; 328 324 u32 crc; ··· 357 351 icv[2] = crc >> 16; 358 352 icv[3] = crc >> 24; 359 353 360 - crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); 354 + crypto_blkcipher_setkey(tkey->tfm_arc4, rc4key, 16); 361 355 sg.page = virt_to_page(pos); 362 356 sg.offset = offset_in_page(pos); 363 357 sg.length = len + 4; 364 - crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); 365 - 366 - return 0; 358 + return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); 367 359 } 368 360 369 361 static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) 370 362 { 371 363 struct ieee80211_tkip_data *tkey = priv; 364 + struct blkcipher_desc desc = { .tfm = tkey->tfm_arc4 }; 372 365 u8 rc4key[16]; 373 366 u8 keyidx, *pos; 374 367 u32 iv32; ··· 439 434 440 435 plen = skb->len - hdr_len - 12; 441 436 442 - crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); 437 + crypto_blkcipher_setkey(tkey->tfm_arc4, rc4key, 16); 443 438 sg.page = virt_to_page(pos); 444 439 sg.offset = offset_in_page(pos); 445 440 sg.length = plen + 4; 446 - crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4); 441 + if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { 442 + if (net_ratelimit()) { 443 + printk(KERN_DEBUG ": TKIP: failed to decrypt " 444 + "received packet from " MAC_FMT "\n", 445 + MAC_ARG(hdr->addr2)); 446 + } 447 + return -7; 448 + } 447 449 448 450 crc = ~crc32_le(~0, pos, plen); 449 451 icv[0] = crc; ··· 487 475 static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, 488 476 u8 * data, size_t data_len, u8 * mic) 489 477 { 478 + struct hash_desc desc; 490 479 struct scatterlist sg[2]; 491 480 492 481 if (tkey->tfm_michael == NULL) { ··· 502 489 sg[1].offset = offset_in_page(data); 503 490 sg[1].length = data_len; 504 491 505 - crypto_digest_init(tkey->tfm_michael); 506 - 
crypto_digest_setkey(tkey->tfm_michael, key, 8); 507 - crypto_digest_update(tkey->tfm_michael, sg, 2); 508 - crypto_digest_final(tkey->tfm_michael, mic); 492 + if (crypto_hash_setkey(tkey->tfm_michael, key, 8)) 493 + return -1; 509 494 510 - return 0; 495 + desc.tfm = tkey->tfm_michael; 496 + desc.flags = 0; 497 + return crypto_hash_digest(&desc, sg, data_len + 16, mic); 511 498 } 512 499 513 500 static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) ··· 631 618 { 632 619 struct ieee80211_tkip_data *tkey = priv; 633 620 int keyidx; 634 - struct crypto_tfm *tfm = tkey->tfm_michael; 635 - struct crypto_tfm *tfm2 = tkey->tfm_arc4; 621 + struct crypto_hash *tfm = tkey->tfm_michael; 622 + struct crypto_blkcipher *tfm2 = tkey->tfm_arc4; 636 623 637 624 keyidx = tkey->key_idx; 638 625 memset(tkey, 0, sizeof(*tkey));
+14 -11
net/ieee80211/ieee80211_crypt_wep.c
··· 9 9 * more details. 10 10 */ 11 11 12 + #include <linux/err.h> 12 13 #include <linux/module.h> 13 14 #include <linux/init.h> 14 15 #include <linux/slab.h> ··· 33 32 u8 key[WEP_KEY_LEN + 1]; 34 33 u8 key_len; 35 34 u8 key_idx; 36 - struct crypto_tfm *tfm; 35 + struct crypto_blkcipher *tfm; 37 36 }; 38 37 39 38 static void *prism2_wep_init(int keyidx) ··· 45 44 goto fail; 46 45 priv->key_idx = keyidx; 47 46 48 - priv->tfm = crypto_alloc_tfm("arc4", 0); 49 - if (priv->tfm == NULL) { 47 + priv->tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 48 + if (IS_ERR(priv->tfm)) { 50 49 printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " 51 50 "crypto API arc4\n"); 51 + priv->tfm = NULL; 52 52 goto fail; 53 53 } 54 54 ··· 61 59 fail: 62 60 if (priv) { 63 61 if (priv->tfm) 64 - crypto_free_tfm(priv->tfm); 62 + crypto_free_blkcipher(priv->tfm); 65 63 kfree(priv); 66 64 } 67 65 return NULL; ··· 71 69 { 72 70 struct prism2_wep_data *_priv = priv; 73 71 if (_priv && _priv->tfm) 74 - crypto_free_tfm(_priv->tfm); 72 + crypto_free_blkcipher(_priv->tfm); 75 73 kfree(priv); 76 74 } 77 75 ··· 122 120 static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 123 121 { 124 122 struct prism2_wep_data *wep = priv; 123 + struct blkcipher_desc desc = { .tfm = wep->tfm }; 125 124 u32 crc, klen, len; 126 125 u8 *pos, *icv; 127 126 struct scatterlist sg; ··· 154 151 icv[2] = crc >> 16; 155 152 icv[3] = crc >> 24; 156 153 157 - crypto_cipher_setkey(wep->tfm, key, klen); 154 + crypto_blkcipher_setkey(wep->tfm, key, klen); 158 155 sg.page = virt_to_page(pos); 159 156 sg.offset = offset_in_page(pos); 160 157 sg.length = len + 4; 161 - crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); 162 - 163 - return 0; 158 + return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); 164 159 } 165 160 166 161 /* Perform WEP decryption on given buffer. 
Buffer includes whole WEP part of ··· 171 170 static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) 172 171 { 173 172 struct prism2_wep_data *wep = priv; 173 + struct blkcipher_desc desc = { .tfm = wep->tfm }; 174 174 u32 crc, klen, plen; 175 175 u8 key[WEP_KEY_LEN + 3]; 176 176 u8 keyidx, *pos, icv[4]; ··· 196 194 /* Apply RC4 to data and compute CRC32 over decrypted data */ 197 195 plen = skb->len - hdr_len - 8; 198 196 199 - crypto_cipher_setkey(wep->tfm, key, klen); 197 + crypto_blkcipher_setkey(wep->tfm, key, klen); 200 198 sg.page = virt_to_page(pos); 201 199 sg.offset = offset_in_page(pos); 202 200 sg.length = plen + 4; 203 - crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4); 201 + if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) 202 + return -7; 204 203 205 204 crc = ~crc32_le(~0, pos, plen); 206 205 icv[0] = crc;
+1
net/ipv4/Kconfig
··· 386 386 select CRYPTO 387 387 select CRYPTO_HMAC 388 388 select CRYPTO_MD5 389 + select CRYPTO_CBC 389 390 select CRYPTO_SHA1 390 391 select CRYPTO_DES 391 392 ---help---
+24 -12
net/ipv4/ah4.c
··· 1 + #include <linux/err.h> 1 2 #include <linux/module.h> 2 3 #include <net/ip.h> 3 4 #include <net/xfrm.h> ··· 98 97 ah->spi = x->id.spi; 99 98 ah->seq_no = htonl(++x->replay.oseq); 100 99 xfrm_aevent_doreplay(x); 101 - ahp->icv(ahp, skb, ah->auth_data); 100 + err = ah_mac_digest(ahp, skb, ah->auth_data); 101 + if (err) 102 + goto error; 103 + memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); 102 104 103 105 top_iph->tos = iph->tos; 104 106 top_iph->ttl = iph->ttl; ··· 123 119 { 124 120 int ah_hlen; 125 121 int ihl; 122 + int err = -EINVAL; 126 123 struct iphdr *iph; 127 124 struct ip_auth_hdr *ah; 128 125 struct ah_data *ahp; ··· 171 166 172 167 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 173 168 skb_push(skb, ihl); 174 - ahp->icv(ahp, skb, ah->auth_data); 175 - if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { 169 + err = ah_mac_digest(ahp, skb, ah->auth_data); 170 + if (err) 171 + goto out; 172 + err = -EINVAL; 173 + if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) { 176 174 x->stats.integrity_failed++; 177 175 goto out; 178 176 } ··· 187 179 return 0; 188 180 189 181 out: 190 - return -EINVAL; 182 + return err; 191 183 } 192 184 193 185 static void ah4_err(struct sk_buff *skb, u32 info) ··· 212 204 { 213 205 struct ah_data *ahp = NULL; 214 206 struct xfrm_algo_desc *aalg_desc; 207 + struct crypto_hash *tfm; 215 208 216 209 if (!x->aalg) 217 210 goto error; ··· 230 221 231 222 ahp->key = x->aalg->alg_key; 232 223 ahp->key_len = (x->aalg->alg_key_len+7)/8; 233 - ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 234 - if (!ahp->tfm) 224 + tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); 225 + if (IS_ERR(tfm)) 235 226 goto error; 236 - ahp->icv = ah_hmac_digest; 227 + 228 + ahp->tfm = tfm; 229 + if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) 230 + goto error; 237 231 238 232 /* 239 233 * Lookup the algorithm description maintained by xfrm_algo, 240 234 * verify crypto transform properties, and store 
information 241 235 * we need for AH processing. This lookup cannot fail here 242 - * after a successful crypto_alloc_tfm(). 236 + * after a successful crypto_alloc_hash(). 243 237 */ 244 238 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 245 239 BUG_ON(!aalg_desc); 246 240 247 241 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 248 - crypto_tfm_alg_digestsize(ahp->tfm)) { 242 + crypto_hash_digestsize(tfm)) { 249 243 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 250 - x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), 244 + x->aalg->alg_name, crypto_hash_digestsize(tfm), 251 245 aalg_desc->uinfo.auth.icv_fullbits/8); 252 246 goto error; 253 247 } ··· 274 262 error: 275 263 if (ahp) { 276 264 kfree(ahp->work_icv); 277 - crypto_free_tfm(ahp->tfm); 265 + crypto_free_hash(ahp->tfm); 278 266 kfree(ahp); 279 267 } 280 268 return -EINVAL; ··· 289 277 290 278 kfree(ahp->work_icv); 291 279 ahp->work_icv = NULL; 292 - crypto_free_tfm(ahp->tfm); 280 + crypto_free_hash(ahp->tfm); 293 281 ahp->tfm = NULL; 294 282 kfree(ahp); 295 283 }
+50 -35
net/ipv4/esp4.c
··· 1 + #include <linux/err.h> 1 2 #include <linux/module.h> 2 3 #include <net/ip.h> 3 4 #include <net/xfrm.h> ··· 17 16 int err; 18 17 struct iphdr *top_iph; 19 18 struct ip_esp_hdr *esph; 20 - struct crypto_tfm *tfm; 19 + struct crypto_blkcipher *tfm; 20 + struct blkcipher_desc desc; 21 21 struct esp_data *esp; 22 22 struct sk_buff *trailer; 23 23 int blksize; ··· 38 36 esp = x->data; 39 37 alen = esp->auth.icv_trunc_len; 40 38 tfm = esp->conf.tfm; 41 - blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); 39 + desc.tfm = tfm; 40 + desc.flags = 0; 41 + blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); 42 42 clen = ALIGN(clen + 2, blksize); 43 43 if (esp->conf.padlen) 44 44 clen = ALIGN(clen, esp->conf.padlen); ··· 96 92 xfrm_aevent_doreplay(x); 97 93 98 94 if (esp->conf.ivlen) 99 - crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 95 + crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen); 100 96 101 97 do { 102 98 struct scatterlist *sg = &esp->sgbuf[0]; ··· 107 103 goto error; 108 104 } 109 105 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); 110 - crypto_cipher_encrypt(tfm, sg, sg, clen); 106 + err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); 111 107 if (unlikely(sg != &esp->sgbuf[0])) 112 108 kfree(sg); 113 109 } while (0); 114 110 111 + if (unlikely(err)) 112 + goto error; 113 + 115 114 if (esp->conf.ivlen) { 116 - memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 117 - crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 115 + memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); 116 + crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen); 118 117 } 119 118 120 119 if (esp->auth.icv_full_len) { 121 - esp->auth.icv(esp, skb, (u8*)esph-skb->data, 122 - sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); 123 - pskb_put(skb, trailer, alen); 120 + err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, 121 + sizeof(*esph) + esp->conf.ivlen + clen); 
122 + memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); 124 123 } 125 124 126 125 ip_send_check(top_iph); 127 - 128 - err = 0; 129 126 130 127 error: 131 128 return err; ··· 142 137 struct iphdr *iph; 143 138 struct ip_esp_hdr *esph; 144 139 struct esp_data *esp = x->data; 140 + struct crypto_blkcipher *tfm = esp->conf.tfm; 141 + struct blkcipher_desc desc = { .tfm = tfm }; 145 142 struct sk_buff *trailer; 146 - int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 143 + int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); 147 144 int alen = esp->auth.icv_trunc_len; 148 145 int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; 149 146 int nfrags; ··· 153 146 u8 nexthdr[2]; 154 147 struct scatterlist *sg; 155 148 int padlen; 149 + int err; 156 150 157 151 if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr))) 158 152 goto out; ··· 163 155 164 156 /* If integrity check is required, do this. */ 165 157 if (esp->auth.icv_full_len) { 166 - u8 sum[esp->auth.icv_full_len]; 167 - u8 sum1[alen]; 168 - 169 - esp->auth.icv(esp, skb, 0, skb->len-alen, sum); 158 + u8 sum[alen]; 170 159 171 - if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) 160 + err = esp_mac_digest(esp, skb, 0, skb->len - alen); 161 + if (err) 162 + goto out; 163 + 164 + if (skb_copy_bits(skb, skb->len - alen, sum, alen)) 172 165 BUG(); 173 166 174 - if (unlikely(memcmp(sum, sum1, alen))) { 167 + if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) { 175 168 x->stats.integrity_failed++; 176 169 goto out; 177 170 } ··· 187 178 188 179 /* Get ivec. This can be wrong, check against another impls. 
*/ 189 180 if (esp->conf.ivlen) 190 - crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); 181 + crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen); 191 182 192 183 sg = &esp->sgbuf[0]; 193 184 ··· 197 188 goto out; 198 189 } 199 190 skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen); 200 - crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); 191 + err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); 201 192 if (unlikely(sg != &esp->sgbuf[0])) 202 193 kfree(sg); 194 + if (unlikely(err)) 195 + return err; 203 196 204 197 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) 205 198 BUG(); ··· 265 254 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) 266 255 { 267 256 struct esp_data *esp = x->data; 268 - u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 257 + u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); 269 258 270 259 if (x->props.mode) { 271 260 mtu = ALIGN(mtu + 2, blksize); ··· 304 293 if (!esp) 305 294 return; 306 295 307 - crypto_free_tfm(esp->conf.tfm); 296 + crypto_free_blkcipher(esp->conf.tfm); 308 297 esp->conf.tfm = NULL; 309 298 kfree(esp->conf.ivec); 310 299 esp->conf.ivec = NULL; 311 - crypto_free_tfm(esp->auth.tfm); 300 + crypto_free_hash(esp->auth.tfm); 312 301 esp->auth.tfm = NULL; 313 302 kfree(esp->auth.work_icv); 314 303 esp->auth.work_icv = NULL; ··· 318 307 static int esp_init_state(struct xfrm_state *x) 319 308 { 320 309 struct esp_data *esp = NULL; 310 + struct crypto_blkcipher *tfm; 321 311 322 312 /* null auth and encryption can have zero length keys */ 323 313 if (x->aalg) { ··· 334 322 335 323 if (x->aalg) { 336 324 struct xfrm_algo_desc *aalg_desc; 325 + struct crypto_hash *hash; 337 326 338 327 esp->auth.key = x->aalg->alg_key; 339 328 esp->auth.key_len = (x->aalg->alg_key_len+7)/8; 340 - esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 341 - if (esp->auth.tfm == NULL) 329 + hash = 
crypto_alloc_hash(x->aalg->alg_name, 0, 330 + CRYPTO_ALG_ASYNC); 331 + if (IS_ERR(hash)) 342 332 goto error; 343 - esp->auth.icv = esp_hmac_digest; 333 + 334 + esp->auth.tfm = hash; 335 + if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) 336 + goto error; 344 337 345 338 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 346 339 BUG_ON(!aalg_desc); 347 340 348 341 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 349 - crypto_tfm_alg_digestsize(esp->auth.tfm)) { 342 + crypto_hash_digestsize(hash)) { 350 343 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 351 344 x->aalg->alg_name, 352 - crypto_tfm_alg_digestsize(esp->auth.tfm), 345 + crypto_hash_digestsize(hash), 353 346 aalg_desc->uinfo.auth.icv_fullbits/8); 354 347 goto error; 355 348 } ··· 368 351 } 369 352 esp->conf.key = x->ealg->alg_key; 370 353 esp->conf.key_len = (x->ealg->alg_key_len+7)/8; 371 - if (x->props.ealgo == SADB_EALG_NULL) 372 - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); 373 - else 374 - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC); 375 - if (esp->conf.tfm == NULL) 354 + tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC); 355 + if (IS_ERR(tfm)) 376 356 goto error; 377 - esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); 357 + esp->conf.tfm = tfm; 358 + esp->conf.ivlen = crypto_blkcipher_ivsize(tfm); 378 359 esp->conf.padlen = 0; 379 360 if (esp->conf.ivlen) { 380 361 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); ··· 380 365 goto error; 381 366 get_random_bytes(esp->conf.ivec, esp->conf.ivlen); 382 367 } 383 - if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len)) 368 + if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len)) 384 369 goto error; 385 370 x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; 386 371 if (x->props.mode)
+13 -12
net/ipv4/ipcomp.c
··· 32 32 33 33 struct ipcomp_tfms { 34 34 struct list_head list; 35 - struct crypto_tfm **tfms; 35 + struct crypto_comp **tfms; 36 36 int users; 37 37 }; 38 38 ··· 46 46 int err, plen, dlen; 47 47 struct ipcomp_data *ipcd = x->data; 48 48 u8 *start, *scratch; 49 - struct crypto_tfm *tfm; 49 + struct crypto_comp *tfm; 50 50 int cpu; 51 51 52 52 plen = skb->len; ··· 107 107 struct iphdr *iph = skb->nh.iph; 108 108 struct ipcomp_data *ipcd = x->data; 109 109 u8 *start, *scratch; 110 - struct crypto_tfm *tfm; 110 + struct crypto_comp *tfm; 111 111 int cpu; 112 112 113 113 ihlen = iph->ihl * 4; ··· 302 302 return scratches; 303 303 } 304 304 305 - static void ipcomp_free_tfms(struct crypto_tfm **tfms) 305 + static void ipcomp_free_tfms(struct crypto_comp **tfms) 306 306 { 307 307 struct ipcomp_tfms *pos; 308 308 int cpu; ··· 324 324 return; 325 325 326 326 for_each_possible_cpu(cpu) { 327 - struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 328 - crypto_free_tfm(tfm); 327 + struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 328 + crypto_free_comp(tfm); 329 329 } 330 330 free_percpu(tfms); 331 331 } 332 332 333 - static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name) 333 + static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) 334 334 { 335 335 struct ipcomp_tfms *pos; 336 - struct crypto_tfm **tfms; 336 + struct crypto_comp **tfms; 337 337 int cpu; 338 338 339 339 /* This can be any valid CPU ID so we don't need locking. 
*/ 340 340 cpu = raw_smp_processor_id(); 341 341 342 342 list_for_each_entry(pos, &ipcomp_tfms_list, list) { 343 - struct crypto_tfm *tfm; 343 + struct crypto_comp *tfm; 344 344 345 345 tfms = pos->tfms; 346 346 tfm = *per_cpu_ptr(tfms, cpu); 347 347 348 - if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) { 348 + if (!strcmp(crypto_comp_name(tfm), alg_name)) { 349 349 pos->users++; 350 350 return tfms; 351 351 } ··· 359 359 INIT_LIST_HEAD(&pos->list); 360 360 list_add(&pos->list, &ipcomp_tfms_list); 361 361 362 - pos->tfms = tfms = alloc_percpu(struct crypto_tfm *); 362 + pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 363 363 if (!tfms) 364 364 goto error; 365 365 366 366 for_each_possible_cpu(cpu) { 367 - struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 367 + struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 368 + CRYPTO_ALG_ASYNC); 368 369 if (!tfm) 369 370 goto error; 370 371 *per_cpu_ptr(tfms, cpu) = tfm;
+1
net/ipv6/Kconfig
··· 77 77 select CRYPTO 78 78 select CRYPTO_HMAC 79 79 select CRYPTO_MD5 80 + select CRYPTO_CBC 80 81 select CRYPTO_SHA1 81 82 select CRYPTO_DES 82 83 ---help---
+23 -12
net/ipv6/ah6.c
··· 213 213 ah->spi = x->id.spi; 214 214 ah->seq_no = htonl(++x->replay.oseq); 215 215 xfrm_aevent_doreplay(x); 216 - ahp->icv(ahp, skb, ah->auth_data); 216 + err = ah_mac_digest(ahp, skb, ah->auth_data); 217 + if (err) 218 + goto error_free_iph; 219 + memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); 217 220 218 221 err = 0; 219 222 ··· 254 251 u16 hdr_len; 255 252 u16 ah_hlen; 256 253 int nexthdr; 254 + int err = -EINVAL; 257 255 258 256 if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) 259 257 goto out; ··· 296 292 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 297 293 memset(ah->auth_data, 0, ahp->icv_trunc_len); 298 294 skb_push(skb, hdr_len); 299 - ahp->icv(ahp, skb, ah->auth_data); 300 - if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { 295 + err = ah_mac_digest(ahp, skb, ah->auth_data); 296 + if (err) 297 + goto free_out; 298 + err = -EINVAL; 299 + if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) { 301 300 LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n"); 302 301 x->stats.integrity_failed++; 303 302 goto free_out; ··· 317 310 free_out: 318 311 kfree(tmp_hdr); 319 312 out: 320 - return -EINVAL; 313 + return err; 321 314 } 322 315 323 316 static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ··· 345 338 { 346 339 struct ah_data *ahp = NULL; 347 340 struct xfrm_algo_desc *aalg_desc; 341 + struct crypto_hash *tfm; 348 342 349 343 if (!x->aalg) 350 344 goto error; ··· 363 355 364 356 ahp->key = x->aalg->alg_key; 365 357 ahp->key_len = (x->aalg->alg_key_len+7)/8; 366 - ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 367 - if (!ahp->tfm) 358 + tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); 359 + if (IS_ERR(tfm)) 368 360 goto error; 369 - ahp->icv = ah_hmac_digest; 361 + 362 + ahp->tfm = tfm; 363 + if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) 364 + goto error; 370 365 371 366 /* 372 367 * Lookup the algorithm description maintained by xfrm_algo, 373 368 * verify crypto 
transform properties, and store information 374 369 * we need for AH processing. This lookup cannot fail here 375 - * after a successful crypto_alloc_tfm(). 370 + * after a successful crypto_alloc_hash(). 376 371 */ 377 372 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 378 373 BUG_ON(!aalg_desc); 379 374 380 375 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 381 - crypto_tfm_alg_digestsize(ahp->tfm)) { 376 + crypto_hash_digestsize(tfm)) { 382 377 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 383 - x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), 378 + x->aalg->alg_name, crypto_hash_digestsize(tfm), 384 379 aalg_desc->uinfo.auth.icv_fullbits/8); 385 380 goto error; 386 381 } ··· 407 396 error: 408 397 if (ahp) { 409 398 kfree(ahp->work_icv); 410 - crypto_free_tfm(ahp->tfm); 399 + crypto_free_hash(ahp->tfm); 411 400 kfree(ahp); 412 401 } 413 402 return -EINVAL; ··· 422 411 423 412 kfree(ahp->work_icv); 424 413 ahp->work_icv = NULL; 425 - crypto_free_tfm(ahp->tfm); 414 + crypto_free_hash(ahp->tfm); 426 415 ahp->tfm = NULL; 427 416 kfree(ahp); 428 417 }
+52 -38
net/ipv6/esp6.c
··· 24 24 * This file is derived from net/ipv4/esp.c 25 25 */ 26 26 27 + #include <linux/err.h> 27 28 #include <linux/module.h> 28 29 #include <net/ip.h> 29 30 #include <net/xfrm.h> ··· 45 44 int hdr_len; 46 45 struct ipv6hdr *top_iph; 47 46 struct ipv6_esp_hdr *esph; 48 - struct crypto_tfm *tfm; 47 + struct crypto_blkcipher *tfm; 48 + struct blkcipher_desc desc; 49 49 struct esp_data *esp; 50 50 struct sk_buff *trailer; 51 51 int blksize; ··· 69 67 70 68 alen = esp->auth.icv_trunc_len; 71 69 tfm = esp->conf.tfm; 72 - blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); 70 + desc.tfm = tfm; 71 + desc.flags = 0; 72 + blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); 73 73 clen = ALIGN(clen + 2, blksize); 74 74 if (esp->conf.padlen) 75 75 clen = ALIGN(clen, esp->conf.padlen); ··· 100 96 xfrm_aevent_doreplay(x); 101 97 102 98 if (esp->conf.ivlen) 103 - crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 99 + crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen); 104 100 105 101 do { 106 102 struct scatterlist *sg = &esp->sgbuf[0]; ··· 111 107 goto error; 112 108 } 113 109 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); 114 - crypto_cipher_encrypt(tfm, sg, sg, clen); 110 + err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); 115 111 if (unlikely(sg != &esp->sgbuf[0])) 116 112 kfree(sg); 117 113 } while (0); 118 114 115 + if (unlikely(err)) 116 + goto error; 117 + 119 118 if (esp->conf.ivlen) { 120 - memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 121 - crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); 119 + memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); 120 + crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen); 122 121 } 123 122 124 123 if (esp->auth.icv_full_len) { 125 - esp->auth.icv(esp, skb, (u8*)esph-skb->data, 126 - sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); 127 - pskb_put(skb, trailer, alen); 124 + err = esp_mac_digest(esp, 
skb, (u8 *)esph - skb->data, 125 + sizeof(*esph) + esp->conf.ivlen + clen); 126 + memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); 128 127 } 129 - 130 - err = 0; 131 128 132 129 error: 133 130 return err; ··· 139 134 struct ipv6hdr *iph; 140 135 struct ipv6_esp_hdr *esph; 141 136 struct esp_data *esp = x->data; 137 + struct crypto_blkcipher *tfm = esp->conf.tfm; 138 + struct blkcipher_desc desc = { .tfm = tfm }; 142 139 struct sk_buff *trailer; 143 - int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 140 + int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); 144 141 int alen = esp->auth.icv_trunc_len; 145 142 int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen; 146 143 ··· 162 155 163 156 /* If integrity check is required, do this. */ 164 157 if (esp->auth.icv_full_len) { 165 - u8 sum[esp->auth.icv_full_len]; 166 - u8 sum1[alen]; 158 + u8 sum[alen]; 167 159 168 - esp->auth.icv(esp, skb, 0, skb->len-alen, sum); 160 + ret = esp_mac_digest(esp, skb, 0, skb->len - alen); 161 + if (ret) 162 + goto out; 169 163 170 - if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) 164 + if (skb_copy_bits(skb, skb->len - alen, sum, alen)) 171 165 BUG(); 172 166 173 - if (unlikely(memcmp(sum, sum1, alen))) { 167 + if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) { 174 168 x->stats.integrity_failed++; 175 169 ret = -EINVAL; 176 170 goto out; ··· 190 182 191 183 /* Get ivec. This can be wrong, check against another impls. 
*/ 192 184 if (esp->conf.ivlen) 193 - crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); 185 + crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen); 194 186 195 187 { 196 188 u8 nexthdr[2]; ··· 205 197 } 206 198 } 207 199 skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen); 208 - crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); 200 + ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); 209 201 if (unlikely(sg != &esp->sgbuf[0])) 210 202 kfree(sg); 203 + if (unlikely(ret)) 204 + goto out; 211 205 212 206 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) 213 207 BUG(); ··· 235 225 static u32 esp6_get_max_size(struct xfrm_state *x, int mtu) 236 226 { 237 227 struct esp_data *esp = x->data; 238 - u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); 228 + u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); 239 229 240 230 if (x->props.mode) { 241 231 mtu = ALIGN(mtu + 2, blksize); ··· 276 266 if (!esp) 277 267 return; 278 268 279 - crypto_free_tfm(esp->conf.tfm); 269 + crypto_free_blkcipher(esp->conf.tfm); 280 270 esp->conf.tfm = NULL; 281 271 kfree(esp->conf.ivec); 282 272 esp->conf.ivec = NULL; 283 - crypto_free_tfm(esp->auth.tfm); 273 + crypto_free_hash(esp->auth.tfm); 284 274 esp->auth.tfm = NULL; 285 275 kfree(esp->auth.work_icv); 286 276 esp->auth.work_icv = NULL; ··· 290 280 static int esp6_init_state(struct xfrm_state *x) 291 281 { 292 282 struct esp_data *esp = NULL; 283 + struct crypto_blkcipher *tfm; 293 284 294 285 /* null auth and encryption can have zero length keys */ 295 286 if (x->aalg) { ··· 309 298 310 299 if (x->aalg) { 311 300 struct xfrm_algo_desc *aalg_desc; 301 + struct crypto_hash *hash; 312 302 313 303 esp->auth.key = x->aalg->alg_key; 314 304 esp->auth.key_len = (x->aalg->alg_key_len+7)/8; 315 - esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 316 - if (esp->auth.tfm == NULL) 305 + hash = crypto_alloc_hash(x->aalg->alg_name, 0, 306 + 
CRYPTO_ALG_ASYNC); 307 + if (IS_ERR(hash)) 317 308 goto error; 318 - esp->auth.icv = esp_hmac_digest; 309 + 310 + esp->auth.tfm = hash; 311 + if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) 312 + goto error; 319 313 320 314 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 321 315 BUG_ON(!aalg_desc); 322 316 323 317 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 324 - crypto_tfm_alg_digestsize(esp->auth.tfm)) { 325 - printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", 326 - x->aalg->alg_name, 327 - crypto_tfm_alg_digestsize(esp->auth.tfm), 328 - aalg_desc->uinfo.auth.icv_fullbits/8); 329 - goto error; 318 + crypto_hash_digestsize(hash)) { 319 + NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 320 + x->aalg->alg_name, 321 + crypto_hash_digestsize(hash), 322 + aalg_desc->uinfo.auth.icv_fullbits/8); 323 + goto error; 330 324 } 331 325 332 326 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; ··· 343 327 } 344 328 esp->conf.key = x->ealg->alg_key; 345 329 esp->conf.key_len = (x->ealg->alg_key_len+7)/8; 346 - if (x->props.ealgo == SADB_EALG_NULL) 347 - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); 348 - else 349 - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC); 350 - if (esp->conf.tfm == NULL) 330 + tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC); 331 + if (IS_ERR(tfm)) 351 332 goto error; 352 - esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); 333 + esp->conf.tfm = tfm; 334 + esp->conf.ivlen = crypto_blkcipher_ivsize(tfm); 353 335 esp->conf.padlen = 0; 354 336 if (esp->conf.ivlen) { 355 337 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); ··· 355 341 goto error; 356 342 get_random_bytes(esp->conf.ivec, esp->conf.ivlen); 357 343 } 358 - if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len)) 344 + if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len)) 359 345 goto error; 360 346 x->props.header_len = sizeof(struct 
ipv6_esp_hdr) + esp->conf.ivlen; 361 347 if (x->props.mode)
+13 -12
net/ipv6/ipcomp6.c
··· 53 53 54 54 struct ipcomp6_tfms { 55 55 struct list_head list; 56 - struct crypto_tfm **tfms; 56 + struct crypto_comp **tfms; 57 57 int users; 58 58 }; 59 59 ··· 70 70 int plen, dlen; 71 71 struct ipcomp_data *ipcd = x->data; 72 72 u8 *start, *scratch; 73 - struct crypto_tfm *tfm; 73 + struct crypto_comp *tfm; 74 74 int cpu; 75 75 76 76 if (skb_linearize_cow(skb)) ··· 129 129 struct ipcomp_data *ipcd = x->data; 130 130 int plen, dlen; 131 131 u8 *start, *scratch; 132 - struct crypto_tfm *tfm; 132 + struct crypto_comp *tfm; 133 133 int cpu; 134 134 135 135 hdr_len = skb->h.raw - skb->data; ··· 301 301 return scratches; 302 302 } 303 303 304 - static void ipcomp6_free_tfms(struct crypto_tfm **tfms) 304 + static void ipcomp6_free_tfms(struct crypto_comp **tfms) 305 305 { 306 306 struct ipcomp6_tfms *pos; 307 307 int cpu; ··· 323 323 return; 324 324 325 325 for_each_possible_cpu(cpu) { 326 - struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 327 - crypto_free_tfm(tfm); 326 + struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 327 + crypto_free_comp(tfm); 328 328 } 329 329 free_percpu(tfms); 330 330 } 331 331 332 - static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name) 332 + static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name) 333 333 { 334 334 struct ipcomp6_tfms *pos; 335 - struct crypto_tfm **tfms; 335 + struct crypto_comp **tfms; 336 336 int cpu; 337 337 338 338 /* This can be any valid CPU ID so we don't need locking. 
*/ 339 339 cpu = raw_smp_processor_id(); 340 340 341 341 list_for_each_entry(pos, &ipcomp6_tfms_list, list) { 342 - struct crypto_tfm *tfm; 342 + struct crypto_comp *tfm; 343 343 344 344 tfms = pos->tfms; 345 345 tfm = *per_cpu_ptr(tfms, cpu); 346 346 347 - if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) { 347 + if (!strcmp(crypto_comp_name(tfm), alg_name)) { 348 348 pos->users++; 349 349 return tfms; 350 350 } ··· 358 358 INIT_LIST_HEAD(&pos->list); 359 359 list_add(&pos->list, &ipcomp6_tfms_list); 360 360 361 - pos->tfms = tfms = alloc_percpu(struct crypto_tfm *); 361 + pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 362 362 if (!tfms) 363 363 goto error; 364 364 365 365 for_each_possible_cpu(cpu) { 366 - struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 366 + struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 367 + CRYPTO_ALG_ASYNC); 367 368 if (!tfm) 368 369 goto error; 369 370 *per_cpu_ptr(tfms, cpu) = tfm;
+1 -1
net/sctp/endpointola.c
··· 173 173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 174 174 175 175 /* Free up the HMAC transform. */ 176 - sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); 176 + crypto_free_hash(sctp_sk(ep->base.sk)->hmac); 177 177 178 178 /* Cleanup. */ 179 179 sctp_inq_free(&ep->base.inqueue);
+27 -10
net/sctp/sm_make_chunk.c
··· 1282 1282 1283 1283 retval = kmalloc(*cookie_len, GFP_ATOMIC); 1284 1284 1285 - if (!retval) { 1286 - *cookie_len = 0; 1285 + if (!retval) 1287 1286 goto nodata; 1288 - } 1289 1287 1290 1288 /* Clear this memory since we are sending this data structure 1291 1289 * out on the network. ··· 1319 1321 ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); 1320 1322 1321 1323 if (sctp_sk(ep->base.sk)->hmac) { 1324 + struct hash_desc desc; 1325 + 1322 1326 /* Sign the message. */ 1323 1327 sg.page = virt_to_page(&cookie->c); 1324 1328 sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; 1325 1329 sg.length = bodysize; 1326 1330 keylen = SCTP_SECRET_SIZE; 1327 1331 key = (char *)ep->secret_key[ep->current_key]; 1332 + desc.tfm = sctp_sk(ep->base.sk)->hmac; 1333 + desc.flags = 0; 1328 1334 1329 - sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, 1330 - &sg, 1, cookie->signature); 1335 + if (crypto_hash_setkey(desc.tfm, key, keylen) || 1336 + crypto_hash_digest(&desc, &sg, bodysize, cookie->signature)) 1337 + goto free_cookie; 1331 1338 } 1332 1339 1333 - nodata: 1334 1340 return retval; 1341 + 1342 + free_cookie: 1343 + kfree(retval); 1344 + nodata: 1345 + *cookie_len = 0; 1346 + return NULL; 1335 1347 } 1336 1348 1337 1349 /* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */ ··· 1362 1354 sctp_scope_t scope; 1363 1355 struct sk_buff *skb = chunk->skb; 1364 1356 struct timeval tv; 1357 + struct hash_desc desc; 1365 1358 1366 1359 /* Header size is static data prior to the actual cookie, including 1367 1360 * any padding. 
··· 1398 1389 sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; 1399 1390 sg.length = bodysize; 1400 1391 key = (char *)ep->secret_key[ep->current_key]; 1392 + desc.tfm = sctp_sk(ep->base.sk)->hmac; 1393 + desc.flags = 0; 1401 1394 1402 1395 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1403 - sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg, 1404 - 1, digest); 1396 + if (crypto_hash_setkey(desc.tfm, key, keylen) || 1397 + crypto_hash_digest(&desc, &sg, bodysize, digest)) { 1398 + *error = -SCTP_IERROR_NOMEM; 1399 + goto fail; 1400 + } 1405 1401 1406 1402 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1407 1403 /* Try the previous key. */ 1408 1404 key = (char *)ep->secret_key[ep->last_key]; 1409 1405 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1410 - sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, 1411 - &sg, 1, digest); 1406 + if (crypto_hash_setkey(desc.tfm, key, keylen) || 1407 + crypto_hash_digest(&desc, &sg, bodysize, digest)) { 1408 + *error = -SCTP_IERROR_NOMEM; 1409 + goto fail; 1410 + } 1412 1411 1413 1412 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1414 1413 /* Yikes! Still bad signature! */
+3 -3
net/sctp/socket.c
··· 4898 4898 int sctp_inet_listen(struct socket *sock, int backlog) 4899 4899 { 4900 4900 struct sock *sk = sock->sk; 4901 - struct crypto_tfm *tfm=NULL; 4901 + struct crypto_hash *tfm = NULL; 4902 4902 int err = -EINVAL; 4903 4903 4904 4904 if (unlikely(backlog < 0)) ··· 4911 4911 4912 4912 /* Allocate HMAC for generating cookie. */ 4913 4913 if (sctp_hmac_alg) { 4914 - tfm = sctp_crypto_alloc_tfm(sctp_hmac_alg, 0); 4914 + tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); 4915 4915 if (!tfm) { 4916 4916 err = -ENOSYS; 4917 4917 goto out; ··· 4937 4937 sctp_release_sock(sk); 4938 4938 return err; 4939 4939 cleanup: 4940 - sctp_crypto_free_tfm(tfm); 4940 + crypto_free_hash(tfm); 4941 4941 goto out; 4942 4942 } 4943 4943
+56 -39
net/sunrpc/auth_gss/gss_krb5_crypto.c
··· 34 34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 35 35 */ 36 36 37 + #include <linux/err.h> 37 38 #include <linux/types.h> 38 39 #include <linux/mm.h> 39 40 #include <linux/slab.h> ··· 50 49 51 50 u32 52 51 krb5_encrypt( 53 - struct crypto_tfm *tfm, 52 + struct crypto_blkcipher *tfm, 54 53 void * iv, 55 54 void * in, 56 55 void * out, ··· 59 58 u32 ret = -EINVAL; 60 59 struct scatterlist sg[1]; 61 60 u8 local_iv[16] = {0}; 61 + struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; 62 62 63 63 dprintk("RPC: krb5_encrypt: input data:\n"); 64 64 print_hexl((u32 *)in, length, 0); 65 65 66 - if (length % crypto_tfm_alg_blocksize(tfm) != 0) 66 + if (length % crypto_blkcipher_blocksize(tfm) != 0) 67 67 goto out; 68 68 69 - if (crypto_tfm_alg_ivsize(tfm) > 16) { 69 + if (crypto_blkcipher_ivsize(tfm) > 16) { 70 70 dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n", 71 - crypto_tfm_alg_ivsize(tfm)); 71 + crypto_blkcipher_ivsize(tfm)); 72 72 goto out; 73 73 } 74 74 75 75 if (iv) 76 - memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm)); 76 + memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); 77 77 78 78 memcpy(out, in, length); 79 79 sg_set_buf(sg, out, length); 80 80 81 - ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv); 81 + ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); 82 82 83 83 dprintk("RPC: krb5_encrypt: output data:\n"); 84 84 print_hexl((u32 *)out, length, 0); ··· 92 90 93 91 u32 94 92 krb5_decrypt( 95 - struct crypto_tfm *tfm, 93 + struct crypto_blkcipher *tfm, 96 94 void * iv, 97 95 void * in, 98 96 void * out, ··· 101 99 u32 ret = -EINVAL; 102 100 struct scatterlist sg[1]; 103 101 u8 local_iv[16] = {0}; 102 + struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; 104 103 105 104 dprintk("RPC: krb5_decrypt: input data:\n"); 106 105 print_hexl((u32 *)in, length, 0); 107 106 108 - if (length % crypto_tfm_alg_blocksize(tfm) != 0) 107 + if (length % crypto_blkcipher_blocksize(tfm) != 0) 109 108 
goto out; 110 109 111 - if (crypto_tfm_alg_ivsize(tfm) > 16) { 110 + if (crypto_blkcipher_ivsize(tfm) > 16) { 112 111 dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n", 113 - crypto_tfm_alg_ivsize(tfm)); 112 + crypto_blkcipher_ivsize(tfm)); 114 113 goto out; 115 114 } 116 115 if (iv) 117 - memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm)); 116 + memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); 118 117 119 118 memcpy(out, in, length); 120 119 sg_set_buf(sg, out, length); 121 120 122 - ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv); 121 + ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); 123 122 124 123 dprintk("RPC: krb5_decrypt: output_data:\n"); 125 124 print_hexl((u32 *)out, length, 0); ··· 200 197 static int 201 198 checksummer(struct scatterlist *sg, void *data) 202 199 { 203 - struct crypto_tfm *tfm = (struct crypto_tfm *)data; 200 + struct hash_desc *desc = data; 204 201 205 - crypto_digest_update(tfm, sg, 1); 206 - 207 - return 0; 202 + return crypto_hash_update(desc, sg, sg->length); 208 203 } 209 204 210 205 /* checksum the plaintext data and hdrlen bytes of the token header */ ··· 211 210 int body_offset, struct xdr_netobj *cksum) 212 211 { 213 212 char *cksumname; 214 - struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ 213 + struct hash_desc desc; /* XXX add to ctx? 
*/ 215 214 struct scatterlist sg[1]; 215 + int err; 216 216 217 217 switch (cksumtype) { 218 218 case CKSUMTYPE_RSA_MD5: ··· 224 222 " unsupported checksum %d", cksumtype); 225 223 return GSS_S_FAILURE; 226 224 } 227 - if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP))) 225 + desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); 226 + if (IS_ERR(desc.tfm)) 228 227 return GSS_S_FAILURE; 229 - cksum->len = crypto_tfm_alg_digestsize(tfm); 228 + cksum->len = crypto_hash_digestsize(desc.tfm); 229 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 230 230 231 - crypto_digest_init(tfm); 231 + err = crypto_hash_init(&desc); 232 + if (err) 233 + goto out; 232 234 sg_set_buf(sg, header, hdrlen); 233 - crypto_digest_update(tfm, sg, 1); 234 - process_xdr_buf(body, body_offset, body->len - body_offset, 235 - checksummer, tfm); 236 - crypto_digest_final(tfm, cksum->data); 237 - crypto_free_tfm(tfm); 238 - return 0; 235 + err = crypto_hash_update(&desc, sg, hdrlen); 236 + if (err) 237 + goto out; 238 + err = process_xdr_buf(body, body_offset, body->len - body_offset, 239 + checksummer, &desc); 240 + if (err) 241 + goto out; 242 + err = crypto_hash_final(&desc, cksum->data); 243 + 244 + out: 245 + crypto_free_hash(desc.tfm); 246 + return err ? 
GSS_S_FAILURE : 0; 239 247 } 240 248 241 249 EXPORT_SYMBOL(make_checksum); 242 250 243 251 struct encryptor_desc { 244 252 u8 iv[8]; /* XXX hard-coded blocksize */ 245 - struct crypto_tfm *tfm; 253 + struct blkcipher_desc desc; 246 254 int pos; 247 255 struct xdr_buf *outbuf; 248 256 struct page **pages; ··· 297 285 if (thislen == 0) 298 286 return 0; 299 287 300 - ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags, 301 - thislen, desc->iv); 288 + ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, 289 + desc->infrags, thislen); 302 290 if (ret) 303 291 return ret; 304 292 if (fraglen) { ··· 317 305 } 318 306 319 307 int 320 - gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset, 321 - struct page **pages) 308 + gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, 309 + int offset, struct page **pages) 322 310 { 323 311 int ret; 324 312 struct encryptor_desc desc; 325 313 326 - BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); 314 + BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0); 327 315 328 316 memset(desc.iv, 0, sizeof(desc.iv)); 329 - desc.tfm = tfm; 317 + desc.desc.tfm = tfm; 318 + desc.desc.info = desc.iv; 319 + desc.desc.flags = 0; 330 320 desc.pos = offset; 331 321 desc.outbuf = buf; 332 322 desc.pages = pages; ··· 343 329 344 330 struct decryptor_desc { 345 331 u8 iv[8]; /* XXX hard-coded blocksize */ 346 - struct crypto_tfm *tfm; 332 + struct blkcipher_desc desc; 347 333 struct scatterlist frags[4]; 348 334 int fragno; 349 335 int fraglen; ··· 369 355 if (thislen == 0) 370 356 return 0; 371 357 372 - ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags, 373 - thislen, desc->iv); 358 + ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, 359 + desc->frags, thislen); 374 360 if (ret) 375 361 return ret; 376 362 if (fraglen) { ··· 387 373 } 388 374 389 375 int 390 - gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, 
int offset) 376 + gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, 377 + int offset) 391 378 { 392 379 struct decryptor_desc desc; 393 380 394 381 /* XXXJBF: */ 395 - BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); 382 + BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0); 396 383 397 384 memset(desc.iv, 0, sizeof(desc.iv)); 398 - desc.tfm = tfm; 385 + desc.desc.tfm = tfm; 386 + desc.desc.info = desc.iv; 387 + desc.desc.flags = 0; 399 388 desc.fragno = 0; 400 389 desc.fraglen = 0; 401 390 return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+13 -11
net/sunrpc/auth_gss/gss_krb5_mech.c
··· 34 34 * 35 35 */ 36 36 37 + #include <linux/err.h> 37 38 #include <linux/module.h> 38 39 #include <linux/init.h> 39 40 #include <linux/types.h> ··· 79 78 } 80 79 81 80 static inline const void * 82 - get_key(const void *p, const void *end, struct crypto_tfm **res) 81 + get_key(const void *p, const void *end, struct crypto_blkcipher **res) 83 82 { 84 83 struct xdr_netobj key; 85 - int alg, alg_mode; 84 + int alg; 86 85 char *alg_name; 87 86 88 87 p = simple_get_bytes(p, end, &alg, sizeof(alg)); ··· 94 93 95 94 switch (alg) { 96 95 case ENCTYPE_DES_CBC_RAW: 97 - alg_name = "des"; 98 - alg_mode = CRYPTO_TFM_MODE_CBC; 96 + alg_name = "cbc(des)"; 99 97 break; 100 98 default: 101 99 printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); 102 100 goto out_err_free_key; 103 101 } 104 - if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { 102 + *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); 103 + if (IS_ERR(*res)) { 105 104 printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); 105 + *res = NULL; 106 106 goto out_err_free_key; 107 107 } 108 - if (crypto_cipher_setkey(*res, key.data, key.len)) { 108 + if (crypto_blkcipher_setkey(*res, key.data, key.len)) { 109 109 printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); 110 110 goto out_err_free_tfm; 111 111 } ··· 115 113 return p; 116 114 117 115 out_err_free_tfm: 118 - crypto_free_tfm(*res); 116 + crypto_free_blkcipher(*res); 119 117 out_err_free_key: 120 118 kfree(key.data); 121 119 p = ERR_PTR(-EINVAL); ··· 174 172 return 0; 175 173 176 174 out_err_free_key2: 177 - crypto_free_tfm(ctx->seq); 175 + crypto_free_blkcipher(ctx->seq); 178 176 out_err_free_key1: 179 - crypto_free_tfm(ctx->enc); 177 + crypto_free_blkcipher(ctx->enc); 180 178 out_err_free_mech: 181 179 kfree(ctx->mech_used.data); 182 180 out_err_free_ctx: ··· 189 187 gss_delete_sec_context_kerberos(void *internal_ctx) { 190 188 struct krb5_ctx *kctx = internal_ctx; 191 189 192 - 
crypto_free_tfm(kctx->seq); 193 - crypto_free_tfm(kctx->enc); 190 + crypto_free_blkcipher(kctx->seq); 191 + crypto_free_blkcipher(kctx->enc); 194 192 kfree(kctx->mech_used.data); 195 193 kfree(kctx); 196 194 }
+2 -2
net/sunrpc/auth_gss/gss_krb5_seqnum.c
··· 41 41 #endif 42 42 43 43 s32 44 - krb5_make_seq_num(struct crypto_tfm *key, 44 + krb5_make_seq_num(struct crypto_blkcipher *key, 45 45 int direction, 46 46 s32 seqnum, 47 47 unsigned char *cksum, unsigned char *buf) ··· 62 62 } 63 63 64 64 s32 65 - krb5_get_seq_num(struct crypto_tfm *key, 65 + krb5_get_seq_num(struct crypto_blkcipher *key, 66 66 unsigned char *cksum, 67 67 unsigned char *buf, 68 68 int *direction, s32 * seqnum)
+2 -2
net/sunrpc/auth_gss/gss_krb5_wrap.c
··· 149 149 goto out_err; 150 150 } 151 151 152 - blocksize = crypto_tfm_alg_blocksize(kctx->enc); 152 + blocksize = crypto_blkcipher_blocksize(kctx->enc); 153 153 gss_krb5_add_padding(buf, offset, blocksize); 154 154 BUG_ON((buf->len - offset) % blocksize); 155 155 plainlen = blocksize + buf->len - offset; ··· 346 346 /* Copy the data back to the right position. XXX: Would probably be 347 347 * better to copy and encrypt at the same time. */ 348 348 349 - blocksize = crypto_tfm_alg_blocksize(kctx->enc); 349 + blocksize = crypto_blkcipher_blocksize(kctx->enc); 350 350 data_start = ptr + 22 + blocksize; 351 351 orig_start = buf->head[0].iov_base + offset; 352 352 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
+15 -14
net/sunrpc/auth_gss/gss_spkm3_mech.c
··· 34 34 * 35 35 */ 36 36 37 + #include <linux/err.h> 37 38 #include <linux/module.h> 38 39 #include <linux/init.h> 39 40 #include <linux/types.h> ··· 84 83 } 85 84 86 85 static inline const void * 87 - get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) 86 + get_key(const void *p, const void *end, struct crypto_blkcipher **res, 87 + int *resalg) 88 88 { 89 89 struct xdr_netobj key = { 0 }; 90 - int alg_mode,setkey = 0; 90 + int setkey = 0; 91 91 char *alg_name; 92 92 93 93 p = simple_get_bytes(p, end, resalg, sizeof(*resalg)); ··· 100 98 101 99 switch (*resalg) { 102 100 case NID_des_cbc: 103 - alg_name = "des"; 104 - alg_mode = CRYPTO_TFM_MODE_CBC; 101 + alg_name = "cbc(des)"; 105 102 setkey = 1; 106 103 break; 107 104 case NID_cast5_cbc: 108 105 /* XXXX here in name only, not used */ 109 - alg_name = "cast5"; 110 - alg_mode = CRYPTO_TFM_MODE_CBC; 106 + alg_name = "cbc(cast5)"; 111 107 setkey = 0; /* XXX will need to set to 1 */ 112 108 break; 113 109 case NID_md5: ··· 113 113 dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n"); 114 114 } 115 115 alg_name = "md5"; 116 - alg_mode = 0; 117 116 setkey = 0; 118 117 break; 119 118 default: 120 119 dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg); 121 120 goto out_err_free_key; 122 121 } 123 - if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { 122 + *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); 123 + if (IS_ERR(*res)) { 124 124 printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name); 125 + *res = NULL; 125 126 goto out_err_free_key; 126 127 } 127 128 if (setkey) { 128 - if (crypto_cipher_setkey(*res, key.data, key.len)) { 129 + if (crypto_blkcipher_setkey(*res, key.data, key.len)) { 129 130 printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name); 130 131 goto out_err_free_tfm; 131 132 } ··· 137 136 return p; 138 137 139 138 out_err_free_tfm: 140 - crypto_free_tfm(*res); 139 + crypto_free_blkcipher(*res); 141 
140 out_err_free_key: 142 141 if(key.len > 0) 143 142 kfree(key.data); ··· 205 204 return 0; 206 205 207 206 out_err_free_key2: 208 - crypto_free_tfm(ctx->derived_integ_key); 207 + crypto_free_blkcipher(ctx->derived_integ_key); 209 208 out_err_free_key1: 210 - crypto_free_tfm(ctx->derived_conf_key); 209 + crypto_free_blkcipher(ctx->derived_conf_key); 211 210 out_err_free_s_key: 212 211 kfree(ctx->share_key.data); 213 212 out_err_free_mech: ··· 224 223 gss_delete_sec_context_spkm3(void *internal_ctx) { 225 224 struct spkm3_ctx *sctx = internal_ctx; 226 225 227 - crypto_free_tfm(sctx->derived_integ_key); 228 - crypto_free_tfm(sctx->derived_conf_key); 226 + crypto_free_blkcipher(sctx->derived_integ_key); 227 + crypto_free_blkcipher(sctx->derived_conf_key); 229 228 kfree(sctx->share_key.data); 230 229 kfree(sctx->mech_used.data); 231 230 kfree(sctx);
+63 -31
net/xfrm/xfrm_algo.c
··· 30 30 */ 31 31 static struct xfrm_algo_desc aalg_list[] = { 32 32 { 33 - .name = "digest_null", 33 + .name = "hmac(digest_null)", 34 + .compat = "digest_null", 34 35 35 36 .uinfo = { 36 37 .auth = { ··· 48 47 } 49 48 }, 50 49 { 51 - .name = "md5", 50 + .name = "hmac(md5)", 51 + .compat = "md5", 52 52 53 53 .uinfo = { 54 54 .auth = { ··· 66 64 } 67 65 }, 68 66 { 69 - .name = "sha1", 67 + .name = "hmac(sha1)", 68 + .compat = "sha1", 70 69 71 70 .uinfo = { 72 71 .auth = { ··· 84 81 } 85 82 }, 86 83 { 87 - .name = "sha256", 84 + .name = "hmac(sha256)", 85 + .compat = "sha256", 88 86 89 87 .uinfo = { 90 88 .auth = { ··· 102 98 } 103 99 }, 104 100 { 105 - .name = "ripemd160", 101 + .name = "hmac(ripemd160)", 102 + .compat = "ripemd160", 106 103 107 104 .uinfo = { 108 105 .auth = { ··· 123 118 124 119 static struct xfrm_algo_desc ealg_list[] = { 125 120 { 126 - .name = "cipher_null", 121 + .name = "ecb(cipher_null)", 122 + .compat = "cipher_null", 127 123 128 124 .uinfo = { 129 125 .encr = { ··· 141 135 } 142 136 }, 143 137 { 144 - .name = "des", 138 + .name = "cbc(des)", 139 + .compat = "des", 145 140 146 141 .uinfo = { 147 142 .encr = { ··· 159 152 } 160 153 }, 161 154 { 162 - .name = "des3_ede", 155 + .name = "cbc(des3_ede)", 156 + .compat = "des3_ede", 163 157 164 158 .uinfo = { 165 159 .encr = { ··· 177 169 } 178 170 }, 179 171 { 180 - .name = "cast128", 172 + .name = "cbc(cast128)", 173 + .compat = "cast128", 181 174 182 175 .uinfo = { 183 176 .encr = { ··· 195 186 } 196 187 }, 197 188 { 198 - .name = "blowfish", 189 + .name = "cbc(blowfish)", 190 + .compat = "blowfish", 199 191 200 192 .uinfo = { 201 193 .encr = { ··· 213 203 } 214 204 }, 215 205 { 216 - .name = "aes", 206 + .name = "cbc(aes)", 207 + .compat = "aes", 217 208 218 209 .uinfo = { 219 210 .encr = { ··· 231 220 } 232 221 }, 233 222 { 234 - .name = "serpent", 223 + .name = "cbc(serpent)", 224 + .compat = "serpent", 235 225 236 226 .uinfo = { 237 227 .encr = { ··· 249 237 } 250 238 }, 251 239 { 252 - 
.name = "twofish", 240 + .name = "cbc(twofish)", 241 + .compat = "twofish", 253 242 254 243 .uinfo = { 255 244 .encr = { ··· 363 350 EXPORT_SYMBOL_GPL(xfrm_calg_get_byid); 364 351 365 352 static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, 366 - int entries, char *name, 367 - int probe) 353 + int entries, u32 type, u32 mask, 354 + char *name, int probe) 368 355 { 369 356 int i, status; 370 357 ··· 372 359 return NULL; 373 360 374 361 for (i = 0; i < entries; i++) { 375 - if (strcmp(name, list[i].name)) 362 + if (strcmp(name, list[i].name) && 363 + (!list[i].compat || strcmp(name, list[i].compat))) 376 364 continue; 377 365 378 366 if (list[i].available) ··· 382 368 if (!probe) 383 369 break; 384 370 385 - status = crypto_alg_available(name, 0); 371 + status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC); 386 372 if (!status) 387 373 break; 388 374 ··· 394 380 395 381 struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe) 396 382 { 397 - return xfrm_get_byname(aalg_list, aalg_entries(), name, probe); 383 + return xfrm_get_byname(aalg_list, aalg_entries(), 384 + CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK, 385 + name, probe); 398 386 } 399 387 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname); 400 388 401 389 struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe) 402 390 { 403 - return xfrm_get_byname(ealg_list, ealg_entries(), name, probe); 391 + return xfrm_get_byname(ealg_list, ealg_entries(), 392 + CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK, 393 + name, probe); 404 394 } 405 395 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname); 406 396 407 397 struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe) 408 398 { 409 - return xfrm_get_byname(calg_list, calg_entries(), name, probe); 399 + return xfrm_get_byname(calg_list, calg_entries(), 400 + CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK, 401 + name, probe); 410 402 } 411 403 EXPORT_SYMBOL_GPL(xfrm_calg_get_byname); 412 404 ··· 447 427 BUG_ON(in_softirq()); 448 
428 449 429 for (i = 0; i < aalg_entries(); i++) { 450 - status = crypto_alg_available(aalg_list[i].name, 0); 430 + status = crypto_has_hash(aalg_list[i].name, 0, 431 + CRYPTO_ALG_ASYNC); 451 432 if (aalg_list[i].available != status) 452 433 aalg_list[i].available = status; 453 434 } 454 435 455 436 for (i = 0; i < ealg_entries(); i++) { 456 - status = crypto_alg_available(ealg_list[i].name, 0); 437 + status = crypto_has_blkcipher(ealg_list[i].name, 0, 438 + CRYPTO_ALG_ASYNC); 457 439 if (ealg_list[i].available != status) 458 440 ealg_list[i].available = status; 459 441 } 460 442 461 443 for (i = 0; i < calg_entries(); i++) { 462 - status = crypto_alg_available(calg_list[i].name, 0); 444 + status = crypto_has_comp(calg_list[i].name, 0, 445 + CRYPTO_ALG_ASYNC); 463 446 if (calg_list[i].available != status) 464 447 calg_list[i].available = status; 465 448 } ··· 494 471 495 472 /* Move to common area: it is shared with AH. */ 496 473 497 - void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, 498 - int offset, int len, icv_update_fn_t icv_update) 474 + int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, 475 + int offset, int len, icv_update_fn_t icv_update) 499 476 { 500 477 int start = skb_headlen(skb); 501 478 int i, copy = start - offset; 479 + int err; 502 480 struct scatterlist sg; 503 481 504 482 /* Checksum header. 
*/ ··· 511 487 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; 512 488 sg.length = copy; 513 489 514 - icv_update(tfm, &sg, 1); 490 + err = icv_update(desc, &sg, copy); 491 + if (unlikely(err)) 492 + return err; 515 493 516 494 if ((len -= copy) == 0) 517 - return; 495 + return 0; 518 496 offset += copy; 519 497 } 520 498 ··· 536 510 sg.offset = frag->page_offset + offset-start; 537 511 sg.length = copy; 538 512 539 - icv_update(tfm, &sg, 1); 513 + err = icv_update(desc, &sg, copy); 514 + if (unlikely(err)) 515 + return err; 540 516 541 517 if (!(len -= copy)) 542 - return; 518 + return 0; 543 519 offset += copy; 544 520 } 545 521 start = end; ··· 559 531 if ((copy = end - offset) > 0) { 560 532 if (copy > len) 561 533 copy = len; 562 - skb_icv_walk(list, tfm, offset-start, copy, icv_update); 534 + err = skb_icv_walk(list, desc, offset-start, 535 + copy, icv_update); 536 + if (unlikely(err)) 537 + return err; 563 538 if ((len -= copy) == 0) 564 - return; 539 + return 0; 565 540 offset += copy; 566 541 } 567 542 start = end; 568 543 } 569 544 } 570 545 BUG_ON(len); 546 + return 0; 571 547 } 572 548 EXPORT_SYMBOL_GPL(skb_icv_walk); 573 549
+2
net/xfrm/xfrm_user.c
··· 10 10 * 11 11 */ 12 12 13 + #include <linux/crypto.h> 13 14 #include <linux/module.h> 14 15 #include <linux/kernel.h> 15 16 #include <linux/types.h> ··· 213 212 return -ENOMEM; 214 213 215 214 memcpy(p, ualg, len); 215 + strcpy(p->alg_name, algo->name); 216 216 *algpp = p; 217 217 return 0; 218 218 }
+10 -8
security/seclvl.c
··· 16 16 * (at your option) any later version. 17 17 */ 18 18 19 + #include <linux/err.h> 19 20 #include <linux/module.h> 20 21 #include <linux/moduleparam.h> 21 22 #include <linux/kernel.h> ··· 198 197 static int 199 198 plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len) 200 199 { 201 - struct crypto_tfm *tfm; 200 + struct hash_desc desc; 202 201 struct scatterlist sg; 202 + int err; 203 + 203 204 if (len > PAGE_SIZE) { 204 205 seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d " 205 206 "characters). Largest possible is %lu " 206 207 "bytes.\n", len, PAGE_SIZE); 207 208 return -EINVAL; 208 209 } 209 - tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP); 210 - if (tfm == NULL) { 210 + desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); 211 + if (IS_ERR(desc.tfm)) { 211 212 seclvl_printk(0, KERN_ERR, 212 213 "Failed to load transform for SHA1\n"); 213 214 return -EINVAL; 214 215 } 215 216 sg_init_one(&sg, (u8 *)plaintext, len); 216 - crypto_digest_init(tfm); 217 - crypto_digest_update(tfm, &sg, 1); 218 - crypto_digest_final(tfm, hash); 219 - crypto_free_tfm(tfm); 220 - return 0; 217 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 218 + err = crypto_hash_digest(&desc, &sg, len, hash); 219 + crypto_free_hash(desc.tfm); 220 + return err; 221 221 } 222 222 223 223 /**