Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://github.com/herbertx/crypto

* git://github.com/herbertx/crypto: (48 commits)
crypto: user - Depend on NET instead of selecting it
crypto: user - Add dependency on NET
crypto: talitos - handle descriptor not found in error path
crypto: user - Initialise match in crypto_alg_match
crypto: testmgr - add twofish tests
crypto: testmgr - add blowfish test-vectors
crypto: Make hifn_795x build depend on !ARCH_DMA_ADDR_T_64BIT
crypto: twofish-x86_64-3way - fix ctr blocksize to 1
crypto: blowfish-x86_64 - fix ctr blocksize to 1
crypto: whirlpool - count rounds from 0
crypto: Add userspace report for compress type algorithms
crypto: Add userspace report for cipher type algorithms
crypto: Add userspace report for rng type algorithms
crypto: Add userspace report for pcompress type algorithms
crypto: Add userspace report for nivaead type algorithms
crypto: Add userspace report for aead type algorithms
crypto: Add userspace report for givcipher type algorithms
crypto: Add userspace report for ablkcipher type algorithms
crypto: Add userspace report for blkcipher type algorithms
crypto: Add userspace report for ahash type algorithms
...

+4049 -208
+23
Documentation/devicetree/bindings/crypto/picochip-spacc.txt
+ Picochip picoXcell SPAcc (Security Protocol Accelerator) bindings
+
+ Picochip picoXcell devices contain crypto offload engines that may be used for
+ IPSEC and femtocell layer 2 ciphering.
+
+ Required properties:
+   - compatible : "picochip,spacc-ipsec" for the IPSEC offload engine
+     "picochip,spacc-l2" for the femtocell layer 2 ciphering engine.
+   - reg : Offset and length of the register set for this device
+   - interrupt-parent : The interrupt controller that controls the SPAcc
+     interrupt.
+   - interrupts : The interrupt line from the SPAcc.
+   - ref-clock : The input clock that drives the SPAcc.
+
+ Example SPAcc node:
+
+ spacc@10000 {
+ 	compatible = "picochip,spacc-ipsec";
+ 	reg = <0x100000 0x10000>;
+ 	interrupt-parent = <&vic0>;
+ 	interrupts = <24>;
+ 	ref-clock = <&ipsec_clk>, "ref";
+ };
+12
arch/x86/crypto/Makefile
···
  obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
  
  obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
  obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
  obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
  obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
  obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
  
  obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
+ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
  
  aes-i586-y := aes-i586-asm_32.o aes_glue.o
  twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
  salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
  
  aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
+ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
  twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
+ twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
  salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
  
  aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
  
  ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
+ 
+ # enable AVX support only when $(AS) can actually assemble the instructions
+ ifeq ($(call as-instr,vpxor %xmm0$(comma)%xmm1$(comma)%xmm2,yes,no),yes)
+ AFLAGS_sha1_ssse3_asm.o += -DSHA1_ENABLE_AVX_SUPPORT
+ CFLAGS_sha1_ssse3_glue.o += -DSHA1_ENABLE_AVX_SUPPORT
+ endif
+ sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
+1
arch/x86/crypto/aes_glue.c
···
   */
  
  #include <crypto/aes.h>
+ #include <asm/aes.h>
  
  asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
  asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+390
arch/x86/crypto/blowfish-x86_64-asm_64.S
··· 1 + /* 2 + * Blowfish Cipher Algorithm (x86_64) 3 + * 4 + * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 19 + * USA 20 + * 21 + */ 22 + 23 + .file "blowfish-x86_64-asm.S" 24 + .text 25 + 26 + /* structure of crypto context */ 27 + #define p 0 28 + #define s0 ((16 + 2) * 4) 29 + #define s1 ((16 + 2 + (1 * 256)) * 4) 30 + #define s2 ((16 + 2 + (2 * 256)) * 4) 31 + #define s3 ((16 + 2 + (3 * 256)) * 4) 32 + 33 + /* register macros */ 34 + #define CTX %rdi 35 + #define RIO %rsi 36 + 37 + #define RX0 %rax 38 + #define RX1 %rbx 39 + #define RX2 %rcx 40 + #define RX3 %rdx 41 + 42 + #define RX0d %eax 43 + #define RX1d %ebx 44 + #define RX2d %ecx 45 + #define RX3d %edx 46 + 47 + #define RX0bl %al 48 + #define RX1bl %bl 49 + #define RX2bl %cl 50 + #define RX3bl %dl 51 + 52 + #define RX0bh %ah 53 + #define RX1bh %bh 54 + #define RX2bh %ch 55 + #define RX3bh %dh 56 + 57 + #define RT0 %rbp 58 + #define RT1 %rsi 59 + #define RT2 %r8 60 + #define RT3 %r9 61 + 62 + #define RT0d %ebp 63 + #define RT1d %esi 64 + #define RT2d %r8d 65 + #define RT3d %r9d 66 + 67 + #define RKEY %r10 68 + 69 + /*********************************************************************** 70 + * 1-way blowfish 71 + ***********************************************************************/ 72 + #define F() \ 73 + rorq $16, RX0; \ 74 + movzbl RX0bh, RT0d; \ 75 + movzbl RX0bl, RT1d; \ 76 + rolq $16, RX0; \ 77 + movl s0(CTX,RT0,4), RT0d; \ 78 + addl s1(CTX,RT1,4), RT0d; \ 79 + movzbl RX0bh, RT1d; \ 80 + movzbl RX0bl, RT2d; \ 81 + rolq $32, RX0; \ 82 + xorl s2(CTX,RT1,4), RT0d; \ 83 + addl s3(CTX,RT2,4), RT0d; \ 84 + xorq RT0, RX0; 85 + 86 + #define add_roundkey_enc(n) \ 87 + xorq p+4*(n)(CTX), RX0; 88 + 89 + #define round_enc(n) \ 90 + add_roundkey_enc(n); \ 91 + \ 92 + F(); \ 93 + F(); 94 + 95 + #define add_roundkey_dec(n) \ 96 + movq p+4*(n-1)(CTX), RT0; \ 97 + rorq $32, RT0; \ 98 + xorq RT0, RX0; 99 + 100 + #define round_dec(n) \ 101 + add_roundkey_dec(n); \ 102 + \ 103 + F(); \ 104 + F(); \ 105 + 106 + #define read_block() \ 107 + movq (RIO), RX0; \ 108 + rorq $32, RX0; \ 109 + bswapq RX0; 110 + 111 + #define write_block() \ 112 + bswapq RX0; \ 113 + movq RX0, (RIO); 114 + 115 + #define xor_block() \ 116 + bswapq RX0; \ 117 + xorq RX0, (RIO); 118 + 119 + .align 8 120 + .global __blowfish_enc_blk 121 + .type __blowfish_enc_blk,@function; 122 + 123 + __blowfish_enc_blk: 124 + /* input: 125 + * %rdi: ctx, CTX 126 + * %rsi: dst 127 + * %rdx: src 128 + * %rcx: bool, if true: xor output 129 + */ 130 + movq %rbp, %r11; 131 + 132 + movq %rsi, %r10; 133 + movq %rdx, RIO; 134 + 135 + read_block(); 136 + 137 + round_enc(0); 138 + round_enc(2); 139 + round_enc(4); 140 + round_enc(6); 141 + round_enc(8); 142 + round_enc(10); 143 + round_enc(12); 144 + round_enc(14); 145 + 
add_roundkey_enc(16); 146 + 147 + movq %r11, %rbp; 148 + 149 + movq %r10, RIO; 150 + test %cl, %cl; 151 + jnz __enc_xor; 152 + 153 + write_block(); 154 + ret; 155 + __enc_xor: 156 + xor_block(); 157 + ret; 158 + 159 + .align 8 160 + .global blowfish_dec_blk 161 + .type blowfish_dec_blk,@function; 162 + 163 + blowfish_dec_blk: 164 + /* input: 165 + * %rdi: ctx, CTX 166 + * %rsi: dst 167 + * %rdx: src 168 + */ 169 + movq %rbp, %r11; 170 + 171 + movq %rsi, %r10; 172 + movq %rdx, RIO; 173 + 174 + read_block(); 175 + 176 + round_dec(17); 177 + round_dec(15); 178 + round_dec(13); 179 + round_dec(11); 180 + round_dec(9); 181 + round_dec(7); 182 + round_dec(5); 183 + round_dec(3); 184 + add_roundkey_dec(1); 185 + 186 + movq %r10, RIO; 187 + write_block(); 188 + 189 + movq %r11, %rbp; 190 + 191 + ret; 192 + 193 + /********************************************************************** 194 + 4-way blowfish, four blocks parallel 195 + **********************************************************************/ 196 + 197 + /* F() for 4-way. Slower when used alone/1-way, but faster when used 198 + * parallel/4-way (tested on AMD Phenom II & Intel Xeon E7330). 199 + */ 200 + #define F4(x) \ 201 + movzbl x ## bh, RT1d; \ 202 + movzbl x ## bl, RT3d; \ 203 + rorq $16, x; \ 204 + movzbl x ## bh, RT0d; \ 205 + movzbl x ## bl, RT2d; \ 206 + rorq $16, x; \ 207 + movl s0(CTX,RT0,4), RT0d; \ 208 + addl s1(CTX,RT2,4), RT0d; \ 209 + xorl s2(CTX,RT1,4), RT0d; \ 210 + addl s3(CTX,RT3,4), RT0d; \ 211 + xorq RT0, x; 212 + 213 + #define add_preloaded_roundkey4() \ 214 + xorq RKEY, RX0; \ 215 + xorq RKEY, RX1; \ 216 + xorq RKEY, RX2; \ 217 + xorq RKEY, RX3; 218 + 219 + #define preload_roundkey_enc(n) \ 220 + movq p+4*(n)(CTX), RKEY; 221 + 222 + #define add_roundkey_enc4(n) \ 223 + add_preloaded_roundkey4(); \ 224 + preload_roundkey_enc(n + 2); 225 + 226 + #define round_enc4(n) \ 227 + add_roundkey_enc4(n); \ 228 + \ 229 + F4(RX0); \ 230 + F4(RX1); \ 231 + F4(RX2); \ 232 + F4(RX3); \ 233 + \ 234 + F4(RX0); \ 235 + F4(RX1); \ 236 + F4(RX2); \ 237 + F4(RX3); 238 + 239 + #define preload_roundkey_dec(n) \ 240 + movq p+4*((n)-1)(CTX), RKEY; \ 241 + rorq $32, RKEY; 242 + 243 + #define add_roundkey_dec4(n) \ 244 + add_preloaded_roundkey4(); \ 245 + preload_roundkey_dec(n - 2); 246 + 247 + #define round_dec4(n) \ 248 + add_roundkey_dec4(n); \ 249 + \ 250 + F4(RX0); \ 251 + F4(RX1); \ 252 + F4(RX2); \ 253 + F4(RX3); \ 254 + \ 255 + F4(RX0); \ 256 + F4(RX1); \ 257 + F4(RX2); \ 258 + F4(RX3); 259 + 260 + #define read_block4() \ 261 + movq (RIO), RX0; \ 262 + rorq $32, RX0; \ 263 + bswapq RX0; \ 264 + \ 265 + movq 8(RIO), RX1; \ 266 + rorq $32, RX1; \ 267 + bswapq RX1; \ 268 + \ 269 + movq 16(RIO), RX2; \ 270 + rorq $32, RX2; \ 271 + bswapq RX2; \ 272 + \ 273 + movq 24(RIO), RX3; \ 274 + rorq $32, RX3; \ 275 + bswapq RX3; 276 + 277 + #define write_block4() \ 278 + bswapq RX0; \ 279 + movq RX0, (RIO); \ 280 + \ 281 + bswapq RX1; \ 282 + movq RX1, 8(RIO); \ 283 + \ 284 + bswapq RX2; \ 285 + movq RX2, 16(RIO); \ 286 + \ 287 + bswapq RX3; \ 288 + movq RX3, 24(RIO); 289 + 290 + #define xor_block4() \ 291 + bswapq RX0; \ 292 + xorq RX0, (RIO); \ 293 + \ 294 + bswapq RX1; \ 295 + xorq RX1, 8(RIO); \ 296 + \ 297 + bswapq RX2; \ 298 + xorq RX2, 16(RIO); \ 299 + \ 300 + bswapq RX3; \ 301 + xorq RX3, 24(RIO); 302 + 303 + .align 8 304 + .global __blowfish_enc_blk_4way 305 + .type __blowfish_enc_blk_4way,@function; 306 + 307 + __blowfish_enc_blk_4way: 308 + /* input: 309 + * %rdi: ctx, CTX 310 + * %rsi: dst 311 + * %rdx: src 312 + * %rcx: bool, if 
true: xor output 313 + */ 314 + pushq %rbp; 315 + pushq %rbx; 316 + pushq %rcx; 317 + 318 + preload_roundkey_enc(0); 319 + 320 + movq %rsi, %r11; 321 + movq %rdx, RIO; 322 + 323 + read_block4(); 324 + 325 + round_enc4(0); 326 + round_enc4(2); 327 + round_enc4(4); 328 + round_enc4(6); 329 + round_enc4(8); 330 + round_enc4(10); 331 + round_enc4(12); 332 + round_enc4(14); 333 + add_preloaded_roundkey4(); 334 + 335 + popq %rbp; 336 + movq %r11, RIO; 337 + 338 + test %bpl, %bpl; 339 + jnz __enc_xor4; 340 + 341 + write_block4(); 342 + 343 + popq %rbx; 344 + popq %rbp; 345 + ret; 346 + 347 + __enc_xor4: 348 + xor_block4(); 349 + 350 + popq %rbx; 351 + popq %rbp; 352 + ret; 353 + 354 + .align 8 355 + .global blowfish_dec_blk_4way 356 + .type blowfish_dec_blk_4way,@function; 357 + 358 + blowfish_dec_blk_4way: 359 + /* input: 360 + * %rdi: ctx, CTX 361 + * %rsi: dst 362 + * %rdx: src 363 + */ 364 + pushq %rbp; 365 + pushq %rbx; 366 + preload_roundkey_dec(17); 367 + 368 + movq %rsi, %r11; 369 + movq %rdx, RIO; 370 + 371 + read_block4(); 372 + 373 + round_dec4(17); 374 + round_dec4(15); 375 + round_dec4(13); 376 + round_dec4(11); 377 + round_dec4(9); 378 + round_dec4(7); 379 + round_dec4(5); 380 + round_dec4(3); 381 + add_preloaded_roundkey4(); 382 + 383 + movq %r11, RIO; 384 + write_block4(); 385 + 386 + popq %rbx; 387 + popq %rbp; 388 + 389 + ret; 390 +
+492
arch/x86/crypto/blowfish_glue.c
··· 1 + /* 2 + * Glue Code for assembler optimized version of Blowfish 3 + * 4 + * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> 5 + * 6 + * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: 7 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 8 + * CTR part based on code (crypto/ctr.c) by: 9 + * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> 10 + * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License as published by 13 + * the Free Software Foundation; either version 2 of the License, or 14 + * (at your option) any later version. 15 + * 16 + * This program is distributed in the hope that it will be useful, 17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 + * GNU General Public License for more details. 20 + * 21 + * You should have received a copy of the GNU General Public License 22 + * along with this program; if not, write to the Free Software 23 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 24 + * USA 25 + * 26 + */ 27 + 28 + #include <crypto/blowfish.h> 29 + #include <linux/crypto.h> 30 + #include <linux/init.h> 31 + #include <linux/module.h> 32 + #include <linux/types.h> 33 + #include <crypto/algapi.h> 34 + 35 + /* regular block cipher functions */ 36 + asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src, 37 + bool xor); 38 + asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src); 39 + 40 + /* 4-way parallel cipher functions */ 41 + asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, 42 + const u8 *src, bool xor); 43 + asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst, 44 + const u8 *src); 45 + 46 + static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src) 47 + { 48 + __blowfish_enc_blk(ctx, dst, src, false); 49 + } 50 + 51 + static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst, 52 + const u8 *src) 53 + { 54 + __blowfish_enc_blk(ctx, dst, src, true); 55 + } 56 + 57 + static inline void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst, 58 + const u8 *src) 59 + { 60 + __blowfish_enc_blk_4way(ctx, dst, src, false); 61 + } 62 + 63 + static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst, 64 + const u8 *src) 65 + { 66 + __blowfish_enc_blk_4way(ctx, dst, src, true); 67 + } 68 + 69 + static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 70 + { 71 + blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src); 72 + } 73 + 74 + static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 75 + { 76 + blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src); 77 + } 78 + 79 + static struct crypto_alg bf_alg = { 80 + .cra_name = "blowfish", 81 + .cra_driver_name = "blowfish-asm", 82 + .cra_priority = 200, 83 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 84 + .cra_blocksize = BF_BLOCK_SIZE, 85 + .cra_ctxsize = sizeof(struct bf_ctx), 86 + .cra_alignmask = 3, 87 + .cra_module = THIS_MODULE, 88 + .cra_list = LIST_HEAD_INIT(bf_alg.cra_list), 89 + .cra_u = { 90 + .cipher = { 91 + .cia_min_keysize = BF_MIN_KEY_SIZE, 92 + .cia_max_keysize = BF_MAX_KEY_SIZE, 93 + .cia_setkey = blowfish_setkey, 94 + .cia_encrypt = blowfish_encrypt, 95 + .cia_decrypt = blowfish_decrypt, 96 + } 97 + } 98 + }; 99 + 100 + static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, 101 + void (*fn)(struct bf_ctx *, u8 
*, const u8 *), 102 + void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *)) 103 + { 104 + struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 105 + unsigned int bsize = BF_BLOCK_SIZE; 106 + unsigned int nbytes; 107 + int err; 108 + 109 + err = blkcipher_walk_virt(desc, walk); 110 + 111 + while ((nbytes = walk->nbytes)) { 112 + u8 *wsrc = walk->src.virt.addr; 113 + u8 *wdst = walk->dst.virt.addr; 114 + 115 + /* Process four block batch */ 116 + if (nbytes >= bsize * 4) { 117 + do { 118 + fn_4way(ctx, wdst, wsrc); 119 + 120 + wsrc += bsize * 4; 121 + wdst += bsize * 4; 122 + nbytes -= bsize * 4; 123 + } while (nbytes >= bsize * 4); 124 + 125 + if (nbytes < bsize) 126 + goto done; 127 + } 128 + 129 + /* Handle leftovers */ 130 + do { 131 + fn(ctx, wdst, wsrc); 132 + 133 + wsrc += bsize; 134 + wdst += bsize; 135 + nbytes -= bsize; 136 + } while (nbytes >= bsize); 137 + 138 + done: 139 + err = blkcipher_walk_done(desc, walk, nbytes); 140 + } 141 + 142 + return err; 143 + } 144 + 145 + static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 146 + struct scatterlist *src, unsigned int nbytes) 147 + { 148 + struct blkcipher_walk walk; 149 + 150 + blkcipher_walk_init(&walk, dst, src, nbytes); 151 + return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way); 152 + } 153 + 154 + static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 155 + struct scatterlist *src, unsigned int nbytes) 156 + { 157 + struct blkcipher_walk walk; 158 + 159 + blkcipher_walk_init(&walk, dst, src, nbytes); 160 + return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way); 161 + } 162 + 163 + static struct crypto_alg blk_ecb_alg = { 164 + .cra_name = "ecb(blowfish)", 165 + .cra_driver_name = "ecb-blowfish-asm", 166 + .cra_priority = 300, 167 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 168 + .cra_blocksize = BF_BLOCK_SIZE, 169 + .cra_ctxsize = sizeof(struct bf_ctx), 170 + .cra_alignmask = 0, 171 + .cra_type = &crypto_blkcipher_type, 172 + .cra_module = THIS_MODULE, 173 + .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list), 174 + .cra_u = { 175 + .blkcipher = { 176 + .min_keysize = BF_MIN_KEY_SIZE, 177 + .max_keysize = BF_MAX_KEY_SIZE, 178 + .setkey = blowfish_setkey, 179 + .encrypt = ecb_encrypt, 180 + .decrypt = ecb_decrypt, 181 + }, 182 + }, 183 + }; 184 + 185 + static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, 186 + struct blkcipher_walk *walk) 187 + { 188 + struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 189 + unsigned int bsize = BF_BLOCK_SIZE; 190 + unsigned int nbytes = walk->nbytes; 191 + u64 *src = (u64 *)walk->src.virt.addr; 192 + u64 *dst = (u64 *)walk->dst.virt.addr; 193 + u64 *iv = (u64 *)walk->iv; 194 + 195 + do { 196 + *dst = *src ^ *iv; 197 + blowfish_enc_blk(ctx, (u8 *)dst, (u8 *)dst); 198 + iv = dst; 199 + 200 + src += 1; 201 + dst += 1; 202 + nbytes -= bsize; 203 + } while (nbytes >= bsize); 204 + 205 + *(u64 *)walk->iv = *iv; 206 + return nbytes; 207 + } 208 + 209 + static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 210 + struct scatterlist *src, unsigned int nbytes) 211 + { 212 + struct blkcipher_walk walk; 213 + int err; 214 + 215 + blkcipher_walk_init(&walk, dst, src, nbytes); 216 + err = blkcipher_walk_virt(desc, &walk); 217 + 218 + while ((nbytes = walk.nbytes)) { 219 + nbytes = __cbc_encrypt(desc, &walk); 220 + err = blkcipher_walk_done(desc, &walk, nbytes); 221 + } 222 + 223 + return err; 224 + } 225 + 226 + static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, 227 + struct 
blkcipher_walk *walk) 228 + { 229 + struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 230 + unsigned int bsize = BF_BLOCK_SIZE; 231 + unsigned int nbytes = walk->nbytes; 232 + u64 *src = (u64 *)walk->src.virt.addr; 233 + u64 *dst = (u64 *)walk->dst.virt.addr; 234 + u64 ivs[4 - 1]; 235 + u64 last_iv; 236 + 237 + /* Start of the last block. */ 238 + src += nbytes / bsize - 1; 239 + dst += nbytes / bsize - 1; 240 + 241 + last_iv = *src; 242 + 243 + /* Process four block batch */ 244 + if (nbytes >= bsize * 4) { 245 + do { 246 + nbytes -= bsize * 4 - bsize; 247 + src -= 4 - 1; 248 + dst -= 4 - 1; 249 + 250 + ivs[0] = src[0]; 251 + ivs[1] = src[1]; 252 + ivs[2] = src[2]; 253 + 254 + blowfish_dec_blk_4way(ctx, (u8 *)dst, (u8 *)src); 255 + 256 + dst[1] ^= ivs[0]; 257 + dst[2] ^= ivs[1]; 258 + dst[3] ^= ivs[2]; 259 + 260 + nbytes -= bsize; 261 + if (nbytes < bsize) 262 + goto done; 263 + 264 + *dst ^= *(src - 1); 265 + src -= 1; 266 + dst -= 1; 267 + } while (nbytes >= bsize * 4); 268 + 269 + if (nbytes < bsize) 270 + goto done; 271 + } 272 + 273 + /* Handle leftovers */ 274 + for (;;) { 275 + blowfish_dec_blk(ctx, (u8 *)dst, (u8 *)src); 276 + 277 + nbytes -= bsize; 278 + if (nbytes < bsize) 279 + break; 280 + 281 + *dst ^= *(src - 1); 282 + src -= 1; 283 + dst -= 1; 284 + } 285 + 286 + done: 287 + *dst ^= *(u64 *)walk->iv; 288 + *(u64 *)walk->iv = last_iv; 289 + 290 + return nbytes; 291 + } 292 + 293 + static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 294 + struct scatterlist *src, unsigned int nbytes) 295 + { 296 + struct blkcipher_walk walk; 297 + int err; 298 + 299 + blkcipher_walk_init(&walk, dst, src, nbytes); 300 + err = blkcipher_walk_virt(desc, &walk); 301 + 302 + while ((nbytes = walk.nbytes)) { 303 + nbytes = __cbc_decrypt(desc, &walk); 304 + err = blkcipher_walk_done(desc, &walk, nbytes); 305 + } 306 + 307 + return err; 308 + } 309 + 310 + static struct crypto_alg blk_cbc_alg = { 311 + .cra_name = "cbc(blowfish)", 312 + .cra_driver_name = "cbc-blowfish-asm", 313 + .cra_priority = 300, 314 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 315 + .cra_blocksize = BF_BLOCK_SIZE, 316 + .cra_ctxsize = sizeof(struct bf_ctx), 317 + .cra_alignmask = 0, 318 + .cra_type = &crypto_blkcipher_type, 319 + .cra_module = THIS_MODULE, 320 + .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list), 321 + .cra_u = { 322 + .blkcipher = { 323 + .min_keysize = BF_MIN_KEY_SIZE, 324 + .max_keysize = BF_MAX_KEY_SIZE, 325 + .ivsize = BF_BLOCK_SIZE, 326 + .setkey = blowfish_setkey, 327 + .encrypt = cbc_encrypt, 328 + .decrypt = cbc_decrypt, 329 + }, 330 + }, 331 + }; 332 + 333 + static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk) 334 + { 335 + u8 *ctrblk = walk->iv; 336 + u8 keystream[BF_BLOCK_SIZE]; 337 + u8 *src = walk->src.virt.addr; 338 + u8 *dst = walk->dst.virt.addr; 339 + unsigned int nbytes = walk->nbytes; 340 + 341 + blowfish_enc_blk(ctx, keystream, ctrblk); 342 + crypto_xor(keystream, src, nbytes); 343 + memcpy(dst, keystream, nbytes); 344 + 345 + crypto_inc(ctrblk, BF_BLOCK_SIZE); 346 + } 347 + 348 + static unsigned int __ctr_crypt(struct blkcipher_desc *desc, 349 + struct blkcipher_walk *walk) 350 + { 351 + struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 352 + unsigned int bsize = BF_BLOCK_SIZE; 353 + unsigned int nbytes = walk->nbytes; 354 + u64 *src = (u64 *)walk->src.virt.addr; 355 + u64 *dst = (u64 *)walk->dst.virt.addr; 356 + u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); 357 + __be64 ctrblocks[4]; 358 + 359 + /* Process four block batch */ 360 + if 
(nbytes >= bsize * 4) { 361 + do { 362 + if (dst != src) { 363 + dst[0] = src[0]; 364 + dst[1] = src[1]; 365 + dst[2] = src[2]; 366 + dst[3] = src[3]; 367 + } 368 + 369 + /* create ctrblks for parallel encrypt */ 370 + ctrblocks[0] = cpu_to_be64(ctrblk++); 371 + ctrblocks[1] = cpu_to_be64(ctrblk++); 372 + ctrblocks[2] = cpu_to_be64(ctrblk++); 373 + ctrblocks[3] = cpu_to_be64(ctrblk++); 374 + 375 + blowfish_enc_blk_xor_4way(ctx, (u8 *)dst, 376 + (u8 *)ctrblocks); 377 + 378 + src += 4; 379 + dst += 4; 380 + } while ((nbytes -= bsize * 4) >= bsize * 4); 381 + 382 + if (nbytes < bsize) 383 + goto done; 384 + } 385 + 386 + /* Handle leftovers */ 387 + do { 388 + if (dst != src) 389 + *dst = *src; 390 + 391 + ctrblocks[0] = cpu_to_be64(ctrblk++); 392 + 393 + blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks); 394 + 395 + src += 1; 396 + dst += 1; 397 + } while ((nbytes -= bsize) >= bsize); 398 + 399 + done: 400 + *(__be64 *)walk->iv = cpu_to_be64(ctrblk); 401 + return nbytes; 402 + } 403 + 404 + static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 405 + struct scatterlist *src, unsigned int nbytes) 406 + { 407 + struct blkcipher_walk walk; 408 + int err; 409 + 410 + blkcipher_walk_init(&walk, dst, src, nbytes); 411 + err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE); 412 + 413 + while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) { 414 + nbytes = __ctr_crypt(desc, &walk); 415 + err = blkcipher_walk_done(desc, &walk, nbytes); 416 + } 417 + 418 + if (walk.nbytes) { 419 + ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); 420 + err = blkcipher_walk_done(desc, &walk, 0); 421 + } 422 + 423 + return err; 424 + } 425 + 426 + static struct crypto_alg blk_ctr_alg = { 427 + .cra_name = "ctr(blowfish)", 428 + .cra_driver_name = "ctr-blowfish-asm", 429 + .cra_priority = 300, 430 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 431 + .cra_blocksize = 1, 432 + .cra_ctxsize = sizeof(struct bf_ctx), 433 + .cra_alignmask = 0, 434 + .cra_type = &crypto_blkcipher_type, 435 + .cra_module = THIS_MODULE, 436 + .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list), 437 + .cra_u = { 438 + .blkcipher = { 439 + .min_keysize = BF_MIN_KEY_SIZE, 440 + .max_keysize = BF_MAX_KEY_SIZE, 441 + .ivsize = BF_BLOCK_SIZE, 442 + .setkey = blowfish_setkey, 443 + .encrypt = ctr_crypt, 444 + .decrypt = ctr_crypt, 445 + }, 446 + }, 447 + }; 448 + 449 + static int __init init(void) 450 + { 451 + int err; 452 + 453 + err = crypto_register_alg(&bf_alg); 454 + if (err) 455 + goto bf_err; 456 + err = crypto_register_alg(&blk_ecb_alg); 457 + if (err) 458 + goto ecb_err; 459 + err = crypto_register_alg(&blk_cbc_alg); 460 + if (err) 461 + goto cbc_err; 462 + err = crypto_register_alg(&blk_ctr_alg); 463 + if (err) 464 + goto ctr_err; 465 + 466 + return 0; 467 + 468 + ctr_err: 469 + crypto_unregister_alg(&blk_cbc_alg); 470 + cbc_err: 471 + crypto_unregister_alg(&blk_ecb_alg); 472 + ecb_err: 473 + crypto_unregister_alg(&bf_alg); 474 + bf_err: 475 + return err; 476 + } 477 + 478 + static void __exit fini(void) 479 + { 480 + crypto_unregister_alg(&blk_ctr_alg); 481 + crypto_unregister_alg(&blk_cbc_alg); 482 + crypto_unregister_alg(&blk_ecb_alg); 483 + crypto_unregister_alg(&bf_alg); 484 + } 485 + 486 + module_init(init); 487 + module_exit(fini); 488 + 489 + MODULE_LICENSE("GPL"); 490 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized"); 491 + MODULE_ALIAS("blowfish"); 492 + MODULE_ALIAS("blowfish-asm");
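The glue module above only registers "blowfish", "ecb(blowfish)", "cbc(blowfish)" and "ctr(blowfish)"; it has no in-kernel callers of its own. As a hedged sketch (not part of this merge; function and buffer names are illustrative), a caller of the era's blkcipher API picks up the assembler implementation automatically, because "ecb-blowfish-asm" registers with a higher priority than the generic code:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: encrypt a BF_BLOCK_SIZE-multiple buffer in place. */
static int example_ecb_blowfish_encrypt(u8 *buf, unsigned int len,
					const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	/* Resolves to the highest-priority "ecb(blowfish)" provider. */
	tfm = crypto_alloc_blkcipher("ecb(blowfish)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	sg_init_one(&sg, buf, len);
	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}

The CBC and CTR modes are requested the same way ("cbc(blowfish)", "ctr(blowfish)"); only CTR tolerates a trailing partial block, which is why blk_ctr_alg above registers with cra_blocksize = 1.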
+558
arch/x86/crypto/sha1_ssse3_asm.S
··· 1 + /* 2 + * This is a SIMD SHA-1 implementation. It requires the Intel(R) Supplemental 3 + * SSE3 instruction set extensions introduced in Intel Core Microarchitecture 4 + * processors. CPUs supporting Intel(R) AVX extensions will get an additional 5 + * boost. 6 + * 7 + * This work was inspired by the vectorized implementation of Dean Gaudet. 8 + * Additional information on it can be found at: 9 + * http://www.arctic.org/~dean/crypto/sha1.html 10 + * 11 + * It was improved upon with more efficient vectorization of the message 12 + * scheduling. This implementation has also been optimized for all current and 13 + * several future generations of Intel CPUs. 14 + * 15 + * See this article for more information about the implementation details: 16 + * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/ 17 + * 18 + * Copyright (C) 2010, Intel Corp. 19 + * Authors: Maxim Locktyukhin <maxim.locktyukhin@intel.com> 20 + * Ronen Zohar <ronen.zohar@intel.com> 21 + * 22 + * Converted to AT&T syntax and adapted for inclusion in the Linux kernel: 23 + * Author: Mathias Krause <minipli@googlemail.com> 24 + * 25 + * This program is free software; you can redistribute it and/or modify 26 + * it under the terms of the GNU General Public License as published by 27 + * the Free Software Foundation; either version 2 of the License, or 28 + * (at your option) any later version. 29 + */ 30 + 31 + #define CTX %rdi // arg1 32 + #define BUF %rsi // arg2 33 + #define CNT %rdx // arg3 34 + 35 + #define REG_A %ecx 36 + #define REG_B %esi 37 + #define REG_C %edi 38 + #define REG_D %ebp 39 + #define REG_E %edx 40 + 41 + #define REG_T1 %eax 42 + #define REG_T2 %ebx 43 + 44 + #define K_BASE %r8 45 + #define HASH_PTR %r9 46 + #define BUFFER_PTR %r10 47 + #define BUFFER_END %r11 48 + 49 + #define W_TMP1 %xmm0 50 + #define W_TMP2 %xmm9 51 + 52 + #define W0 %xmm1 53 + #define W4 %xmm2 54 + #define W8 %xmm3 55 + #define W12 %xmm4 56 + #define W16 %xmm5 57 + #define W20 %xmm6 58 + #define W24 %xmm7 59 + #define W28 %xmm8 60 + 61 + #define XMM_SHUFB_BSWAP %xmm10 62 + 63 + /* we keep window of 64 w[i]+K pre-calculated values in a circular buffer */ 64 + #define WK(t) (((t) & 15) * 4)(%rsp) 65 + #define W_PRECALC_AHEAD 16 66 + 67 + /* 68 + * This macro implements the SHA-1 function's body for single 64-byte block 69 + * param: function's name 70 + */ 71 + .macro SHA1_VECTOR_ASM name 72 + .global \name 73 + .type \name, @function 74 + .align 32 75 + \name: 76 + push %rbx 77 + push %rbp 78 + push %r12 79 + 80 + mov %rsp, %r12 81 + sub $64, %rsp # allocate workspace 82 + and $~15, %rsp # align stack 83 + 84 + mov CTX, HASH_PTR 85 + mov BUF, BUFFER_PTR 86 + 87 + shl $6, CNT # multiply by 64 88 + add BUF, CNT 89 + mov CNT, BUFFER_END 90 + 91 + lea K_XMM_AR(%rip), K_BASE 92 + xmm_mov BSWAP_SHUFB_CTL(%rip), XMM_SHUFB_BSWAP 93 + 94 + SHA1_PIPELINED_MAIN_BODY 95 + 96 + # cleanup workspace 97 + mov $8, %ecx 98 + mov %rsp, %rdi 99 + xor %rax, %rax 100 + rep stosq 101 + 102 + mov %r12, %rsp # deallocate workspace 103 + 104 + pop %r12 105 + pop %rbp 106 + pop %rbx 107 + ret 108 + 109 + .size \name, .-\name 110 + .endm 111 + 112 + /* 113 + * This macro implements 80 rounds of SHA-1 for one 64-byte block 114 + */ 115 + .macro SHA1_PIPELINED_MAIN_BODY 116 + INIT_REGALLOC 117 + 118 + mov (HASH_PTR), A 119 + mov 4(HASH_PTR), B 120 + mov 8(HASH_PTR), C 121 + mov 12(HASH_PTR), D 122 + mov 16(HASH_PTR), E 123 + 124 + .set i, 0 125 + .rept W_PRECALC_AHEAD 126 + W_PRECALC i 127 + .set i, (i+1) 128 + 
.endr 129 + 130 + .align 4 131 + 1: 132 + RR F1,A,B,C,D,E,0 133 + RR F1,D,E,A,B,C,2 134 + RR F1,B,C,D,E,A,4 135 + RR F1,E,A,B,C,D,6 136 + RR F1,C,D,E,A,B,8 137 + 138 + RR F1,A,B,C,D,E,10 139 + RR F1,D,E,A,B,C,12 140 + RR F1,B,C,D,E,A,14 141 + RR F1,E,A,B,C,D,16 142 + RR F1,C,D,E,A,B,18 143 + 144 + RR F2,A,B,C,D,E,20 145 + RR F2,D,E,A,B,C,22 146 + RR F2,B,C,D,E,A,24 147 + RR F2,E,A,B,C,D,26 148 + RR F2,C,D,E,A,B,28 149 + 150 + RR F2,A,B,C,D,E,30 151 + RR F2,D,E,A,B,C,32 152 + RR F2,B,C,D,E,A,34 153 + RR F2,E,A,B,C,D,36 154 + RR F2,C,D,E,A,B,38 155 + 156 + RR F3,A,B,C,D,E,40 157 + RR F3,D,E,A,B,C,42 158 + RR F3,B,C,D,E,A,44 159 + RR F3,E,A,B,C,D,46 160 + RR F3,C,D,E,A,B,48 161 + 162 + RR F3,A,B,C,D,E,50 163 + RR F3,D,E,A,B,C,52 164 + RR F3,B,C,D,E,A,54 165 + RR F3,E,A,B,C,D,56 166 + RR F3,C,D,E,A,B,58 167 + 168 + add $64, BUFFER_PTR # move to the next 64-byte block 169 + cmp BUFFER_END, BUFFER_PTR # if the current is the last one use 170 + cmovae K_BASE, BUFFER_PTR # dummy source to avoid buffer overrun 171 + 172 + RR F4,A,B,C,D,E,60 173 + RR F4,D,E,A,B,C,62 174 + RR F4,B,C,D,E,A,64 175 + RR F4,E,A,B,C,D,66 176 + RR F4,C,D,E,A,B,68 177 + 178 + RR F4,A,B,C,D,E,70 179 + RR F4,D,E,A,B,C,72 180 + RR F4,B,C,D,E,A,74 181 + RR F4,E,A,B,C,D,76 182 + RR F4,C,D,E,A,B,78 183 + 184 + UPDATE_HASH (HASH_PTR), A 185 + UPDATE_HASH 4(HASH_PTR), B 186 + UPDATE_HASH 8(HASH_PTR), C 187 + UPDATE_HASH 12(HASH_PTR), D 188 + UPDATE_HASH 16(HASH_PTR), E 189 + 190 + RESTORE_RENAMED_REGS 191 + cmp K_BASE, BUFFER_PTR # K_BASE means, we reached the end 192 + jne 1b 193 + .endm 194 + 195 + .macro INIT_REGALLOC 196 + .set A, REG_A 197 + .set B, REG_B 198 + .set C, REG_C 199 + .set D, REG_D 200 + .set E, REG_E 201 + .set T1, REG_T1 202 + .set T2, REG_T2 203 + .endm 204 + 205 + .macro RESTORE_RENAMED_REGS 206 + # order is important (REG_C is where it should be) 207 + mov B, REG_B 208 + mov D, REG_D 209 + mov A, REG_A 210 + mov E, REG_E 211 + .endm 212 + 213 + .macro SWAP_REG_NAMES a, b 214 + .set _T, \a 215 + .set \a, \b 216 + .set \b, _T 217 + .endm 218 + 219 + .macro F1 b, c, d 220 + mov \c, T1 221 + SWAP_REG_NAMES \c, T1 222 + xor \d, T1 223 + and \b, T1 224 + xor \d, T1 225 + .endm 226 + 227 + .macro F2 b, c, d 228 + mov \d, T1 229 + SWAP_REG_NAMES \d, T1 230 + xor \c, T1 231 + xor \b, T1 232 + .endm 233 + 234 + .macro F3 b, c ,d 235 + mov \c, T1 236 + SWAP_REG_NAMES \c, T1 237 + mov \b, T2 238 + or \b, T1 239 + and \c, T2 240 + and \d, T1 241 + or T2, T1 242 + .endm 243 + 244 + .macro F4 b, c, d 245 + F2 \b, \c, \d 246 + .endm 247 + 248 + .macro UPDATE_HASH hash, val 249 + add \hash, \val 250 + mov \val, \hash 251 + .endm 252 + 253 + /* 254 + * RR does two rounds of SHA-1 back to back with W[] pre-calc 255 + * t1 = F(b, c, d); e += w(i) 256 + * e += t1; b <<= 30; d += w(i+1); 257 + * t1 = F(a, b, c); 258 + * d += t1; a <<= 5; 259 + * e += a; 260 + * t1 = e; a >>= 7; 261 + * t1 <<= 5; 262 + * d += t1; 263 + */ 264 + .macro RR F, a, b, c, d, e, round 265 + add WK(\round), \e 266 + \F \b, \c, \d # t1 = F(b, c, d); 267 + W_PRECALC (\round + W_PRECALC_AHEAD) 268 + rol $30, \b 269 + add T1, \e 270 + add WK(\round + 1), \d 271 + 272 + \F \a, \b, \c 273 + W_PRECALC (\round + W_PRECALC_AHEAD + 1) 274 + rol $5, \a 275 + add \a, \e 276 + add T1, \d 277 + ror $7, \a # (a <<r 5) >>r 7) => a <<r 30) 278 + 279 + mov \e, T1 280 + SWAP_REG_NAMES \e, T1 281 + 282 + rol $5, T1 283 + add T1, \d 284 + 285 + # write: \a, \b 286 + # rotate: \a<=\d, \b<=\e, \c<=\a, \d<=\b, \e<=\c 287 + .endm 288 + 289 + .macro W_PRECALC r 290 + .set i, \r 291 
+ 292 + .if (i < 20) 293 + .set K_XMM, 0 294 + .elseif (i < 40) 295 + .set K_XMM, 16 296 + .elseif (i < 60) 297 + .set K_XMM, 32 298 + .elseif (i < 80) 299 + .set K_XMM, 48 300 + .endif 301 + 302 + .if ((i < 16) || ((i >= 80) && (i < (80 + W_PRECALC_AHEAD)))) 303 + .set i, ((\r) % 80) # pre-compute for the next iteration 304 + .if (i == 0) 305 + W_PRECALC_RESET 306 + .endif 307 + W_PRECALC_00_15 308 + .elseif (i<32) 309 + W_PRECALC_16_31 310 + .elseif (i < 80) // rounds 32-79 311 + W_PRECALC_32_79 312 + .endif 313 + .endm 314 + 315 + .macro W_PRECALC_RESET 316 + .set W, W0 317 + .set W_minus_04, W4 318 + .set W_minus_08, W8 319 + .set W_minus_12, W12 320 + .set W_minus_16, W16 321 + .set W_minus_20, W20 322 + .set W_minus_24, W24 323 + .set W_minus_28, W28 324 + .set W_minus_32, W 325 + .endm 326 + 327 + .macro W_PRECALC_ROTATE 328 + .set W_minus_32, W_minus_28 329 + .set W_minus_28, W_minus_24 330 + .set W_minus_24, W_minus_20 331 + .set W_minus_20, W_minus_16 332 + .set W_minus_16, W_minus_12 333 + .set W_minus_12, W_minus_08 334 + .set W_minus_08, W_minus_04 335 + .set W_minus_04, W 336 + .set W, W_minus_32 337 + .endm 338 + 339 + .macro W_PRECALC_SSSE3 340 + 341 + .macro W_PRECALC_00_15 342 + W_PRECALC_00_15_SSSE3 343 + .endm 344 + .macro W_PRECALC_16_31 345 + W_PRECALC_16_31_SSSE3 346 + .endm 347 + .macro W_PRECALC_32_79 348 + W_PRECALC_32_79_SSSE3 349 + .endm 350 + 351 + /* message scheduling pre-compute for rounds 0-15 */ 352 + .macro W_PRECALC_00_15_SSSE3 353 + .if ((i & 3) == 0) 354 + movdqu (i*4)(BUFFER_PTR), W_TMP1 355 + .elseif ((i & 3) == 1) 356 + pshufb XMM_SHUFB_BSWAP, W_TMP1 357 + movdqa W_TMP1, W 358 + .elseif ((i & 3) == 2) 359 + paddd (K_BASE), W_TMP1 360 + .elseif ((i & 3) == 3) 361 + movdqa W_TMP1, WK(i&~3) 362 + W_PRECALC_ROTATE 363 + .endif 364 + .endm 365 + 366 + /* message scheduling pre-compute for rounds 16-31 367 + * 368 + * - calculating last 32 w[i] values in 8 XMM registers 369 + * - pre-calculate K+w[i] values and store to mem, for later load by ALU add 370 + * instruction 371 + * 372 + * some "heavy-lifting" vectorization for rounds 16-31 due to w[i]->w[i-3] 373 + * dependency, but improves for 32-79 374 + */ 375 + .macro W_PRECALC_16_31_SSSE3 376 + # blended scheduling of vector and scalar instruction streams, one 4-wide 377 + # vector iteration / 4 scalar rounds 378 + .if ((i & 3) == 0) 379 + movdqa W_minus_12, W 380 + palignr $8, W_minus_16, W # w[i-14] 381 + movdqa W_minus_04, W_TMP1 382 + psrldq $4, W_TMP1 # w[i-3] 383 + pxor W_minus_08, W 384 + .elseif ((i & 3) == 1) 385 + pxor W_minus_16, W_TMP1 386 + pxor W_TMP1, W 387 + movdqa W, W_TMP2 388 + movdqa W, W_TMP1 389 + pslldq $12, W_TMP2 390 + .elseif ((i & 3) == 2) 391 + psrld $31, W 392 + pslld $1, W_TMP1 393 + por W, W_TMP1 394 + movdqa W_TMP2, W 395 + psrld $30, W_TMP2 396 + pslld $2, W 397 + .elseif ((i & 3) == 3) 398 + pxor W, W_TMP1 399 + pxor W_TMP2, W_TMP1 400 + movdqa W_TMP1, W 401 + paddd K_XMM(K_BASE), W_TMP1 402 + movdqa W_TMP1, WK(i&~3) 403 + W_PRECALC_ROTATE 404 + .endif 405 + .endm 406 + 407 + /* message scheduling pre-compute for rounds 32-79 408 + * 409 + * in SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 410 + * instead we do equal: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 411 + * allows more efficient vectorization since w[i]=>w[i-3] dependency is broken 412 + */ 413 + .macro W_PRECALC_32_79_SSSE3 414 + .if ((i & 3) == 0) 415 + movdqa W_minus_04, W_TMP1 416 + pxor W_minus_28, W # W is W_minus_32 before xor 417 + palignr $8, W_minus_08, W_TMP1 418 
+ .elseif ((i & 3) == 1) 419 + pxor W_minus_16, W 420 + pxor W_TMP1, W 421 + movdqa W, W_TMP1 422 + .elseif ((i & 3) == 2) 423 + psrld $30, W 424 + pslld $2, W_TMP1 425 + por W, W_TMP1 426 + .elseif ((i & 3) == 3) 427 + movdqa W_TMP1, W 428 + paddd K_XMM(K_BASE), W_TMP1 429 + movdqa W_TMP1, WK(i&~3) 430 + W_PRECALC_ROTATE 431 + .endif 432 + .endm 433 + 434 + .endm // W_PRECALC_SSSE3 435 + 436 + 437 + #define K1 0x5a827999 438 + #define K2 0x6ed9eba1 439 + #define K3 0x8f1bbcdc 440 + #define K4 0xca62c1d6 441 + 442 + .section .rodata 443 + .align 16 444 + 445 + K_XMM_AR: 446 + .long K1, K1, K1, K1 447 + .long K2, K2, K2, K2 448 + .long K3, K3, K3, K3 449 + .long K4, K4, K4, K4 450 + 451 + BSWAP_SHUFB_CTL: 452 + .long 0x00010203 453 + .long 0x04050607 454 + .long 0x08090a0b 455 + .long 0x0c0d0e0f 456 + 457 + 458 + .section .text 459 + 460 + W_PRECALC_SSSE3 461 + .macro xmm_mov a, b 462 + movdqu \a,\b 463 + .endm 464 + 465 + /* SSSE3 optimized implementation: 466 + * extern "C" void sha1_transform_ssse3(u32 *digest, const char *data, u32 *ws, 467 + * unsigned int rounds); 468 + */ 469 + SHA1_VECTOR_ASM sha1_transform_ssse3 470 + 471 + #ifdef SHA1_ENABLE_AVX_SUPPORT 472 + 473 + .macro W_PRECALC_AVX 474 + 475 + .purgem W_PRECALC_00_15 476 + .macro W_PRECALC_00_15 477 + W_PRECALC_00_15_AVX 478 + .endm 479 + .purgem W_PRECALC_16_31 480 + .macro W_PRECALC_16_31 481 + W_PRECALC_16_31_AVX 482 + .endm 483 + .purgem W_PRECALC_32_79 484 + .macro W_PRECALC_32_79 485 + W_PRECALC_32_79_AVX 486 + .endm 487 + 488 + .macro W_PRECALC_00_15_AVX 489 + .if ((i & 3) == 0) 490 + vmovdqu (i*4)(BUFFER_PTR), W_TMP1 491 + .elseif ((i & 3) == 1) 492 + vpshufb XMM_SHUFB_BSWAP, W_TMP1, W 493 + .elseif ((i & 3) == 2) 494 + vpaddd (K_BASE), W, W_TMP1 495 + .elseif ((i & 3) == 3) 496 + vmovdqa W_TMP1, WK(i&~3) 497 + W_PRECALC_ROTATE 498 + .endif 499 + .endm 500 + 501 + .macro W_PRECALC_16_31_AVX 502 + .if ((i & 3) == 0) 503 + vpalignr $8, W_minus_16, W_minus_12, W # w[i-14] 504 + vpsrldq $4, W_minus_04, W_TMP1 # w[i-3] 505 + vpxor W_minus_08, W, W 506 + vpxor W_minus_16, W_TMP1, W_TMP1 507 + .elseif ((i & 3) == 1) 508 + vpxor W_TMP1, W, W 509 + vpslldq $12, W, W_TMP2 510 + vpslld $1, W, W_TMP1 511 + .elseif ((i & 3) == 2) 512 + vpsrld $31, W, W 513 + vpor W, W_TMP1, W_TMP1 514 + vpslld $2, W_TMP2, W 515 + vpsrld $30, W_TMP2, W_TMP2 516 + .elseif ((i & 3) == 3) 517 + vpxor W, W_TMP1, W_TMP1 518 + vpxor W_TMP2, W_TMP1, W 519 + vpaddd K_XMM(K_BASE), W, W_TMP1 520 + vmovdqu W_TMP1, WK(i&~3) 521 + W_PRECALC_ROTATE 522 + .endif 523 + .endm 524 + 525 + .macro W_PRECALC_32_79_AVX 526 + .if ((i & 3) == 0) 527 + vpalignr $8, W_minus_08, W_minus_04, W_TMP1 528 + vpxor W_minus_28, W, W # W is W_minus_32 before xor 529 + .elseif ((i & 3) == 1) 530 + vpxor W_minus_16, W_TMP1, W_TMP1 531 + vpxor W_TMP1, W, W 532 + .elseif ((i & 3) == 2) 533 + vpslld $2, W, W_TMP1 534 + vpsrld $30, W, W 535 + vpor W, W_TMP1, W 536 + .elseif ((i & 3) == 3) 537 + vpaddd K_XMM(K_BASE), W, W_TMP1 538 + vmovdqu W_TMP1, WK(i&~3) 539 + W_PRECALC_ROTATE 540 + .endif 541 + .endm 542 + 543 + .endm // W_PRECALC_AVX 544 + 545 + W_PRECALC_AVX 546 + .purgem xmm_mov 547 + .macro xmm_mov a, b 548 + vmovdqu \a,\b 549 + .endm 550 + 551 + 552 + /* AVX optimized implementation: 553 + * extern "C" void sha1_transform_avx(u32 *digest, const char *data, u32 *ws, 554 + * unsigned int rounds); 555 + */ 556 + SHA1_VECTOR_ASM sha1_transform_avx 557 + 558 + #endif
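The glue module in the next file registers these transforms as the "sha1-ssse3" shash driver. For context, a hedged sketch (not part of this merge; names are illustrative) of a typical in-kernel caller: it simply asks for "sha1" and, with this module loaded on a capable CPU, transparently gets the SSSE3/AVX implementation because of its higher cra_priority.

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative only: one-shot SHA-1 over a contiguous buffer. */
static int example_sha1_digest(const u8 *data, unsigned int len,
			       u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}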
+240
arch/x86/crypto/sha1_ssse3_glue.c
··· 1 + /* 2 + * Cryptographic API. 3 + * 4 + * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using 5 + * Supplemental SSE3 instructions. 6 + * 7 + * This file is based on sha1_generic.c 8 + * 9 + * Copyright (c) Alan Smithee. 10 + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> 11 + * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> 12 + * Copyright (c) Mathias Krause <minipli@googlemail.com> 13 + * 14 + * This program is free software; you can redistribute it and/or modify it 15 + * under the terms of the GNU General Public License as published by the Free 16 + * Software Foundation; either version 2 of the License, or (at your option) 17 + * any later version. 18 + * 19 + */ 20 + 21 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22 + 23 + #include <crypto/internal/hash.h> 24 + #include <linux/init.h> 25 + #include <linux/module.h> 26 + #include <linux/mm.h> 27 + #include <linux/cryptohash.h> 28 + #include <linux/types.h> 29 + #include <crypto/sha.h> 30 + #include <asm/byteorder.h> 31 + #include <asm/i387.h> 32 + #include <asm/xcr.h> 33 + #include <asm/xsave.h> 34 + 35 + 36 + asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, 37 + unsigned int rounds); 38 + #ifdef SHA1_ENABLE_AVX_SUPPORT 39 + asmlinkage void sha1_transform_avx(u32 *digest, const char *data, 40 + unsigned int rounds); 41 + #endif 42 + 43 + static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int); 44 + 45 + 46 + static int sha1_ssse3_init(struct shash_desc *desc) 47 + { 48 + struct sha1_state *sctx = shash_desc_ctx(desc); 49 + 50 + *sctx = (struct sha1_state){ 51 + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, 52 + }; 53 + 54 + return 0; 55 + } 56 + 57 + static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data, 58 + unsigned int len, unsigned int partial) 59 + { 60 + struct sha1_state *sctx = shash_desc_ctx(desc); 61 + unsigned int done = 0; 62 + 63 + sctx->count += len; 64 + 65 + if (partial) { 66 + done = SHA1_BLOCK_SIZE - partial; 67 + memcpy(sctx->buffer + partial, data, done); 68 + sha1_transform_asm(sctx->state, sctx->buffer, 1); 69 + } 70 + 71 + if (len - done >= SHA1_BLOCK_SIZE) { 72 + const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; 73 + 74 + sha1_transform_asm(sctx->state, data + done, rounds); 75 + done += rounds * SHA1_BLOCK_SIZE; 76 + } 77 + 78 + memcpy(sctx->buffer, data + done, len - done); 79 + 80 + return 0; 81 + } 82 + 83 + static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data, 84 + unsigned int len) 85 + { 86 + struct sha1_state *sctx = shash_desc_ctx(desc); 87 + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; 88 + int res; 89 + 90 + /* Handle the fast case right here */ 91 + if (partial + len < SHA1_BLOCK_SIZE) { 92 + sctx->count += len; 93 + memcpy(sctx->buffer + partial, data, len); 94 + 95 + return 0; 96 + } 97 + 98 + if (!irq_fpu_usable()) { 99 + res = crypto_sha1_update(desc, data, len); 100 + } else { 101 + kernel_fpu_begin(); 102 + res = __sha1_ssse3_update(desc, data, len, partial); 103 + kernel_fpu_end(); 104 + } 105 + 106 + return res; 107 + } 108 + 109 + 110 + /* Add padding and return the message digest. 
*/ 111 + static int sha1_ssse3_final(struct shash_desc *desc, u8 *out) 112 + { 113 + struct sha1_state *sctx = shash_desc_ctx(desc); 114 + unsigned int i, index, padlen; 115 + __be32 *dst = (__be32 *)out; 116 + __be64 bits; 117 + static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; 118 + 119 + bits = cpu_to_be64(sctx->count << 3); 120 + 121 + /* Pad out to 56 mod 64 and append length */ 122 + index = sctx->count % SHA1_BLOCK_SIZE; 123 + padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index); 124 + if (!irq_fpu_usable()) { 125 + crypto_sha1_update(desc, padding, padlen); 126 + crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); 127 + } else { 128 + kernel_fpu_begin(); 129 + /* We need to fill a whole block for __sha1_ssse3_update() */ 130 + if (padlen <= 56) { 131 + sctx->count += padlen; 132 + memcpy(sctx->buffer + index, padding, padlen); 133 + } else { 134 + __sha1_ssse3_update(desc, padding, padlen, index); 135 + } 136 + __sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56); 137 + kernel_fpu_end(); 138 + } 139 + 140 + /* Store state in digest */ 141 + for (i = 0; i < 5; i++) 142 + dst[i] = cpu_to_be32(sctx->state[i]); 143 + 144 + /* Wipe context */ 145 + memset(sctx, 0, sizeof(*sctx)); 146 + 147 + return 0; 148 + } 149 + 150 + static int sha1_ssse3_export(struct shash_desc *desc, void *out) 151 + { 152 + struct sha1_state *sctx = shash_desc_ctx(desc); 153 + 154 + memcpy(out, sctx, sizeof(*sctx)); 155 + 156 + return 0; 157 + } 158 + 159 + static int sha1_ssse3_import(struct shash_desc *desc, const void *in) 160 + { 161 + struct sha1_state *sctx = shash_desc_ctx(desc); 162 + 163 + memcpy(sctx, in, sizeof(*sctx)); 164 + 165 + return 0; 166 + } 167 + 168 + static struct shash_alg alg = { 169 + .digestsize = SHA1_DIGEST_SIZE, 170 + .init = sha1_ssse3_init, 171 + .update = sha1_ssse3_update, 172 + .final = sha1_ssse3_final, 173 + .export = sha1_ssse3_export, 174 + .import = sha1_ssse3_import, 175 + .descsize = sizeof(struct sha1_state), 176 + .statesize = sizeof(struct sha1_state), 177 + .base = { 178 + .cra_name = "sha1", 179 + .cra_driver_name= "sha1-ssse3", 180 + .cra_priority = 150, 181 + .cra_flags = CRYPTO_ALG_TYPE_SHASH, 182 + .cra_blocksize = SHA1_BLOCK_SIZE, 183 + .cra_module = THIS_MODULE, 184 + } 185 + }; 186 + 187 + #ifdef SHA1_ENABLE_AVX_SUPPORT 188 + static bool __init avx_usable(void) 189 + { 190 + u64 xcr0; 191 + 192 + if (!cpu_has_avx || !cpu_has_osxsave) 193 + return false; 194 + 195 + xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 196 + if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) { 197 + pr_info("AVX detected but unusable.\n"); 198 + 199 + return false; 200 + } 201 + 202 + return true; 203 + } 204 + #endif 205 + 206 + static int __init sha1_ssse3_mod_init(void) 207 + { 208 + /* test for SSSE3 first */ 209 + if (cpu_has_ssse3) 210 + sha1_transform_asm = sha1_transform_ssse3; 211 + 212 + #ifdef SHA1_ENABLE_AVX_SUPPORT 213 + /* allow AVX to override SSSE3, it's a little faster */ 214 + if (avx_usable()) 215 + sha1_transform_asm = sha1_transform_avx; 216 + #endif 217 + 218 + if (sha1_transform_asm) { 219 + pr_info("Using %s optimized SHA-1 implementation\n", 220 + sha1_transform_asm == sha1_transform_ssse3 ? 
"SSSE3" 221 + : "AVX"); 222 + return crypto_register_shash(&alg); 223 + } 224 + pr_info("Neither AVX nor SSSE3 is available/usable.\n"); 225 + 226 + return -ENODEV; 227 + } 228 + 229 + static void __exit sha1_ssse3_mod_fini(void) 230 + { 231 + crypto_unregister_shash(&alg); 232 + } 233 + 234 + module_init(sha1_ssse3_mod_init); 235 + module_exit(sha1_ssse3_mod_fini); 236 + 237 + MODULE_LICENSE("GPL"); 238 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 239 + 240 + MODULE_ALIAS("sha1");
+5 -5
arch/x86/crypto/twofish-i586-asm_32.S
···
  
  #define in_blk 12	/* input byte array address parameter*/
  #define out_blk 8	/* output byte array address parameter*/
- #define tfm 4		/* Twofish context structure */
+ #define ctx 4		/* Twofish context structure */
  
  #define a_offset 0
  #define b_offset 4
···
  	push %esi
  	push %edi
  
- 	mov tfm + 16(%esp), %ebp	/* abuse the base pointer: set new base bointer to the crypto tfm */
- 	add $crypto_tfm_ctx_offset, %ebp	/* ctx address */
+ 	mov ctx + 16(%esp), %ebp	/* abuse the base pointer: set new base
+ 					 * pointer to the ctx address */
  	mov in_blk+16(%esp),%edi	/* input address in edi */
  
  	mov (%edi), %eax
···
  	push %edi
  
  
- 	mov tfm + 16(%esp), %ebp	/* abuse the base pointer: set new base bointer to the crypto tfm */
- 	add $crypto_tfm_ctx_offset, %ebp	/* ctx address */
+ 	mov ctx + 16(%esp), %ebp	/* abuse the base pointer: set new base
+ 					 * pointer to the ctx address */
  	mov in_blk+16(%esp),%edi	/* input address in edi */
  
  	mov (%edi), %eax
+316
arch/x86/crypto/twofish-x86_64-asm_64-3way.S
··· 1 + /* 2 + * Twofish Cipher 3-way parallel algorithm (x86_64) 3 + * 4 + * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 19 + * USA 20 + * 21 + */ 22 + 23 + .file "twofish-x86_64-asm-3way.S" 24 + .text 25 + 26 + /* structure of crypto context */ 27 + #define s0 0 28 + #define s1 1024 29 + #define s2 2048 30 + #define s3 3072 31 + #define w 4096 32 + #define k 4128 33 + 34 + /********************************************************************** 35 + 3-way twofish 36 + **********************************************************************/ 37 + #define CTX %rdi 38 + #define RIO %rdx 39 + 40 + #define RAB0 %rax 41 + #define RAB1 %rbx 42 + #define RAB2 %rcx 43 + 44 + #define RAB0d %eax 45 + #define RAB1d %ebx 46 + #define RAB2d %ecx 47 + 48 + #define RAB0bh %ah 49 + #define RAB1bh %bh 50 + #define RAB2bh %ch 51 + 52 + #define RAB0bl %al 53 + #define RAB1bl %bl 54 + #define RAB2bl %cl 55 + 56 + #define RCD0 %r8 57 + #define RCD1 %r9 58 + #define RCD2 %r10 59 + 60 + #define RCD0d %r8d 61 + #define RCD1d %r9d 62 + #define RCD2d %r10d 63 + 64 + #define RX0 %rbp 65 + #define RX1 %r11 66 + #define RX2 %r12 67 + 68 + #define RX0d %ebp 69 + #define RX1d %r11d 70 + #define RX2d %r12d 71 + 72 + #define RY0 %r13 73 + #define RY1 %r14 74 + #define RY2 %r15 75 + 76 + #define RY0d %r13d 77 + #define RY1d %r14d 78 + #define RY2d %r15d 79 + 80 + #define RT0 %rdx 81 + #define RT1 %rsi 82 + 83 + #define RT0d %edx 84 + #define RT1d %esi 85 + 86 + #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ 87 + movzbl ab ## bl, tmp2 ## d; \ 88 + movzbl ab ## bh, tmp1 ## d; \ 89 + rorq $(rot), ab; \ 90 + op1##l T0(CTX, tmp2, 4), dst ## d; \ 91 + op2##l T1(CTX, tmp1, 4), dst ## d; 92 + 93 + /* 94 + * Combined G1 & G2 function. Reordered with help of rotates to have moves 95 + * at begining. 
96 + */ 97 + #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \ 98 + /* G1,1 && G2,1 */ \ 99 + do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 0, ab ## 0, x ## 0); \ 100 + do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 0, ab ## 0, y ## 0); \ 101 + \ 102 + do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 1, ab ## 1, x ## 1); \ 103 + do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 1, ab ## 1, y ## 1); \ 104 + \ 105 + do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 2, ab ## 2, x ## 2); \ 106 + do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 2, ab ## 2, y ## 2); \ 107 + \ 108 + /* G1,2 && G2,2 */ \ 109 + do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ 110 + do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ 111 + xchgq cd ## 0, ab ## 0; \ 112 + \ 113 + do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ 114 + do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ 115 + xchgq cd ## 1, ab ## 1; \ 116 + \ 117 + do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ 118 + do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ 119 + xchgq cd ## 2, ab ## 2; 120 + 121 + #define enc_round_end(ab, x, y, n) \ 122 + addl y ## d, x ## d; \ 123 + addl x ## d, y ## d; \ 124 + addl k+4*(2*(n))(CTX), x ## d; \ 125 + xorl ab ## d, x ## d; \ 126 + addl k+4*(2*(n)+1)(CTX), y ## d; \ 127 + shrq $32, ab; \ 128 + roll $1, ab ## d; \ 129 + xorl y ## d, ab ## d; \ 130 + shlq $32, ab; \ 131 + rorl $1, x ## d; \ 132 + orq x, ab; 133 + 134 + #define dec_round_end(ba, x, y, n) \ 135 + addl y ## d, x ## d; \ 136 + addl x ## d, y ## d; \ 137 + addl k+4*(2*(n))(CTX), x ## d; \ 138 + addl k+4*(2*(n)+1)(CTX), y ## d; \ 139 + xorl ba ## d, y ## d; \ 140 + shrq $32, ba; \ 141 + roll $1, ba ## d; \ 142 + xorl x ## d, ba ## d; \ 143 + shlq $32, ba; \ 144 + rorl $1, y ## d; \ 145 + orq y, ba; 146 + 147 + #define encrypt_round3(ab, cd, n) \ 148 + g1g2_3(ab, cd, s0, s1, s2, s3, s0, s1, s2, s3, RX, RY); \ 149 + \ 150 + enc_round_end(ab ## 0, RX0, RY0, n); \ 151 + enc_round_end(ab ## 1, RX1, RY1, n); \ 152 + enc_round_end(ab ## 2, RX2, RY2, n); 153 + 154 + #define decrypt_round3(ba, dc, n) \ 155 + g1g2_3(ba, dc, s1, s2, s3, s0, s3, s0, s1, s2, RY, RX); \ 156 + \ 157 + dec_round_end(ba ## 0, RX0, RY0, n); \ 158 + dec_round_end(ba ## 1, RX1, RY1, n); \ 159 + dec_round_end(ba ## 2, RX2, RY2, n); 160 + 161 + #define encrypt_cycle3(ab, cd, n) \ 162 + encrypt_round3(ab, cd, n*2); \ 163 + encrypt_round3(ab, cd, (n*2)+1); 164 + 165 + #define decrypt_cycle3(ba, dc, n) \ 166 + decrypt_round3(ba, dc, (n*2)+1); \ 167 + decrypt_round3(ba, dc, (n*2)); 168 + 169 + #define inpack3(in, n, xy, m) \ 170 + movq 4*(n)(in), xy ## 0; \ 171 + xorq w+4*m(CTX), xy ## 0; \ 172 + \ 173 + movq 4*(4+(n))(in), xy ## 1; \ 174 + xorq w+4*m(CTX), xy ## 1; \ 175 + \ 176 + movq 4*(8+(n))(in), xy ## 2; \ 177 + xorq w+4*m(CTX), xy ## 2; 178 + 179 + #define outunpack3(op, out, n, xy, m) \ 180 + xorq w+4*m(CTX), xy ## 0; \ 181 + op ## q xy ## 0, 4*(n)(out); \ 182 + \ 183 + xorq w+4*m(CTX), xy ## 1; \ 184 + op ## q xy ## 1, 4*(4+(n))(out); \ 185 + \ 186 + xorq w+4*m(CTX), xy ## 2; \ 187 + op ## q xy ## 2, 4*(8+(n))(out); 188 + 189 + #define inpack_enc3() \ 190 + inpack3(RIO, 0, RAB, 0); \ 191 + inpack3(RIO, 2, RCD, 2); 192 + 193 + #define outunpack_enc3(op) \ 194 + outunpack3(op, RIO, 2, RAB, 6); \ 195 + outunpack3(op, RIO, 0, RCD, 4); 196 + 197 + #define inpack_dec3() \ 198 + inpack3(RIO, 0, RAB, 4); \ 199 + rorq $32, RAB0; \ 200 + rorq $32, RAB1; \ 201 + rorq $32, RAB2; \ 202 + 
inpack3(RIO, 2, RCD, 6); \ 203 + rorq $32, RCD0; \ 204 + rorq $32, RCD1; \ 205 + rorq $32, RCD2; 206 + 207 + #define outunpack_dec3() \ 208 + rorq $32, RCD0; \ 209 + rorq $32, RCD1; \ 210 + rorq $32, RCD2; \ 211 + outunpack3(mov, RIO, 0, RCD, 0); \ 212 + rorq $32, RAB0; \ 213 + rorq $32, RAB1; \ 214 + rorq $32, RAB2; \ 215 + outunpack3(mov, RIO, 2, RAB, 2); 216 + 217 + .align 8 218 + .global __twofish_enc_blk_3way 219 + .type __twofish_enc_blk_3way,@function; 220 + 221 + __twofish_enc_blk_3way: 222 + /* input: 223 + * %rdi: ctx, CTX 224 + * %rsi: dst 225 + * %rdx: src, RIO 226 + * %rcx: bool, if true: xor output 227 + */ 228 + pushq %r15; 229 + pushq %r14; 230 + pushq %r13; 231 + pushq %r12; 232 + pushq %rbp; 233 + pushq %rbx; 234 + 235 + pushq %rcx; /* bool xor */ 236 + pushq %rsi; /* dst */ 237 + 238 + inpack_enc3(); 239 + 240 + encrypt_cycle3(RAB, RCD, 0); 241 + encrypt_cycle3(RAB, RCD, 1); 242 + encrypt_cycle3(RAB, RCD, 2); 243 + encrypt_cycle3(RAB, RCD, 3); 244 + encrypt_cycle3(RAB, RCD, 4); 245 + encrypt_cycle3(RAB, RCD, 5); 246 + encrypt_cycle3(RAB, RCD, 6); 247 + encrypt_cycle3(RAB, RCD, 7); 248 + 249 + popq RIO; /* dst */ 250 + popq %rbp; /* bool xor */ 251 + 252 + testb %bpl, %bpl; 253 + jnz __enc_xor3; 254 + 255 + outunpack_enc3(mov); 256 + 257 + popq %rbx; 258 + popq %rbp; 259 + popq %r12; 260 + popq %r13; 261 + popq %r14; 262 + popq %r15; 263 + ret; 264 + 265 + __enc_xor3: 266 + outunpack_enc3(xor); 267 + 268 + popq %rbx; 269 + popq %rbp; 270 + popq %r12; 271 + popq %r13; 272 + popq %r14; 273 + popq %r15; 274 + ret; 275 + 276 + .global twofish_dec_blk_3way 277 + .type twofish_dec_blk_3way,@function; 278 + 279 + twofish_dec_blk_3way: 280 + /* input: 281 + * %rdi: ctx, CTX 282 + * %rsi: dst 283 + * %rdx: src, RIO 284 + */ 285 + pushq %r15; 286 + pushq %r14; 287 + pushq %r13; 288 + pushq %r12; 289 + pushq %rbp; 290 + pushq %rbx; 291 + 292 + pushq %rsi; /* dst */ 293 + 294 + inpack_dec3(); 295 + 296 + decrypt_cycle3(RAB, RCD, 7); 297 + decrypt_cycle3(RAB, RCD, 6); 298 + decrypt_cycle3(RAB, RCD, 5); 299 + decrypt_cycle3(RAB, RCD, 4); 300 + decrypt_cycle3(RAB, RCD, 3); 301 + decrypt_cycle3(RAB, RCD, 2); 302 + decrypt_cycle3(RAB, RCD, 1); 303 + decrypt_cycle3(RAB, RCD, 0); 304 + 305 + popq RIO; /* dst */ 306 + 307 + outunpack_dec3(); 308 + 309 + popq %rbx; 310 + popq %rbp; 311 + popq %r12; 312 + popq %r13; 313 + popq %r14; 314 + popq %r15; 315 + ret; 316 +
+2 -4
arch/x86/crypto/twofish-x86_64-asm_64.S
··· 221 221 twofish_enc_blk: 222 222 pushq R1 223 223 224 - /* %rdi contains the crypto tfm address */ 224 + /* %rdi contains the ctx address */ 225 225 /* %rsi contains the output address */ 226 226 /* %rdx contains the input address */ 227 - add $crypto_tfm_ctx_offset, %rdi /* set ctx address */ 228 227 /* ctx address is moved to free one non-rex register 229 228 as target for the 8bit high operations */ 230 229 mov %rdi, %r11 ··· 273 274 twofish_dec_blk: 274 275 pushq R1 275 276 276 - /* %rdi contains the crypto tfm address */ 277 + /* %rdi contains the ctx address */ 277 278 /* %rsi contains the output address */ 278 279 /* %rdx contains the input address */ 279 - add $crypto_tfm_ctx_offset, %rdi /* set ctx address */ 280 280 /* ctx address is moved to free one non-rex register 281 281 as target for the 8bit high operations */ 282 282 mov %rdi, %r11
+8 -4
arch/x86/crypto/twofish_glue.c
··· 44 44 #include <linux/module.h> 45 45 #include <linux/types.h> 46 46 47 - asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 48 - asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 47 + asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, 48 + const u8 *src); 49 + EXPORT_SYMBOL_GPL(twofish_enc_blk); 50 + asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, 51 + const u8 *src); 52 + EXPORT_SYMBOL_GPL(twofish_dec_blk); 49 53 50 54 static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 51 55 { 52 - twofish_enc_blk(tfm, dst, src); 56 + twofish_enc_blk(crypto_tfm_ctx(tfm), dst, src); 53 57 } 54 58 55 59 static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 56 60 { 57 - twofish_dec_blk(tfm, dst, src); 61 + twofish_dec_blk(crypto_tfm_ctx(tfm), dst, src); 58 62 } 59 63 60 64 static struct crypto_alg alg = {
+472
arch/x86/crypto/twofish_glue_3way.c
··· 1 + /* 2 + * Glue Code for 3-way parallel assembler optimized version of Twofish 3 + * 4 + * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> 5 + * 6 + * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: 7 + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 8 + * CTR part based on code (crypto/ctr.c) by: 9 + * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> 10 + * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License as published by 13 + * the Free Software Foundation; either version 2 of the License, or 14 + * (at your option) any later version. 15 + * 16 + * This program is distributed in the hope that it will be useful, 17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 + * GNU General Public License for more details. 20 + * 21 + * You should have received a copy of the GNU General Public License 22 + * along with this program; if not, write to the Free Software 23 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 24 + * USA 25 + * 26 + */ 27 + 28 + #include <linux/crypto.h> 29 + #include <linux/init.h> 30 + #include <linux/module.h> 31 + #include <linux/types.h> 32 + #include <crypto/algapi.h> 33 + #include <crypto/twofish.h> 34 + #include <crypto/b128ops.h> 35 + 36 + /* regular block cipher functions from twofish_x86_64 module */ 37 + asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, 38 + const u8 *src); 39 + asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, 40 + const u8 *src); 41 + 42 + /* 3-way parallel cipher functions */ 43 + asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 44 + const u8 *src, bool xor); 45 + asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, 46 + const u8 *src); 47 + 48 + static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 49 + const u8 *src) 50 + { 51 + __twofish_enc_blk_3way(ctx, dst, src, false); 52 + } 53 + 54 + static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, 55 + const u8 *src) 56 + { 57 + __twofish_enc_blk_3way(ctx, dst, src, true); 58 + } 59 + 60 + static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, 61 + void (*fn)(struct twofish_ctx *, u8 *, const u8 *), 62 + void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *)) 63 + { 64 + struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 65 + unsigned int bsize = TF_BLOCK_SIZE; 66 + unsigned int nbytes; 67 + int err; 68 + 69 + err = blkcipher_walk_virt(desc, walk); 70 + 71 + while ((nbytes = walk->nbytes)) { 72 + u8 *wsrc = walk->src.virt.addr; 73 + u8 *wdst = walk->dst.virt.addr; 74 + 75 + /* Process three block batch */ 76 + if (nbytes >= bsize * 3) { 77 + do { 78 + fn_3way(ctx, wdst, wsrc); 79 + 80 + wsrc += bsize * 3; 81 + wdst += bsize * 3; 82 + nbytes -= bsize * 3; 83 + } while (nbytes >= bsize * 3); 84 + 85 + if (nbytes < bsize) 86 + goto done; 87 + } 88 + 89 + /* Handle leftovers */ 90 + do { 91 + fn(ctx, wdst, wsrc); 92 + 93 + wsrc += bsize; 94 + wdst += bsize; 95 + nbytes -= bsize; 96 + } while (nbytes >= bsize); 97 + 98 + done: 99 + err = blkcipher_walk_done(desc, walk, nbytes); 100 + } 101 + 102 + return err; 103 + } 104 + 105 + static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 106 + struct scatterlist *src, unsigned int nbytes) 107 + { 108 + struct blkcipher_walk walk; 109 + 110 
+ blkcipher_walk_init(&walk, dst, src, nbytes); 111 + return ecb_crypt(desc, &walk, twofish_enc_blk, twofish_enc_blk_3way); 112 + } 113 + 114 + static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 115 + struct scatterlist *src, unsigned int nbytes) 116 + { 117 + struct blkcipher_walk walk; 118 + 119 + blkcipher_walk_init(&walk, dst, src, nbytes); 120 + return ecb_crypt(desc, &walk, twofish_dec_blk, twofish_dec_blk_3way); 121 + } 122 + 123 + static struct crypto_alg blk_ecb_alg = { 124 + .cra_name = "ecb(twofish)", 125 + .cra_driver_name = "ecb-twofish-3way", 126 + .cra_priority = 300, 127 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 128 + .cra_blocksize = TF_BLOCK_SIZE, 129 + .cra_ctxsize = sizeof(struct twofish_ctx), 130 + .cra_alignmask = 0, 131 + .cra_type = &crypto_blkcipher_type, 132 + .cra_module = THIS_MODULE, 133 + .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list), 134 + .cra_u = { 135 + .blkcipher = { 136 + .min_keysize = TF_MIN_KEY_SIZE, 137 + .max_keysize = TF_MAX_KEY_SIZE, 138 + .setkey = twofish_setkey, 139 + .encrypt = ecb_encrypt, 140 + .decrypt = ecb_decrypt, 141 + }, 142 + }, 143 + }; 144 + 145 + static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, 146 + struct blkcipher_walk *walk) 147 + { 148 + struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 149 + unsigned int bsize = TF_BLOCK_SIZE; 150 + unsigned int nbytes = walk->nbytes; 151 + u128 *src = (u128 *)walk->src.virt.addr; 152 + u128 *dst = (u128 *)walk->dst.virt.addr; 153 + u128 *iv = (u128 *)walk->iv; 154 + 155 + do { 156 + u128_xor(dst, src, iv); 157 + twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst); 158 + iv = dst; 159 + 160 + src += 1; 161 + dst += 1; 162 + nbytes -= bsize; 163 + } while (nbytes >= bsize); 164 + 165 + u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv); 166 + return nbytes; 167 + } 168 + 169 + static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 170 + struct scatterlist *src, unsigned int nbytes) 171 + { 172 + struct blkcipher_walk walk; 173 + int err; 174 + 175 + blkcipher_walk_init(&walk, dst, src, nbytes); 176 + err = blkcipher_walk_virt(desc, &walk); 177 + 178 + while ((nbytes = walk.nbytes)) { 179 + nbytes = __cbc_encrypt(desc, &walk); 180 + err = blkcipher_walk_done(desc, &walk, nbytes); 181 + } 182 + 183 + return err; 184 + } 185 + 186 + static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, 187 + struct blkcipher_walk *walk) 188 + { 189 + struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 190 + unsigned int bsize = TF_BLOCK_SIZE; 191 + unsigned int nbytes = walk->nbytes; 192 + u128 *src = (u128 *)walk->src.virt.addr; 193 + u128 *dst = (u128 *)walk->dst.virt.addr; 194 + u128 ivs[3 - 1]; 195 + u128 last_iv; 196 + 197 + /* Start of the last block. 
*/ 198 + src += nbytes / bsize - 1; 199 + dst += nbytes / bsize - 1; 200 + 201 + last_iv = *src; 202 + 203 + /* Process three block batch */ 204 + if (nbytes >= bsize * 3) { 205 + do { 206 + nbytes -= bsize * (3 - 1); 207 + src -= 3 - 1; 208 + dst -= 3 - 1; 209 + 210 + ivs[0] = src[0]; 211 + ivs[1] = src[1]; 212 + 213 + twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); 214 + 215 + u128_xor(dst + 1, dst + 1, ivs + 0); 216 + u128_xor(dst + 2, dst + 2, ivs + 1); 217 + 218 + nbytes -= bsize; 219 + if (nbytes < bsize) 220 + goto done; 221 + 222 + u128_xor(dst, dst, src - 1); 223 + src -= 1; 224 + dst -= 1; 225 + } while (nbytes >= bsize * 3); 226 + 227 + if (nbytes < bsize) 228 + goto done; 229 + } 230 + 231 + /* Handle leftovers */ 232 + for (;;) { 233 + twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src); 234 + 235 + nbytes -= bsize; 236 + if (nbytes < bsize) 237 + break; 238 + 239 + u128_xor(dst, dst, src - 1); 240 + src -= 1; 241 + dst -= 1; 242 + } 243 + 244 + done: 245 + u128_xor(dst, dst, (u128 *)walk->iv); 246 + *(u128 *)walk->iv = last_iv; 247 + 248 + return nbytes; 249 + } 250 + 251 + static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 252 + struct scatterlist *src, unsigned int nbytes) 253 + { 254 + struct blkcipher_walk walk; 255 + int err; 256 + 257 + blkcipher_walk_init(&walk, dst, src, nbytes); 258 + err = blkcipher_walk_virt(desc, &walk); 259 + 260 + while ((nbytes = walk.nbytes)) { 261 + nbytes = __cbc_decrypt(desc, &walk); 262 + err = blkcipher_walk_done(desc, &walk, nbytes); 263 + } 264 + 265 + return err; 266 + } 267 + 268 + static struct crypto_alg blk_cbc_alg = { 269 + .cra_name = "cbc(twofish)", 270 + .cra_driver_name = "cbc-twofish-3way", 271 + .cra_priority = 300, 272 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 273 + .cra_blocksize = TF_BLOCK_SIZE, 274 + .cra_ctxsize = sizeof(struct twofish_ctx), 275 + .cra_alignmask = 0, 276 + .cra_type = &crypto_blkcipher_type, 277 + .cra_module = THIS_MODULE, 278 + .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list), 279 + .cra_u = { 280 + .blkcipher = { 281 + .min_keysize = TF_MIN_KEY_SIZE, 282 + .max_keysize = TF_MAX_KEY_SIZE, 283 + .ivsize = TF_BLOCK_SIZE, 284 + .setkey = twofish_setkey, 285 + .encrypt = cbc_encrypt, 286 + .decrypt = cbc_decrypt, 287 + }, 288 + }, 289 + }; 290 + 291 + static inline void u128_to_be128(be128 *dst, const u128 *src) 292 + { 293 + dst->a = cpu_to_be64(src->a); 294 + dst->b = cpu_to_be64(src->b); 295 + } 296 + 297 + static inline void be128_to_u128(u128 *dst, const be128 *src) 298 + { 299 + dst->a = be64_to_cpu(src->a); 300 + dst->b = be64_to_cpu(src->b); 301 + } 302 + 303 + static inline void u128_inc(u128 *i) 304 + { 305 + i->b++; 306 + if (!i->b) 307 + i->a++; 308 + } 309 + 310 + static void ctr_crypt_final(struct blkcipher_desc *desc, 311 + struct blkcipher_walk *walk) 312 + { 313 + struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 314 + u8 *ctrblk = walk->iv; 315 + u8 keystream[TF_BLOCK_SIZE]; 316 + u8 *src = walk->src.virt.addr; 317 + u8 *dst = walk->dst.virt.addr; 318 + unsigned int nbytes = walk->nbytes; 319 + 320 + twofish_enc_blk(ctx, keystream, ctrblk); 321 + crypto_xor(keystream, src, nbytes); 322 + memcpy(dst, keystream, nbytes); 323 + 324 + crypto_inc(ctrblk, TF_BLOCK_SIZE); 325 + } 326 + 327 + static unsigned int __ctr_crypt(struct blkcipher_desc *desc, 328 + struct blkcipher_walk *walk) 329 + { 330 + struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 331 + unsigned int bsize = TF_BLOCK_SIZE; 332 + unsigned int nbytes = walk->nbytes; 333 + u128 *src = (u128 
*)walk->src.virt.addr; 334 + u128 *dst = (u128 *)walk->dst.virt.addr; 335 + u128 ctrblk; 336 + be128 ctrblocks[3]; 337 + 338 + be128_to_u128(&ctrblk, (be128 *)walk->iv); 339 + 340 + /* Process three block batch */ 341 + if (nbytes >= bsize * 3) { 342 + do { 343 + if (dst != src) { 344 + dst[0] = src[0]; 345 + dst[1] = src[1]; 346 + dst[2] = src[2]; 347 + } 348 + 349 + /* create ctrblks for parallel encrypt */ 350 + u128_to_be128(&ctrblocks[0], &ctrblk); 351 + u128_inc(&ctrblk); 352 + u128_to_be128(&ctrblocks[1], &ctrblk); 353 + u128_inc(&ctrblk); 354 + u128_to_be128(&ctrblocks[2], &ctrblk); 355 + u128_inc(&ctrblk); 356 + 357 + twofish_enc_blk_xor_3way(ctx, (u8 *)dst, 358 + (u8 *)ctrblocks); 359 + 360 + src += 3; 361 + dst += 3; 362 + nbytes -= bsize * 3; 363 + } while (nbytes >= bsize * 3); 364 + 365 + if (nbytes < bsize) 366 + goto done; 367 + } 368 + 369 + /* Handle leftovers */ 370 + do { 371 + if (dst != src) 372 + *dst = *src; 373 + 374 + u128_to_be128(&ctrblocks[0], &ctrblk); 375 + u128_inc(&ctrblk); 376 + 377 + twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); 378 + u128_xor(dst, dst, (u128 *)ctrblocks); 379 + 380 + src += 1; 381 + dst += 1; 382 + nbytes -= bsize; 383 + } while (nbytes >= bsize); 384 + 385 + done: 386 + u128_to_be128((be128 *)walk->iv, &ctrblk); 387 + return nbytes; 388 + } 389 + 390 + static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 391 + struct scatterlist *src, unsigned int nbytes) 392 + { 393 + struct blkcipher_walk walk; 394 + int err; 395 + 396 + blkcipher_walk_init(&walk, dst, src, nbytes); 397 + err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE); 398 + 399 + while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) { 400 + nbytes = __ctr_crypt(desc, &walk); 401 + err = blkcipher_walk_done(desc, &walk, nbytes); 402 + } 403 + 404 + if (walk.nbytes) { 405 + ctr_crypt_final(desc, &walk); 406 + err = blkcipher_walk_done(desc, &walk, 0); 407 + } 408 + 409 + return err; 410 + } 411 + 412 + static struct crypto_alg blk_ctr_alg = { 413 + .cra_name = "ctr(twofish)", 414 + .cra_driver_name = "ctr-twofish-3way", 415 + .cra_priority = 300, 416 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 417 + .cra_blocksize = 1, 418 + .cra_ctxsize = sizeof(struct twofish_ctx), 419 + .cra_alignmask = 0, 420 + .cra_type = &crypto_blkcipher_type, 421 + .cra_module = THIS_MODULE, 422 + .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list), 423 + .cra_u = { 424 + .blkcipher = { 425 + .min_keysize = TF_MIN_KEY_SIZE, 426 + .max_keysize = TF_MAX_KEY_SIZE, 427 + .ivsize = TF_BLOCK_SIZE, 428 + .setkey = twofish_setkey, 429 + .encrypt = ctr_crypt, 430 + .decrypt = ctr_crypt, 431 + }, 432 + }, 433 + }; 434 + 435 + int __init init(void) 436 + { 437 + int err; 438 + 439 + err = crypto_register_alg(&blk_ecb_alg); 440 + if (err) 441 + goto ecb_err; 442 + err = crypto_register_alg(&blk_cbc_alg); 443 + if (err) 444 + goto cbc_err; 445 + err = crypto_register_alg(&blk_ctr_alg); 446 + if (err) 447 + goto ctr_err; 448 + 449 + return 0; 450 + 451 + ctr_err: 452 + crypto_unregister_alg(&blk_cbc_alg); 453 + cbc_err: 454 + crypto_unregister_alg(&blk_ecb_alg); 455 + ecb_err: 456 + return err; 457 + } 458 + 459 + void __exit fini(void) 460 + { 461 + crypto_unregister_alg(&blk_ctr_alg); 462 + crypto_unregister_alg(&blk_cbc_alg); 463 + crypto_unregister_alg(&blk_ecb_alg); 464 + } 465 + 466 + module_init(init); 467 + module_exit(fini); 468 + 469 + MODULE_LICENSE("GPL"); 470 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized"); 471 + MODULE_ALIAS("twofish"); 472 + 
MODULE_ALIAS("twofish-asm");
+3
arch/x86/include/asm/cpufeature.h
··· 259 259 #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) 260 260 #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) 261 261 #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) 262 + #define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3) 262 263 #define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) 264 + #define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) 263 265 #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) 264 266 #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) 265 267 #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) ··· 289 287 #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) 290 288 #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) 291 289 #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) 290 + #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) 292 291 #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 293 292 #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 294 293 #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
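The new cpu_has_ssse3, cpu_has_avx and cpu_has_osxsave macros are what SSSE3/AVX glue code such as the new SHA1 module can test at init time. A rough sketch of the usual AVX gate (not the exact glue code; AVX is only usable when the OS has enabled YMM state saving, which is what the OSXSAVE/XGETBV check covers):

	#include <asm/cpufeature.h>
	#include <asm/xcr.h>
	#include <asm/xsave.h>

	static bool __init avx_usable(void)
	{
		u64 xcr0;

		if (!cpu_has_avx || !cpu_has_osxsave)
			return false;

		/* the OS must manage both SSE and YMM register state */
		xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
		return (xcr0 & (XSTATE_SSE | XSTATE_YMM)) == (XSTATE_SSE | XSTATE_YMM);
	}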
+63
crypto/Kconfig
··· 100 100 select CRYPTO_BLKCIPHER2 101 101 select CRYPTO_PCOMP2 102 102 103 + config CRYPTO_USER 104 + tristate "Userspace cryptographic algorithm configuration" 105 + depends on NET 106 + select CRYPTO_MANAGER 107 + help 108 + Userapace configuration for cryptographic instantiations such as 109 + cbc(aes). 110 + 103 111 config CRYPTO_MANAGER_DISABLE_TESTS 104 112 bool "Disable run-time self tests" 105 113 default y ··· 415 407 help 416 408 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 417 409 410 + config CRYPTO_SHA1_SSSE3 411 + tristate "SHA1 digest algorithm (SSSE3/AVX)" 412 + depends on X86 && 64BIT 413 + select CRYPTO_SHA1 414 + select CRYPTO_HASH 415 + help 416 + SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented 417 + using Supplemental SSE3 (SSSE3) instructions or Advanced Vector 418 + Extensions (AVX), when available. 419 + 418 420 config CRYPTO_SHA256 419 421 tristate "SHA224 and SHA256 digest algorithm" 420 422 select CRYPTO_HASH ··· 608 590 config CRYPTO_BLOWFISH 609 591 tristate "Blowfish cipher algorithm" 610 592 select CRYPTO_ALGAPI 593 + select CRYPTO_BLOWFISH_COMMON 611 594 help 612 595 Blowfish cipher algorithm, by Bruce Schneier. 596 + 597 + This is a variable key length cipher which can use keys from 32 598 + bits to 448 bits in length. It's fast, simple and specifically 599 + designed for use on "large microprocessors". 600 + 601 + See also: 602 + <http://www.schneier.com/blowfish.html> 603 + 604 + config CRYPTO_BLOWFISH_COMMON 605 + tristate 606 + help 607 + Common parts of the Blowfish cipher algorithm shared by the 608 + generic c and the assembler implementations. 609 + 610 + See also: 611 + <http://www.schneier.com/blowfish.html> 612 + 613 + config CRYPTO_BLOWFISH_X86_64 614 + tristate "Blowfish cipher algorithm (x86_64)" 615 + depends on (X86 || UML_X86) && 64BIT 616 + select CRYPTO_ALGAPI 617 + select CRYPTO_BLOWFISH_COMMON 618 + help 619 + Blowfish cipher algorithm (x86_64), by Bruce Schneier. 613 620 614 621 This is a variable key length cipher which can use keys from 32 615 622 bits to 448 bits in length. It's fast, simple and specifically ··· 832 789 candidate cipher by researchers at CounterPane Systems. It is a 833 790 16 round block cipher supporting key sizes of 128, 192, and 256 834 791 bits. 792 + 793 + See also: 794 + <http://www.schneier.com/twofish.html> 795 + 796 + config CRYPTO_TWOFISH_X86_64_3WAY 797 + tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" 798 + depends on (X86 || UML_X86) && 64BIT 799 + select CRYPTO_ALGAPI 800 + select CRYPTO_TWOFISH_COMMON 801 + select CRYPTO_TWOFISH_X86_64 802 + help 803 + Twofish cipher algorithm (x86_64, 3-way parallel). 804 + 805 + Twofish was submitted as an AES (Advanced Encryption Standard) 806 + candidate cipher by researchers at CounterPane Systems. It is a 807 + 16 round block cipher supporting key sizes of 128, 192, and 256 808 + bits. 809 + 810 + This module provides Twofish cipher algorithm that processes three 811 + blocks parallel, utilizing resources of out-of-order CPUs better. 835 812 836 813 See also: 837 814 <http://www.schneier.com/twofish.html>
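For reference, a purely illustrative .config fragment enabling the new options as modules (CRYPTO_BLOWFISH_COMMON and CRYPTO_TWOFISH_X86_64 are pulled in automatically via select):

	CONFIG_CRYPTO_USER=m
	CONFIG_CRYPTO_SHA1_SSSE3=m
	CONFIG_CRYPTO_BLOWFISH_X86_64=m
	CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m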
+3 -1
crypto/Makefile
··· 31 31 cryptomgr-y := algboss.o testmgr.o 32 32 33 33 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o 34 + obj-$(CONFIG_CRYPTO_USER) += crypto_user.o 34 35 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o 35 36 obj-$(CONFIG_CRYPTO_VMAC) += vmac.o 36 37 obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o ··· 61 60 obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o 62 61 obj-$(CONFIG_CRYPTO_DES) += des_generic.o 63 62 obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o 64 - obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o 63 + obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o 64 + obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o 65 65 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o 66 66 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o 67 67 obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
+48
crypto/ablkcipher.c
··· 23 23 #include <linux/sched.h> 24 24 #include <linux/slab.h> 25 25 #include <linux/seq_file.h> 26 + #include <linux/cryptouser.h> 27 + #include <net/netlink.h> 26 28 27 29 #include <crypto/scatterwalk.h> 28 30 ··· 383 381 return 0; 384 382 } 385 383 384 + static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 385 + { 386 + struct crypto_report_blkcipher rblkcipher; 387 + 388 + snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher"); 389 + snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", 390 + alg->cra_ablkcipher.geniv ?: "<default>"); 391 + 392 + rblkcipher.blocksize = alg->cra_blocksize; 393 + rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; 394 + rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; 395 + rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; 396 + 397 + NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 398 + sizeof(struct crypto_report_blkcipher), &rblkcipher); 399 + 400 + return 0; 401 + 402 + nla_put_failure: 403 + return -EMSGSIZE; 404 + } 405 + 386 406 static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) 387 407 __attribute__ ((unused)); 388 408 static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) ··· 427 403 #ifdef CONFIG_PROC_FS 428 404 .show = crypto_ablkcipher_show, 429 405 #endif 406 + .report = crypto_ablkcipher_report, 430 407 }; 431 408 EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); 432 409 ··· 457 432 return 0; 458 433 } 459 434 435 + static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 436 + { 437 + struct crypto_report_blkcipher rblkcipher; 438 + 439 + snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher"); 440 + snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", 441 + alg->cra_ablkcipher.geniv ?: "<built-in>"); 442 + 443 + rblkcipher.blocksize = alg->cra_blocksize; 444 + rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; 445 + rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; 446 + rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; 447 + 448 + NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 449 + sizeof(struct crypto_report_blkcipher), &rblkcipher); 450 + 451 + return 0; 452 + 453 + nla_put_failure: 454 + return -EMSGSIZE; 455 + } 456 + 460 457 static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) 461 458 __attribute__ ((unused)); 462 459 static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) ··· 501 454 #ifdef CONFIG_PROC_FS 502 455 .show = crypto_givcipher_show, 503 456 #endif 457 + .report = crypto_givcipher_report, 504 458 }; 505 459 EXPORT_SYMBOL_GPL(crypto_givcipher_type); 506 460
+48
crypto/aead.c
··· 21 21 #include <linux/sched.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/seq_file.h> 24 + #include <linux/cryptouser.h> 25 + #include <net/netlink.h> 24 26 25 27 #include "internal.h" 26 28 ··· 111 109 return 0; 112 110 } 113 111 112 + static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) 113 + { 114 + struct crypto_report_aead raead; 115 + struct aead_alg *aead = &alg->cra_aead; 116 + 117 + snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead"); 118 + snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", 119 + aead->geniv ?: "<built-in>"); 120 + 121 + raead.blocksize = alg->cra_blocksize; 122 + raead.maxauthsize = aead->maxauthsize; 123 + raead.ivsize = aead->ivsize; 124 + 125 + NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD, 126 + sizeof(struct crypto_report_aead), &raead); 127 + 128 + return 0; 129 + 130 + nla_put_failure: 131 + return -EMSGSIZE; 132 + } 133 + 114 134 static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) 115 135 __attribute__ ((unused)); 116 136 static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) ··· 154 130 #ifdef CONFIG_PROC_FS 155 131 .show = crypto_aead_show, 156 132 #endif 133 + .report = crypto_aead_report, 157 134 }; 158 135 EXPORT_SYMBOL_GPL(crypto_aead_type); 159 136 ··· 190 165 return 0; 191 166 } 192 167 168 + static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) 169 + { 170 + struct crypto_report_aead raead; 171 + struct aead_alg *aead = &alg->cra_aead; 172 + 173 + snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead"); 174 + snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv); 175 + 176 + raead.blocksize = alg->cra_blocksize; 177 + raead.maxauthsize = aead->maxauthsize; 178 + raead.ivsize = aead->ivsize; 179 + 180 + NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD, 181 + sizeof(struct crypto_report_aead), &raead); 182 + 183 + return 0; 184 + 185 + nla_put_failure: 186 + return -EMSGSIZE; 187 + } 188 + 189 + 193 190 static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) 194 191 __attribute__ ((unused)); 195 192 static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) ··· 233 186 #ifdef CONFIG_PROC_FS 234 187 .show = crypto_nivaead_show, 235 188 #endif 189 + .report = crypto_nivaead_report, 236 190 }; 237 191 EXPORT_SYMBOL_GPL(crypto_nivaead_type); 238 192
+21
crypto/ahash.c
··· 21 21 #include <linux/sched.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/seq_file.h> 24 + #include <linux/cryptouser.h> 25 + #include <net/netlink.h> 24 26 25 27 #include "internal.h" 26 28 ··· 399 397 return sizeof(struct crypto_shash *); 400 398 } 401 399 400 + static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) 401 + { 402 + struct crypto_report_hash rhash; 403 + 404 + snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash"); 405 + 406 + rhash.blocksize = alg->cra_blocksize; 407 + rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize; 408 + 409 + NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH, 410 + sizeof(struct crypto_report_hash), &rhash); 411 + 412 + return 0; 413 + 414 + nla_put_failure: 415 + return -EMSGSIZE; 416 + } 417 + 402 418 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) 403 419 __attribute__ ((unused)); 404 420 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) ··· 435 415 #ifdef CONFIG_PROC_FS 436 416 .show = crypto_ahash_show, 437 417 #endif 418 + .report = crypto_ahash_report, 438 419 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 439 420 .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, 440 421 .type = CRYPTO_ALG_TYPE_AHASH,
+6 -6
crypto/algapi.c
··· 22 22 23 23 #include "internal.h" 24 24 25 - static void crypto_remove_final(struct list_head *list); 26 - 27 25 static LIST_HEAD(crypto_template_list); 28 26 29 27 void crypto_larval_error(const char *name, u32 type, u32 mask) ··· 127 129 BUG_ON(!list_empty(&inst->alg.cra_users)); 128 130 } 129 131 130 - static void crypto_remove_spawns(struct crypto_alg *alg, 131 - struct list_head *list, 132 - struct crypto_alg *nalg) 132 + void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, 133 + struct crypto_alg *nalg) 133 134 { 134 135 u32 new_type = (nalg ?: alg)->cra_flags; 135 136 struct crypto_spawn *spawn, *n; ··· 174 177 crypto_remove_spawn(spawn, list); 175 178 } 176 179 } 180 + EXPORT_SYMBOL_GPL(crypto_remove_spawns); 177 181 178 182 static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) 179 183 { ··· 319 321 } 320 322 EXPORT_SYMBOL_GPL(crypto_alg_tested); 321 323 322 - static void crypto_remove_final(struct list_head *list) 324 + void crypto_remove_final(struct list_head *list) 323 325 { 324 326 struct crypto_alg *alg; 325 327 struct crypto_alg *n; ··· 329 331 crypto_alg_put(alg); 330 332 } 331 333 } 334 + EXPORT_SYMBOL_GPL(crypto_remove_final); 332 335 333 336 static void crypto_wait_for_test(struct crypto_larval *larval) 334 337 { ··· 492 493 goto err; 493 494 494 495 inst->alg.cra_module = tmpl->module; 496 + inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE; 495 497 496 498 down_write(&crypto_alg_sem); 497 499
+25
crypto/blkcipher.c
··· 24 24 #include <linux/seq_file.h> 25 25 #include <linux/slab.h> 26 26 #include <linux/string.h> 27 + #include <linux/cryptouser.h> 28 + #include <net/netlink.h> 27 29 28 30 #include "internal.h" 29 31 ··· 494 492 return crypto_init_blkcipher_ops_async(tfm); 495 493 } 496 494 495 + static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 496 + { 497 + struct crypto_report_blkcipher rblkcipher; 498 + 499 + snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher"); 500 + snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", 501 + alg->cra_blkcipher.geniv ?: "<default>"); 502 + 503 + rblkcipher.blocksize = alg->cra_blocksize; 504 + rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize; 505 + rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize; 506 + rblkcipher.ivsize = alg->cra_blkcipher.ivsize; 507 + 508 + NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 509 + sizeof(struct crypto_report_blkcipher), &rblkcipher); 510 + 511 + return 0; 512 + 513 + nla_put_failure: 514 + return -EMSGSIZE; 515 + } 516 + 497 517 static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) 498 518 __attribute__ ((unused)); 499 519 static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) ··· 535 511 #ifdef CONFIG_PROC_FS 536 512 .show = crypto_blkcipher_show, 537 513 #endif 514 + .report = crypto_blkcipher_report, 538 515 }; 539 516 EXPORT_SYMBOL_GPL(crypto_blkcipher_type); 540 517
+9 -89
crypto/blowfish.c crypto/blowfish_common.c
··· 1 1 /* 2 2 * Cryptographic API. 3 3 * 4 + * Common Blowfish algorithm parts shared between the c and assembler 5 + * implementations. 6 + * 4 7 * Blowfish Cipher Algorithm, by Bruce Schneier. 5 8 * http://www.counterpane.com/blowfish.html 6 9 * ··· 25 22 #include <asm/byteorder.h> 26 23 #include <linux/crypto.h> 27 24 #include <linux/types.h> 28 - 29 - #define BF_BLOCK_SIZE 8 30 - #define BF_MIN_KEY_SIZE 4 31 - #define BF_MAX_KEY_SIZE 56 32 - 33 - struct bf_ctx { 34 - u32 p[18]; 35 - u32 s[1024]; 36 - }; 25 + #include <crypto/blowfish.h> 37 26 38 27 static const u32 bf_pbox[16 + 2] = { 39 28 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, ··· 304 309 #define GET32_0(x) (((x) >> (24)) & (0xff)) 305 310 306 311 #define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \ 307 - S[512 + GET32_2(x)]) + S[768 + GET32_3(x)]) 312 + S[512 + GET32_2(x)]) + S[768 + GET32_3(x)]) 308 313 309 - #define ROUND(a, b, n) b ^= P[n]; a ^= bf_F (b) 314 + #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) 310 315 311 316 /* 312 317 * The blowfish encipher, processes 64-bit blocks. ··· 343 348 dst[1] = yl; 344 349 } 345 350 346 - static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 347 - { 348 - const __be32 *in_blk = (const __be32 *)src; 349 - __be32 *const out_blk = (__be32 *)dst; 350 - u32 in32[2], out32[2]; 351 - 352 - in32[0] = be32_to_cpu(in_blk[0]); 353 - in32[1] = be32_to_cpu(in_blk[1]); 354 - encrypt_block(crypto_tfm_ctx(tfm), out32, in32); 355 - out_blk[0] = cpu_to_be32(out32[0]); 356 - out_blk[1] = cpu_to_be32(out32[1]); 357 - } 358 - 359 - static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 360 - { 361 - struct bf_ctx *ctx = crypto_tfm_ctx(tfm); 362 - const __be32 *in_blk = (const __be32 *)src; 363 - __be32 *const out_blk = (__be32 *)dst; 364 - const u32 *P = ctx->p; 365 - const u32 *S = ctx->s; 366 - u32 yl = be32_to_cpu(in_blk[0]); 367 - u32 yr = be32_to_cpu(in_blk[1]); 368 - 369 - ROUND(yr, yl, 17); 370 - ROUND(yl, yr, 16); 371 - ROUND(yr, yl, 15); 372 - ROUND(yl, yr, 14); 373 - ROUND(yr, yl, 13); 374 - ROUND(yl, yr, 12); 375 - ROUND(yr, yl, 11); 376 - ROUND(yl, yr, 10); 377 - ROUND(yr, yl, 9); 378 - ROUND(yl, yr, 8); 379 - ROUND(yr, yl, 7); 380 - ROUND(yl, yr, 6); 381 - ROUND(yr, yl, 5); 382 - ROUND(yl, yr, 4); 383 - ROUND(yr, yl, 3); 384 - ROUND(yl, yr, 2); 385 - 386 - yl ^= P[1]; 387 - yr ^= P[0]; 388 - 389 - out_blk[0] = cpu_to_be32(yr); 390 - out_blk[1] = cpu_to_be32(yl); 391 - } 392 - 393 351 /* 394 352 * Calculates the blowfish S and P boxes for encryption and decryption. 395 353 */ 396 - static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) 354 + int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) 397 355 { 398 356 struct bf_ctx *ctx = crypto_tfm_ctx(tfm); 399 357 u32 *P = ctx->p; ··· 396 448 /* Bruce says not to bother with the weak key check. 
*/ 397 449 return 0; 398 450 } 399 - 400 - static struct crypto_alg alg = { 401 - .cra_name = "blowfish", 402 - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 403 - .cra_blocksize = BF_BLOCK_SIZE, 404 - .cra_ctxsize = sizeof(struct bf_ctx), 405 - .cra_alignmask = 3, 406 - .cra_module = THIS_MODULE, 407 - .cra_list = LIST_HEAD_INIT(alg.cra_list), 408 - .cra_u = { .cipher = { 409 - .cia_min_keysize = BF_MIN_KEY_SIZE, 410 - .cia_max_keysize = BF_MAX_KEY_SIZE, 411 - .cia_setkey = bf_setkey, 412 - .cia_encrypt = bf_encrypt, 413 - .cia_decrypt = bf_decrypt } } 414 - }; 415 - 416 - static int __init blowfish_mod_init(void) 417 - { 418 - return crypto_register_alg(&alg); 419 - } 420 - 421 - static void __exit blowfish_mod_fini(void) 422 - { 423 - crypto_unregister_alg(&alg); 424 - } 425 - 426 - module_init(blowfish_mod_init); 427 - module_exit(blowfish_mod_fini); 451 + EXPORT_SYMBOL_GPL(blowfish_setkey); 428 452 429 453 MODULE_LICENSE("GPL"); 430 - MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); 454 + MODULE_DESCRIPTION("Blowfish Cipher common functions");
+142
crypto/blowfish_generic.c
··· 1 + /* 2 + * Cryptographic API. 3 + * 4 + * Blowfish Cipher Algorithm, by Bruce Schneier. 5 + * http://www.counterpane.com/blowfish.html 6 + * 7 + * Adapted from Kerneli implementation. 8 + * 9 + * Copyright (c) Herbert Valerio Riedel <hvr@hvrlab.org> 10 + * Copyright (c) Kyle McMartin <kyle@debian.org> 11 + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 12 + * 13 + * This program is free software; you can redistribute it and/or modify 14 + * it under the terms of the GNU General Public License as published by 15 + * the Free Software Foundation; either version 2 of the License, or 16 + * (at your option) any later version. 17 + * 18 + */ 19 + #include <linux/init.h> 20 + #include <linux/module.h> 21 + #include <linux/mm.h> 22 + #include <asm/byteorder.h> 23 + #include <linux/crypto.h> 24 + #include <linux/types.h> 25 + #include <crypto/blowfish.h> 26 + 27 + /* 28 + * Round loop unrolling macros, S is a pointer to a S-Box array 29 + * organized in 4 unsigned longs at a row. 30 + */ 31 + #define GET32_3(x) (((x) & 0xff)) 32 + #define GET32_2(x) (((x) >> (8)) & (0xff)) 33 + #define GET32_1(x) (((x) >> (16)) & (0xff)) 34 + #define GET32_0(x) (((x) >> (24)) & (0xff)) 35 + 36 + #define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \ 37 + S[512 + GET32_2(x)]) + S[768 + GET32_3(x)]) 38 + 39 + #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) 40 + 41 + static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 42 + { 43 + struct bf_ctx *ctx = crypto_tfm_ctx(tfm); 44 + const __be32 *in_blk = (const __be32 *)src; 45 + __be32 *const out_blk = (__be32 *)dst; 46 + const u32 *P = ctx->p; 47 + const u32 *S = ctx->s; 48 + u32 yl = be32_to_cpu(in_blk[0]); 49 + u32 yr = be32_to_cpu(in_blk[1]); 50 + 51 + ROUND(yr, yl, 0); 52 + ROUND(yl, yr, 1); 53 + ROUND(yr, yl, 2); 54 + ROUND(yl, yr, 3); 55 + ROUND(yr, yl, 4); 56 + ROUND(yl, yr, 5); 57 + ROUND(yr, yl, 6); 58 + ROUND(yl, yr, 7); 59 + ROUND(yr, yl, 8); 60 + ROUND(yl, yr, 9); 61 + ROUND(yr, yl, 10); 62 + ROUND(yl, yr, 11); 63 + ROUND(yr, yl, 12); 64 + ROUND(yl, yr, 13); 65 + ROUND(yr, yl, 14); 66 + ROUND(yl, yr, 15); 67 + 68 + yl ^= P[16]; 69 + yr ^= P[17]; 70 + 71 + out_blk[0] = cpu_to_be32(yr); 72 + out_blk[1] = cpu_to_be32(yl); 73 + } 74 + 75 + static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 76 + { 77 + struct bf_ctx *ctx = crypto_tfm_ctx(tfm); 78 + const __be32 *in_blk = (const __be32 *)src; 79 + __be32 *const out_blk = (__be32 *)dst; 80 + const u32 *P = ctx->p; 81 + const u32 *S = ctx->s; 82 + u32 yl = be32_to_cpu(in_blk[0]); 83 + u32 yr = be32_to_cpu(in_blk[1]); 84 + 85 + ROUND(yr, yl, 17); 86 + ROUND(yl, yr, 16); 87 + ROUND(yr, yl, 15); 88 + ROUND(yl, yr, 14); 89 + ROUND(yr, yl, 13); 90 + ROUND(yl, yr, 12); 91 + ROUND(yr, yl, 11); 92 + ROUND(yl, yr, 10); 93 + ROUND(yr, yl, 9); 94 + ROUND(yl, yr, 8); 95 + ROUND(yr, yl, 7); 96 + ROUND(yl, yr, 6); 97 + ROUND(yr, yl, 5); 98 + ROUND(yl, yr, 4); 99 + ROUND(yr, yl, 3); 100 + ROUND(yl, yr, 2); 101 + 102 + yl ^= P[1]; 103 + yr ^= P[0]; 104 + 105 + out_blk[0] = cpu_to_be32(yr); 106 + out_blk[1] = cpu_to_be32(yl); 107 + } 108 + 109 + static struct crypto_alg alg = { 110 + .cra_name = "blowfish", 111 + .cra_driver_name = "blowfish-generic", 112 + .cra_priority = 100, 113 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 114 + .cra_blocksize = BF_BLOCK_SIZE, 115 + .cra_ctxsize = sizeof(struct bf_ctx), 116 + .cra_alignmask = 3, 117 + .cra_module = THIS_MODULE, 118 + .cra_list = LIST_HEAD_INIT(alg.cra_list), 119 + .cra_u = { .cipher = { 120 + .cia_min_keysize = 
BF_MIN_KEY_SIZE, 121 + .cia_max_keysize = BF_MAX_KEY_SIZE, 122 + .cia_setkey = blowfish_setkey, 123 + .cia_encrypt = bf_encrypt, 124 + .cia_decrypt = bf_decrypt } } 125 + }; 126 + 127 + static int __init blowfish_mod_init(void) 128 + { 129 + return crypto_register_alg(&alg); 130 + } 131 + 132 + static void __exit blowfish_mod_fini(void) 133 + { 134 + crypto_unregister_alg(&alg); 135 + } 136 + 137 + module_init(blowfish_mod_init); 138 + module_exit(blowfish_mod_fini); 139 + 140 + MODULE_LICENSE("GPL"); 141 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); 142 + MODULE_ALIAS("blowfish");
+1 -1
crypto/cryptd.c
··· 945 945 crypto_unregister_template(&cryptd_tmpl); 946 946 } 947 947 948 - module_init(cryptd_init); 948 + subsys_initcall(cryptd_init); 949 949 module_exit(cryptd_exit); 950 950 951 951 MODULE_LICENSE("GPL");
+438
crypto/crypto_user.c
··· 1 + /* 2 + * Crypto user configuration API. 3 + * 4 + * Copyright (C) 2011 secunet Security Networks AG 5 + * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License along with 17 + * this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 19 + */ 20 + 21 + #include <linux/module.h> 22 + #include <linux/crypto.h> 23 + #include <linux/cryptouser.h> 24 + #include <net/netlink.h> 25 + #include <linux/security.h> 26 + #include <net/net_namespace.h> 27 + #include "internal.h" 28 + 29 + DEFINE_MUTEX(crypto_cfg_mutex); 30 + 31 + /* The crypto netlink socket */ 32 + static struct sock *crypto_nlsk; 33 + 34 + struct crypto_dump_info { 35 + struct sk_buff *in_skb; 36 + struct sk_buff *out_skb; 37 + u32 nlmsg_seq; 38 + u16 nlmsg_flags; 39 + }; 40 + 41 + static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) 42 + { 43 + struct crypto_alg *q, *alg = NULL; 44 + 45 + down_read(&crypto_alg_sem); 46 + 47 + if (list_empty(&crypto_alg_list)) 48 + return NULL; 49 + 50 + list_for_each_entry(q, &crypto_alg_list, cra_list) { 51 + int match = 0; 52 + 53 + if ((q->cra_flags ^ p->cru_type) & p->cru_mask) 54 + continue; 55 + 56 + if (strlen(p->cru_driver_name)) 57 + match = !strcmp(q->cra_driver_name, 58 + p->cru_driver_name); 59 + else if (!exact) 60 + match = !strcmp(q->cra_name, p->cru_name); 61 + 62 + if (match) { 63 + alg = q; 64 + break; 65 + } 66 + } 67 + 68 + up_read(&crypto_alg_sem); 69 + 70 + return alg; 71 + } 72 + 73 + static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) 74 + { 75 + struct crypto_report_cipher rcipher; 76 + 77 + snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher"); 78 + 79 + rcipher.blocksize = alg->cra_blocksize; 80 + rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; 81 + rcipher.max_keysize = alg->cra_cipher.cia_max_keysize; 82 + 83 + NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER, 84 + sizeof(struct crypto_report_cipher), &rcipher); 85 + 86 + return 0; 87 + 88 + nla_put_failure: 89 + return -EMSGSIZE; 90 + } 91 + 92 + static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) 93 + { 94 + struct crypto_report_comp rcomp; 95 + 96 + snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression"); 97 + 98 + NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS, 99 + sizeof(struct crypto_report_comp), &rcomp); 100 + 101 + return 0; 102 + 103 + nla_put_failure: 104 + return -EMSGSIZE; 105 + } 106 + 107 + static int crypto_report_one(struct crypto_alg *alg, 108 + struct crypto_user_alg *ualg, struct sk_buff *skb) 109 + { 110 + memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name)); 111 + memcpy(&ualg->cru_driver_name, &alg->cra_driver_name, 112 + sizeof(ualg->cru_driver_name)); 113 + memcpy(&ualg->cru_module_name, module_name(alg->cra_module), 114 + CRYPTO_MAX_ALG_NAME); 115 + 116 + ualg->cru_flags = alg->cra_flags; 117 + ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); 118 + 119 + 
NLA_PUT_U32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority); 120 + 121 + if (alg->cra_flags & CRYPTO_ALG_LARVAL) { 122 + struct crypto_report_larval rl; 123 + 124 + snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval"); 125 + 126 + NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL, 127 + sizeof(struct crypto_report_larval), &rl); 128 + 129 + goto out; 130 + } 131 + 132 + if (alg->cra_type && alg->cra_type->report) { 133 + if (alg->cra_type->report(skb, alg)) 134 + goto nla_put_failure; 135 + 136 + goto out; 137 + } 138 + 139 + switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { 140 + case CRYPTO_ALG_TYPE_CIPHER: 141 + if (crypto_report_cipher(skb, alg)) 142 + goto nla_put_failure; 143 + 144 + break; 145 + case CRYPTO_ALG_TYPE_COMPRESS: 146 + if (crypto_report_comp(skb, alg)) 147 + goto nla_put_failure; 148 + 149 + break; 150 + } 151 + 152 + out: 153 + return 0; 154 + 155 + nla_put_failure: 156 + return -EMSGSIZE; 157 + } 158 + 159 + static int crypto_report_alg(struct crypto_alg *alg, 160 + struct crypto_dump_info *info) 161 + { 162 + struct sk_buff *in_skb = info->in_skb; 163 + struct sk_buff *skb = info->out_skb; 164 + struct nlmsghdr *nlh; 165 + struct crypto_user_alg *ualg; 166 + int err = 0; 167 + 168 + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, info->nlmsg_seq, 169 + CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags); 170 + if (!nlh) { 171 + err = -EMSGSIZE; 172 + goto out; 173 + } 174 + 175 + ualg = nlmsg_data(nlh); 176 + 177 + err = crypto_report_one(alg, ualg, skb); 178 + if (err) { 179 + nlmsg_cancel(skb, nlh); 180 + goto out; 181 + } 182 + 183 + nlmsg_end(skb, nlh); 184 + 185 + out: 186 + return err; 187 + } 188 + 189 + static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, 190 + struct nlattr **attrs) 191 + { 192 + struct crypto_user_alg *p = nlmsg_data(in_nlh); 193 + struct crypto_alg *alg; 194 + struct sk_buff *skb; 195 + struct crypto_dump_info info; 196 + int err; 197 + 198 + if (!p->cru_driver_name) 199 + return -EINVAL; 200 + 201 + alg = crypto_alg_match(p, 1); 202 + if (!alg) 203 + return -ENOENT; 204 + 205 + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 206 + if (!skb) 207 + return -ENOMEM; 208 + 209 + info.in_skb = in_skb; 210 + info.out_skb = skb; 211 + info.nlmsg_seq = in_nlh->nlmsg_seq; 212 + info.nlmsg_flags = 0; 213 + 214 + err = crypto_report_alg(alg, &info); 215 + if (err) 216 + return err; 217 + 218 + return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).pid); 219 + } 220 + 221 + static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb) 222 + { 223 + struct crypto_alg *alg; 224 + struct crypto_dump_info info; 225 + int err; 226 + 227 + if (cb->args[0]) 228 + goto out; 229 + 230 + cb->args[0] = 1; 231 + 232 + info.in_skb = cb->skb; 233 + info.out_skb = skb; 234 + info.nlmsg_seq = cb->nlh->nlmsg_seq; 235 + info.nlmsg_flags = NLM_F_MULTI; 236 + 237 + list_for_each_entry(alg, &crypto_alg_list, cra_list) { 238 + err = crypto_report_alg(alg, &info); 239 + if (err) 240 + goto out_err; 241 + } 242 + 243 + out: 244 + return skb->len; 245 + out_err: 246 + return err; 247 + } 248 + 249 + static int crypto_dump_report_done(struct netlink_callback *cb) 250 + { 251 + return 0; 252 + } 253 + 254 + static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh, 255 + struct nlattr **attrs) 256 + { 257 + struct crypto_alg *alg; 258 + struct crypto_user_alg *p = nlmsg_data(nlh); 259 + struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; 260 + LIST_HEAD(list); 261 + 262 + if (priority && 
!strlen(p->cru_driver_name)) 263 + return -EINVAL; 264 + 265 + alg = crypto_alg_match(p, 1); 266 + if (!alg) 267 + return -ENOENT; 268 + 269 + down_write(&crypto_alg_sem); 270 + 271 + crypto_remove_spawns(alg, &list, NULL); 272 + 273 + if (priority) 274 + alg->cra_priority = nla_get_u32(priority); 275 + 276 + up_write(&crypto_alg_sem); 277 + 278 + crypto_remove_final(&list); 279 + 280 + return 0; 281 + } 282 + 283 + static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh, 284 + struct nlattr **attrs) 285 + { 286 + struct crypto_alg *alg; 287 + struct crypto_user_alg *p = nlmsg_data(nlh); 288 + 289 + alg = crypto_alg_match(p, 1); 290 + if (!alg) 291 + return -ENOENT; 292 + 293 + /* We can not unregister core algorithms such as aes-generic. 294 + * We would loose the reference in the crypto_alg_list to this algorithm 295 + * if we try to unregister. Unregistering such an algorithm without 296 + * removing the module is not possible, so we restrict to crypto 297 + * instances that are build from templates. */ 298 + if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE)) 299 + return -EINVAL; 300 + 301 + if (atomic_read(&alg->cra_refcnt) != 1) 302 + return -EBUSY; 303 + 304 + return crypto_unregister_alg(alg); 305 + } 306 + 307 + static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, 308 + struct nlattr **attrs) 309 + { 310 + int exact; 311 + const char *name; 312 + struct crypto_alg *alg; 313 + struct crypto_user_alg *p = nlmsg_data(nlh); 314 + struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; 315 + 316 + if (strlen(p->cru_driver_name)) 317 + exact = 1; 318 + 319 + if (priority && !exact) 320 + return -EINVAL; 321 + 322 + alg = crypto_alg_match(p, exact); 323 + if (alg) 324 + return -EEXIST; 325 + 326 + if (strlen(p->cru_driver_name)) 327 + name = p->cru_driver_name; 328 + else 329 + name = p->cru_name; 330 + 331 + alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask); 332 + if (IS_ERR(alg)) 333 + return PTR_ERR(alg); 334 + 335 + down_write(&crypto_alg_sem); 336 + 337 + if (priority) 338 + alg->cra_priority = nla_get_u32(priority); 339 + 340 + up_write(&crypto_alg_sem); 341 + 342 + crypto_mod_put(alg); 343 + 344 + return 0; 345 + } 346 + 347 + #define MSGSIZE(type) sizeof(struct type) 348 + 349 + static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { 350 + [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 351 + [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 352 + [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 353 + [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 354 + }; 355 + 356 + static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = { 357 + [CRYPTOCFGA_PRIORITY_VAL] = { .type = NLA_U32}, 358 + }; 359 + 360 + #undef MSGSIZE 361 + 362 + static struct crypto_link { 363 + int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); 364 + int (*dump)(struct sk_buff *, struct netlink_callback *); 365 + int (*done)(struct netlink_callback *); 366 + } crypto_dispatch[CRYPTO_NR_MSGTYPES] = { 367 + [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = { .doit = crypto_add_alg}, 368 + [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = { .doit = crypto_del_alg}, 369 + [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = { .doit = crypto_update_alg}, 370 + [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report, 371 + .dump = crypto_dump_report, 372 + .done = crypto_dump_report_done}, 373 + }; 374 + 375 + static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 376 + { 377 + struct nlattr 
*attrs[CRYPTOCFGA_MAX+1]; 378 + struct crypto_link *link; 379 + int type, err; 380 + 381 + type = nlh->nlmsg_type; 382 + if (type > CRYPTO_MSG_MAX) 383 + return -EINVAL; 384 + 385 + type -= CRYPTO_MSG_BASE; 386 + link = &crypto_dispatch[type]; 387 + 388 + if (security_netlink_recv(skb, CAP_NET_ADMIN)) 389 + return -EPERM; 390 + 391 + if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) && 392 + (nlh->nlmsg_flags & NLM_F_DUMP))) { 393 + if (link->dump == NULL) 394 + return -EINVAL; 395 + 396 + return netlink_dump_start(crypto_nlsk, skb, nlh, 397 + link->dump, link->done, 0); 398 + } 399 + 400 + err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, 401 + crypto_policy); 402 + if (err < 0) 403 + return err; 404 + 405 + if (link->doit == NULL) 406 + return -EINVAL; 407 + 408 + return link->doit(skb, nlh, attrs); 409 + } 410 + 411 + static void crypto_netlink_rcv(struct sk_buff *skb) 412 + { 413 + mutex_lock(&crypto_cfg_mutex); 414 + netlink_rcv_skb(skb, &crypto_user_rcv_msg); 415 + mutex_unlock(&crypto_cfg_mutex); 416 + } 417 + 418 + static int __init crypto_user_init(void) 419 + { 420 + crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, 421 + 0, crypto_netlink_rcv, 422 + NULL, THIS_MODULE); 423 + if (!crypto_nlsk) 424 + return -ENOMEM; 425 + 426 + return 0; 427 + } 428 + 429 + static void __exit crypto_user_exit(void) 430 + { 431 + netlink_kernel_release(crypto_nlsk); 432 + } 433 + 434 + module_init(crypto_user_init); 435 + module_exit(crypto_user_exit); 436 + MODULE_LICENSE("GPL"); 437 + MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); 438 + MODULE_DESCRIPTION("Crypto userspace configuration API");
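The new NETLINK_CRYPTO socket is driven with ordinary netlink messages from userspace and requires CAP_NET_ADMIN. A minimal sketch of a CRYPTO_MSG_GETALG query (hypothetical driver name, reply parsing omitted, assuming installed kernel headers that ship the new <linux/cryptouser.h> and the NETLINK_CRYPTO constant):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/cryptouser.h>

	int main(void)
	{
		struct {
			struct nlmsghdr nlh;
			struct crypto_user_alg alg;
		} req;
		struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
		char buf[4096];
		int fd, len;

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
		req.nlh.nlmsg_type = CRYPTO_MSG_GETALG;
		req.nlh.nlmsg_flags = NLM_F_REQUEST;
		/* crypto_report() wants an exact match, so use the driver name */
		strcpy(req.alg.cru_driver_name, "cbc-twofish-3way");

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
		if (fd < 0)
			return 1;

		sendto(fd, &req, req.nlh.nlmsg_len, 0,
		       (struct sockaddr *)&kernel, sizeof(kernel));
		len = recv(fd, buf, sizeof(buf), 0);
		printf("got %d bytes of crypto_user_alg + CRYPTOCFGA_* attributes\n", len);

		close(fd);
		return 0;
	}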
+3
crypto/internal.h
··· 86 86 void crypto_larval_error(const char *name, u32 type, u32 mask); 87 87 void crypto_alg_tested(const char *name, int err); 88 88 89 + void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, 90 + struct crypto_alg *nalg); 91 + void crypto_remove_final(struct list_head *list); 89 92 void crypto_shoot_alg(struct crypto_alg *alg); 90 93 struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, 91 94 u32 mask);
+18
crypto/pcompress.c
··· 24 24 #include <linux/module.h> 25 25 #include <linux/seq_file.h> 26 26 #include <linux/string.h> 27 + #include <linux/cryptouser.h> 28 + #include <net/netlink.h> 27 29 28 30 #include <crypto/compress.h> 29 31 #include <crypto/internal/compress.h> ··· 48 46 return 0; 49 47 } 50 48 49 + static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) 50 + { 51 + struct crypto_report_comp rpcomp; 52 + 53 + snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp"); 54 + 55 + NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS, 56 + sizeof(struct crypto_report_comp), &rpcomp); 57 + 58 + return 0; 59 + 60 + nla_put_failure: 61 + return -EMSGSIZE; 62 + } 63 + 51 64 static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) 52 65 __attribute__ ((unused)); 53 66 static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) ··· 77 60 #ifdef CONFIG_PROC_FS 78 61 .show = crypto_pcomp_show, 79 62 #endif 63 + .report = crypto_pcomp_report, 80 64 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 81 65 .maskset = CRYPTO_ALG_TYPE_MASK, 82 66 .type = CRYPTO_ALG_TYPE_PCOMPRESS,
+20
crypto/rng.c
··· 21 21 #include <linux/seq_file.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/string.h> 24 + #include <linux/cryptouser.h> 25 + #include <net/netlink.h> 24 26 25 27 static DEFINE_MUTEX(crypto_default_rng_lock); 26 28 struct crypto_rng *crypto_default_rng; ··· 60 58 return 0; 61 59 } 62 60 61 + static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) 62 + { 63 + struct crypto_report_rng rrng; 64 + 65 + snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng"); 66 + 67 + rrng.seedsize = alg->cra_rng.seedsize; 68 + 69 + NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG, 70 + sizeof(struct crypto_report_rng), &rrng); 71 + 72 + return 0; 73 + 74 + nla_put_failure: 75 + return -EMSGSIZE; 76 + } 77 + 63 78 static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) 64 79 __attribute__ ((unused)); 65 80 static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) ··· 97 78 #ifdef CONFIG_PROC_FS 98 79 .show = crypto_rng_show, 99 80 #endif 81 + .report = crypto_rng_report, 100 82 }; 101 83 EXPORT_SYMBOL_GPL(crypto_rng_type); 102 84
+5 -4
crypto/sha1_generic.c
··· 36 36 return 0; 37 37 } 38 38 39 - static int sha1_update(struct shash_desc *desc, const u8 *data, 39 + int crypto_sha1_update(struct shash_desc *desc, const u8 *data, 40 40 unsigned int len) 41 41 { 42 42 struct sha1_state *sctx = shash_desc_ctx(desc); ··· 71 71 72 72 return 0; 73 73 } 74 + EXPORT_SYMBOL(crypto_sha1_update); 74 75 75 76 76 77 /* Add padding and return the message digest. */ ··· 88 87 /* Pad out to 56 mod 64 */ 89 88 index = sctx->count & 0x3f; 90 89 padlen = (index < 56) ? (56 - index) : ((64+56) - index); 91 - sha1_update(desc, padding, padlen); 90 + crypto_sha1_update(desc, padding, padlen); 92 91 93 92 /* Append length */ 94 - sha1_update(desc, (const u8 *)&bits, sizeof(bits)); 93 + crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); 95 94 96 95 /* Store state in digest */ 97 96 for (i = 0; i < 5; i++) ··· 122 121 static struct shash_alg alg = { 123 122 .digestsize = SHA1_DIGEST_SIZE, 124 123 .init = sha1_init, 125 - .update = sha1_update, 124 + .update = crypto_sha1_update, 126 125 .final = sha1_final, 127 126 .export = sha1_export, 128 127 .import = sha1_import,
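Renaming sha1_update() to crypto_sha1_update() and exporting it lets the new SHA1_SSSE3 module fall back to the generic C implementation whenever the FPU cannot be used (for example in interrupt context). A rough sketch of that pattern, not the exact glue code, with the assembler entry point name assumed:

	#include <crypto/internal/hash.h>
	#include <crypto/sha.h>
	#include <asm/i387.h>

	/* assumed name of the SSSE3/AVX assembler transform */
	asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
					     unsigned int rounds);

	static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
				     unsigned int len)
	{
		if (!irq_fpu_usable())
			return crypto_sha1_update(desc, data, len);

		kernel_fpu_begin();
		/* ... feed complete 64-byte blocks to sha1_transform_ssse3() ... */
		kernel_fpu_end();

		return 0;
	}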
+21
crypto/shash.c
··· 17 17 #include <linux/module.h> 18 18 #include <linux/slab.h> 19 19 #include <linux/seq_file.h> 20 + #include <linux/cryptouser.h> 21 + #include <net/netlink.h> 20 22 21 23 #include "internal.h" 22 24 ··· 524 522 return alg->cra_ctxsize; 525 523 } 526 524 525 + static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) 526 + { 527 + struct crypto_report_hash rhash; 528 + struct shash_alg *salg = __crypto_shash_alg(alg); 529 + 530 + snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash"); 531 + rhash.blocksize = alg->cra_blocksize; 532 + rhash.digestsize = salg->digestsize; 533 + 534 + NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH, 535 + sizeof(struct crypto_report_hash), &rhash); 536 + 537 + return 0; 538 + 539 + nla_put_failure: 540 + return -EMSGSIZE; 541 + } 542 + 527 543 static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) 528 544 __attribute__ ((unused)); 529 545 static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) ··· 561 541 #ifdef CONFIG_PROC_FS 562 542 .show = crypto_shash_show, 563 543 #endif 544 + .report = crypto_shash_report, 564 545 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 565 546 .maskset = CRYPTO_ALG_TYPE_MASK, 566 547 .type = CRYPTO_ALG_TYPE_SHASH,
+10
crypto/tcrypt.c
··· 782 782 case 7: 783 783 ret += tcrypt_test("ecb(blowfish)"); 784 784 ret += tcrypt_test("cbc(blowfish)"); 785 + ret += tcrypt_test("ctr(blowfish)"); 785 786 break; 786 787 787 788 case 8: 788 789 ret += tcrypt_test("ecb(twofish)"); 789 790 ret += tcrypt_test("cbc(twofish)"); 791 + ret += tcrypt_test("ctr(twofish)"); 790 792 break; 791 793 792 794 case 9: ··· 1041 1039 speed_template_16_24_32); 1042 1040 test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, 1043 1041 speed_template_16_24_32); 1042 + test_cipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0, 1043 + speed_template_16_24_32); 1044 + test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0, 1045 + speed_template_16_24_32); 1044 1046 break; 1045 1047 1046 1048 case 203: ··· 1055 1049 test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0, 1056 1050 speed_template_8_32); 1057 1051 test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0, 1052 + speed_template_8_32); 1053 + test_cipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0, 1054 + speed_template_8_32); 1055 + test_cipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0, 1058 1056 speed_template_8_32); 1059 1057 break; 1060 1058
+30
crypto/testmgr.c
··· 1756 1756 } 1757 1757 } 1758 1758 }, { 1759 + .alg = "ctr(blowfish)", 1760 + .test = alg_test_skcipher, 1761 + .suite = { 1762 + .cipher = { 1763 + .enc = { 1764 + .vecs = bf_ctr_enc_tv_template, 1765 + .count = BF_CTR_ENC_TEST_VECTORS 1766 + }, 1767 + .dec = { 1768 + .vecs = bf_ctr_dec_tv_template, 1769 + .count = BF_CTR_DEC_TEST_VECTORS 1770 + } 1771 + } 1772 + } 1773 + }, { 1774 + .alg = "ctr(twofish)", 1775 + .test = alg_test_skcipher, 1776 + .suite = { 1777 + .cipher = { 1778 + .enc = { 1779 + .vecs = tf_ctr_enc_tv_template, 1780 + .count = TF_CTR_ENC_TEST_VECTORS 1781 + }, 1782 + .dec = { 1783 + .vecs = tf_ctr_dec_tv_template, 1784 + .count = TF_CTR_DEC_TEST_VECTORS 1785 + } 1786 + } 1787 + } 1788 + }, { 1759 1789 .alg = "cts(cbc(aes))", 1760 1790 .test = alg_test_skcipher, 1761 1791 .suite = {
+390 -8
crypto/testmgr.h
··· 2391 2391 /* 2392 2392 * Blowfish test vectors. 2393 2393 */ 2394 - #define BF_ENC_TEST_VECTORS 6 2395 - #define BF_DEC_TEST_VECTORS 6 2396 - #define BF_CBC_ENC_TEST_VECTORS 1 2397 - #define BF_CBC_DEC_TEST_VECTORS 1 2394 + #define BF_ENC_TEST_VECTORS 7 2395 + #define BF_DEC_TEST_VECTORS 7 2396 + #define BF_CBC_ENC_TEST_VECTORS 2 2397 + #define BF_CBC_DEC_TEST_VECTORS 2 2398 + #define BF_CTR_ENC_TEST_VECTORS 2 2399 + #define BF_CTR_DEC_TEST_VECTORS 2 2398 2400 2399 2401 static struct cipher_testvec bf_enc_tv_template[] = { 2400 2402 { /* DES test vectors from OpenSSL */ ··· 2450 2448 .ilen = 8, 2451 2449 .result = "\xc0\x45\x04\x01\x2e\x4e\x1f\x53", 2452 2450 .rlen = 8, 2451 + }, { /* Generated with Crypto++ */ 2452 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2453 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2454 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2455 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2456 + .klen = 32, 2457 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2458 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2459 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2460 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2461 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9", 2462 + .ilen = 40, 2463 + .result = "\x96\x87\x3D\x0C\x7B\xFB\xBD\x1F" 2464 + "\xE3\xC1\x99\x6D\x39\xD4\xC2\x7D" 2465 + "\xD7\x87\xA1\xF2\xDF\x51\x71\x26" 2466 + "\xC2\xF4\x6D\xFF\xF6\xCD\x6B\x40" 2467 + "\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B", 2468 + .rlen = 40, 2453 2469 }, 2454 2470 }; 2455 2471 ··· 2523 2503 .ilen = 8, 2524 2504 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10", 2525 2505 .rlen = 8, 2506 + }, { /* Generated with Crypto++ */ 2507 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2508 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2509 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2510 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2511 + .klen = 32, 2512 + .input = "\x96\x87\x3D\x0C\x7B\xFB\xBD\x1F" 2513 + "\xE3\xC1\x99\x6D\x39\xD4\xC2\x7D" 2514 + "\xD7\x87\xA1\xF2\xDF\x51\x71\x26" 2515 + "\xC2\xF4\x6D\xFF\xF6\xCD\x6B\x40" 2516 + "\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B", 2517 + .ilen = 40, 2518 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2519 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2520 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2521 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2522 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9", 2523 + .rlen = 40, 2526 2524 }, 2527 2525 }; 2528 2526 ··· 2560 2522 "\x58\xde\xb9\xe7\x15\x46\x16\xd9" 2561 2523 "\x59\xf1\x65\x2b\xd5\xff\x92\xcc", 2562 2524 .rlen = 32, 2525 + }, { /* Generated with Crypto++ */ 2526 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2527 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2528 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2529 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2530 + .klen = 32, 2531 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", 2532 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2533 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2534 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2535 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2536 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9", 2537 + .ilen = 40, 2538 + .result = "\xB4\xFE\xA5\xBB\x3D\x2C\x27\x06" 2539 + "\x06\x2B\x3A\x92\xB2\xF5\x5E\x62" 2540 + "\x84\xCD\xF7\x66\x7E\x41\x6C\x8E" 2541 + "\x1B\xD9\x02\xB6\x48\xB0\x87\x25" 2542 + "\x01\x9C\x93\x63\x51\x60\x82\xD2", 2543 + .rlen = 40, 2563 2544 }, 2564 2545 }; 2565 2546 ··· 2598 2541 "\x68\x65\x20\x74\x69\x6d\x65\x20" 2599 2542 "\x66\x6f\x72\x20\x00\x00\x00\x00", 2600 2543 .rlen = 32, 2544 + }, { /* Generated with Crypto++ */ 2545 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2546 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2547 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2548 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2549 + .klen 
= 32, 2550 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", 2551 + .input = "\xB4\xFE\xA5\xBB\x3D\x2C\x27\x06" 2552 + "\x06\x2B\x3A\x92\xB2\xF5\x5E\x62" 2553 + "\x84\xCD\xF7\x66\x7E\x41\x6C\x8E" 2554 + "\x1B\xD9\x02\xB6\x48\xB0\x87\x25" 2555 + "\x01\x9C\x93\x63\x51\x60\x82\xD2", 2556 + .ilen = 40, 2557 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2558 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2559 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2560 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2561 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9", 2562 + .rlen = 40, 2563 + }, 2564 + }; 2565 + 2566 + static struct cipher_testvec bf_ctr_enc_tv_template[] = { 2567 + { /* Generated with Crypto++ */ 2568 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2569 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2570 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2571 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2572 + .klen = 32, 2573 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", 2574 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2575 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2576 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2577 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2578 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9", 2579 + .ilen = 40, 2580 + .result = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D" 2581 + "\x9E\xDF\x38\x18\x83\x07\xEF\xC1" 2582 + "\x93\x3C\xAA\xAA\xFE\x06\x42\xCC" 2583 + "\x0D\x70\x86\x5A\x44\xAD\x85\x17" 2584 + "\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC", 2585 + .rlen = 40, 2586 + }, { /* Generated with Crypto++ */ 2587 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2588 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2589 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2590 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2591 + .klen = 32, 2592 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", 2593 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2594 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2595 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2596 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2597 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2598 + "\x6D\x04\x9B", 2599 + .ilen = 43, 2600 + .result = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D" 2601 + "\x9E\xDF\x38\x18\x83\x07\xEF\xC1" 2602 + "\x93\x3C\xAA\xAA\xFE\x06\x42\xCC" 2603 + "\x0D\x70\x86\x5A\x44\xAD\x85\x17" 2604 + "\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC" 2605 + "\x3D\xA7\xE9", 2606 + .rlen = 43, 2607 + }, 2608 + }; 2609 + 2610 + static struct cipher_testvec bf_ctr_dec_tv_template[] = { 2611 + { /* Generated with Crypto++ */ 2612 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2613 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2614 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2615 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2616 + .klen = 32, 2617 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", 2618 + .input = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D" 2619 + "\x9E\xDF\x38\x18\x83\x07\xEF\xC1" 2620 + "\x93\x3C\xAA\xAA\xFE\x06\x42\xCC" 2621 + "\x0D\x70\x86\x5A\x44\xAD\x85\x17" 2622 + "\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC", 2623 + .ilen = 40, 2624 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2625 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2626 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2627 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2628 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9", 2629 + .rlen = 40, 2630 + }, { /* Generated with Crypto++ */ 2631 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2632 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2633 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2634 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2635 + .klen = 32, 2636 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", 2637 + .input = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D" 2638 + "\x9E\xDF\x38\x18\x83\x07\xEF\xC1" 2639 + "\x93\x3C\xAA\xAA\xFE\x06\x42\xCC" 2640 + "\x0D\x70\x86\x5A\x44\xAD\x85\x17" 2641 + 
"\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC" 2642 + "\x3D\xA7\xE9", 2643 + .ilen = 43, 2644 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2645 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2646 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2647 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2648 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2649 + "\x6D\x04\x9B", 2650 + .rlen = 43, 2601 2651 }, 2602 2652 }; 2603 2653 2604 2654 /* 2605 2655 * Twofish test vectors. 2606 2656 */ 2607 - #define TF_ENC_TEST_VECTORS 3 2608 - #define TF_DEC_TEST_VECTORS 3 2609 - #define TF_CBC_ENC_TEST_VECTORS 4 2610 - #define TF_CBC_DEC_TEST_VECTORS 4 2657 + #define TF_ENC_TEST_VECTORS 4 2658 + #define TF_DEC_TEST_VECTORS 4 2659 + #define TF_CBC_ENC_TEST_VECTORS 5 2660 + #define TF_CBC_DEC_TEST_VECTORS 5 2661 + #define TF_CTR_ENC_TEST_VECTORS 2 2662 + #define TF_CTR_DEC_TEST_VECTORS 2 2611 2663 2612 2664 static struct cipher_testvec tf_enc_tv_template[] = { 2613 2665 { ··· 2748 2582 .result = "\x37\x52\x7b\xe0\x05\x23\x34\xb8" 2749 2583 "\x9f\x0c\xfc\xca\xe8\x7c\xfa\x20", 2750 2584 .rlen = 16, 2585 + }, { /* Generated with Crypto++ */ 2586 + .key = "\x3F\x85\x62\x3F\x1C\xF9\xD6\x1C" 2587 + "\xF9\xD6\xB3\x90\x6D\x4A\x90\x6D" 2588 + "\x4A\x27\x04\xE1\x27\x04\xE1\xBE" 2589 + "\x9B\x78\xBE\x9B\x78\x55\x32\x0F", 2590 + .klen = 32, 2591 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2592 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2593 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2594 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2595 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2596 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2597 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2598 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", 2599 + .ilen = 64, 2600 + .result = "\x88\xCB\x1E\xC2\xAF\x8A\x97\xFF" 2601 + "\xF6\x90\x46\x9C\x4A\x0F\x08\xDC" 2602 + "\xDE\xAB\xAD\xFA\xFC\xA8\xC2\x3D" 2603 + "\xE0\xE4\x8B\x3F\xD5\xA3\xF7\x14" 2604 + "\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5" 2605 + "\xDF\xFA\xC7\xE8\x09\x50\x76\x08" 2606 + "\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05" 2607 + "\x89\xF6\x82\xF0\xD3\xDB\x06\x02", 2608 + .rlen = 64, 2751 2609 }, 2752 2610 }; 2753 2611 ··· 2805 2615 .ilen = 16, 2806 2616 .result = zeroed_string, 2807 2617 .rlen = 16, 2618 + }, { /* Generated with Crypto++ */ 2619 + .key = "\x3F\x85\x62\x3F\x1C\xF9\xD6\x1C" 2620 + "\xF9\xD6\xB3\x90\x6D\x4A\x90\x6D" 2621 + "\x4A\x27\x04\xE1\x27\x04\xE1\xBE" 2622 + "\x9B\x78\xBE\x9B\x78\x55\x32\x0F", 2623 + .klen = 32, 2624 + .input = "\x88\xCB\x1E\xC2\xAF\x8A\x97\xFF" 2625 + "\xF6\x90\x46\x9C\x4A\x0F\x08\xDC" 2626 + "\xDE\xAB\xAD\xFA\xFC\xA8\xC2\x3D" 2627 + "\xE0\xE4\x8B\x3F\xD5\xA3\xF7\x14" 2628 + "\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5" 2629 + "\xDF\xFA\xC7\xE8\x09\x50\x76\x08" 2630 + "\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05" 2631 + "\x89\xF6\x82\xF0\xD3\xDB\x06\x02", 2632 + .ilen = 64, 2633 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2634 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2635 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2636 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2637 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2638 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2639 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2640 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", 2641 + .rlen = 64, 2808 2642 }, 2809 2643 }; 2810 2644 ··· 2875 2661 "\x05\xef\x8c\x61\xa8\x11\x58\x26" 2876 2662 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41", 2877 2663 .rlen = 48, 2664 + }, { /* Generated with Crypto++ */ 2665 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2666 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2667 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2668 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2669 + .klen = 32, 2670 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" 2671 + 
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", 2672 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2673 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2674 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2675 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2676 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2677 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2678 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2679 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", 2680 + .ilen = 64, 2681 + .result = "\xC8\xFF\xF2\x53\xA6\x27\x09\xD1" 2682 + "\x33\x38\xC2\xC0\x0C\x14\x7E\xB5" 2683 + "\x26\x1B\x05\x0C\x05\x12\x3F\xC0" 2684 + "\xF9\x1C\x02\x28\x40\x96\x6F\xD0" 2685 + "\x3D\x32\xDF\xDA\x56\x00\x6E\xEE" 2686 + "\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC" 2687 + "\x8C\x53\xFA\x87\x6F\xDD\x81\xA3" 2688 + "\xB1\xD3\x44\x65\xDF\xE7\x63\x38", 2689 + .rlen = 64, 2878 2690 }, 2879 2691 }; 2880 2692 ··· 2947 2707 .ilen = 48, 2948 2708 .result = zeroed_string, 2949 2709 .rlen = 48, 2710 + }, { /* Generated with Crypto++ */ 2711 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2712 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2713 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2714 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2715 + .klen = 32, 2716 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" 2717 + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", 2718 + .input = "\xC8\xFF\xF2\x53\xA6\x27\x09\xD1" 2719 + "\x33\x38\xC2\xC0\x0C\x14\x7E\xB5" 2720 + "\x26\x1B\x05\x0C\x05\x12\x3F\xC0" 2721 + "\xF9\x1C\x02\x28\x40\x96\x6F\xD0" 2722 + "\x3D\x32\xDF\xDA\x56\x00\x6E\xEE" 2723 + "\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC" 2724 + "\x8C\x53\xFA\x87\x6F\xDD\x81\xA3" 2725 + "\xB1\xD3\x44\x65\xDF\xE7\x63\x38", 2726 + .ilen = 64, 2727 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2728 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2729 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2730 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2731 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2732 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2733 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2734 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", 2735 + .rlen = 64, 2736 + }, 2737 + }; 2738 + 2739 + static struct cipher_testvec tf_ctr_enc_tv_template[] = { 2740 + { /* Generated with Crypto++ */ 2741 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2742 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2743 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2744 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2745 + .klen = 32, 2746 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" 2747 + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", 2748 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2749 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2750 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2751 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2752 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2753 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2754 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2755 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", 2756 + .ilen = 64, 2757 + .result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" 2758 + "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" 2759 + "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" 2760 + "\x55\x17\x4E\xC7\x0E\x33\x1F\xF1" 2761 + "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" 2762 + "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" 2763 + "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" 2764 + "\x01\x41\x21\x12\x38\xAB\x52\x4F", 2765 + .rlen = 64, 2766 + }, { /* Generated with Crypto++ */ 2767 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2768 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2769 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2770 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2771 + .klen = 32, 2772 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" 2773 + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", 2774 + .input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2775 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2776 + 
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2777 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2778 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2779 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2780 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2781 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" 2782 + "\xC3\x37\xCE", 2783 + .ilen = 67, 2784 + .result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" 2785 + "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" 2786 + "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" 2787 + "\x55\x17\x4E\xC7\x0E\x33\x1F\xF1" 2788 + "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" 2789 + "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" 2790 + "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" 2791 + "\x01\x41\x21\x12\x38\xAB\x52\x4F" 2792 + "\xA8\x57\x20", 2793 + .rlen = 67, 2794 + }, 2795 + }; 2796 + 2797 + static struct cipher_testvec tf_ctr_dec_tv_template[] = { 2798 + { /* Generated with Crypto++ */ 2799 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2800 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2801 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2802 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2803 + .klen = 32, 2804 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" 2805 + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", 2806 + .input = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" 2807 + "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" 2808 + "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" 2809 + "\x55\x17\x4E\xC7\x0E\x33\x1F\xF1" 2810 + "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" 2811 + "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" 2812 + "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" 2813 + "\x01\x41\x21\x12\x38\xAB\x52\x4F", 2814 + .ilen = 64, 2815 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2816 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2817 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2818 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2819 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2820 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2821 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2822 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", 2823 + .rlen = 64, 2824 + }, { /* Generated with Crypto++ */ 2825 + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" 2826 + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" 2827 + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" 2828 + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", 2829 + .klen = 32, 2830 + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" 2831 + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", 2832 + .input = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" 2833 + "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" 2834 + "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" 2835 + "\x55\x17\x4E\xC7\x0E\x33\x1F\xF1" 2836 + "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" 2837 + "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" 2838 + "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" 2839 + "\x01\x41\x21\x12\x38\xAB\x52\x4F" 2840 + "\xA8\x57\x20", 2841 + .ilen = 67, 2842 + .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" 2843 + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 2844 + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" 2845 + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" 2846 + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" 2847 + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" 2848 + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" 2849 + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" 2850 + "\xC3\x37\xCE", 2851 + .rlen = 67, 2950 2852 }, 2951 2853 }; 2952 2854
+12 -6
crypto/wp512.c
··· 762 762 0x86228644a411c286ULL, 763 763 }; 764 764 765 - static const u64 rc[WHIRLPOOL_ROUNDS + 1] = { 766 - 0x0000000000000000ULL, 0x1823c6e887b8014fULL, 0x36a6d2f5796f9152ULL, 767 - 0x60bc9b8ea30c7b35ULL, 0x1de0d7c22e4bfe57ULL, 0x157737e59ff04adaULL, 768 - 0x58c9290ab1a06b85ULL, 0xbd5d10f4cb3e0567ULL, 0xe427418ba77d95d8ULL, 769 - 0xfbee7c66dd17479eULL, 0xca2dbf07ad5a8333ULL, 765 + static const u64 rc[WHIRLPOOL_ROUNDS] = { 766 + 0x1823c6e887b8014fULL, 767 + 0x36a6d2f5796f9152ULL, 768 + 0x60bc9b8ea30c7b35ULL, 769 + 0x1de0d7c22e4bfe57ULL, 770 + 0x157737e59ff04adaULL, 771 + 0x58c9290ab1a06b85ULL, 772 + 0xbd5d10f4cb3e0567ULL, 773 + 0xe427418ba77d95d8ULL, 774 + 0xfbee7c66dd17479eULL, 775 + 0xca2dbf07ad5a8333ULL, 770 776 }; 771 777 772 778 /** ··· 799 793 state[6] = block[6] ^ (K[6] = wctx->hash[6]); 800 794 state[7] = block[7] ^ (K[7] = wctx->hash[7]); 801 795 802 - for (r = 1; r <= WHIRLPOOL_ROUNDS; r++) { 796 + for (r = 0; r < WHIRLPOOL_ROUNDS; r++) { 803 797 804 798 L[0] = C0[(int)(K[0] >> 56) ] ^ 805 799 C1[(int)(K[7] >> 48) & 0xff] ^
+2 -1
drivers/crypto/Kconfig
··· 200 200 select CRYPTO_BLKCIPHER 201 201 select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG 202 202 depends on PCI 203 + depends on !ARCH_DMA_ADDR_T_64BIT 203 204 help 204 205 This option allows you to have support for HIFN 795x crypto adapters. 205 206 ··· 267 266 268 267 config CRYPTO_DEV_PICOXCELL 269 268 tristate "Support for picoXcell IPSEC and Layer2 crypto engines" 270 - depends on ARCH_PICOXCELL 269 + depends on ARCH_PICOXCELL && HAVE_CLK 271 270 select CRYPTO_AES 272 271 select CRYPTO_AUTHENC 273 272 select CRYPTO_ALGAPI
+2 -4
drivers/crypto/hifn_795x.c
··· 2744 2744 unsigned int freq; 2745 2745 int err; 2746 2746 2747 - if (sizeof(dma_addr_t) > 4) { 2748 - printk(KERN_INFO "HIFN supports only 32-bit addresses.\n"); 2749 - return -EINVAL; 2750 - } 2747 + /* HIFN supports only 32-bit addresses */ 2748 + BUILD_BUG_ON(sizeof(dma_addr_t) != 4); 2751 2749 2752 2750 if (strncmp(hifn_pll_ref, "ext", 3) && 2753 2751 strncmp(hifn_pll_ref, "pci", 3)) {
+2 -2
drivers/crypto/n2_core.c
··· 1006 1006 1007 1007 spin_unlock_irqrestore(&qp->lock, flags); 1008 1008 1009 + out: 1009 1010 put_cpu(); 1010 1011 1011 - out: 1012 1012 n2_chunk_complete(req, NULL); 1013 1013 return err; 1014 1014 } ··· 1096 1096 1097 1097 spin_unlock_irqrestore(&qp->lock, flags); 1098 1098 1099 + out: 1099 1100 put_cpu(); 1100 1101 1101 - out: 1102 1102 n2_chunk_complete(req, err ? NULL : final_iv_addr); 1103 1103 return err; 1104 1104 }
+1 -3
drivers/crypto/padlock-aes.c
··· 508 508 int ret; 509 509 struct cpuinfo_x86 *c = &cpu_data(0); 510 510 511 - if (!cpu_has_xcrypt) { 512 - printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); 511 + if (!cpu_has_xcrypt) 513 512 return -ENODEV; 514 - } 515 513 516 514 if (!cpu_has_xcrypt_enabled) { 517 515 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+59 -62
drivers/crypto/picoxcell_crypto.c
··· 34 34 #include <linux/io.h> 35 35 #include <linux/list.h> 36 36 #include <linux/module.h> 37 + #include <linux/of.h> 37 38 #include <linux/platform_device.h> 38 39 #include <linux/pm.h> 39 40 #include <linux/rtnetlink.h> ··· 1242 1241 spin_unlock_irqrestore(&engine->hw_lock, flags); 1243 1242 1244 1243 list_for_each_entry_safe(req, tmp, &completed, list) { 1245 - req->complete(req); 1246 1244 list_del(&req->list); 1245 + req->complete(req); 1247 1246 } 1248 1247 } 1249 1248 ··· 1658 1657 }, 1659 1658 }; 1660 1659 1661 - static int __devinit spacc_probe(struct platform_device *pdev, 1662 - unsigned max_ctxs, size_t cipher_pg_sz, 1663 - size_t hash_pg_sz, size_t fifo_sz, 1664 - struct spacc_alg *algs, size_t num_algs) 1660 + #ifdef CONFIG_OF 1661 + static const struct of_device_id spacc_of_id_table[] = { 1662 + { .compatible = "picochip,spacc-ipsec" }, 1663 + { .compatible = "picochip,spacc-l2" }, 1664 + {} 1665 + }; 1666 + #else /* CONFIG_OF */ 1667 + #define spacc_of_id_table NULL 1668 + #endif /* CONFIG_OF */ 1669 + 1670 + static bool spacc_is_compatible(struct platform_device *pdev, 1671 + const char *spacc_type) 1672 + { 1673 + const struct platform_device_id *platid = platform_get_device_id(pdev); 1674 + 1675 + if (platid && !strcmp(platid->name, spacc_type)) 1676 + return true; 1677 + 1678 + #ifdef CONFIG_OF 1679 + if (of_device_is_compatible(pdev->dev.of_node, spacc_type)) 1680 + return true; 1681 + #endif /* CONFIG_OF */ 1682 + 1683 + return false; 1684 + } 1685 + 1686 + static int __devinit spacc_probe(struct platform_device *pdev) 1665 1687 { 1666 1688 int i, err, ret = -EINVAL; 1667 1689 struct resource *mem, *irq; ··· 1693 1669 if (!engine) 1694 1670 return -ENOMEM; 1695 1671 1696 - engine->max_ctxs = max_ctxs; 1697 - engine->cipher_pg_sz = cipher_pg_sz; 1698 - engine->hash_pg_sz = hash_pg_sz; 1699 - engine->fifo_sz = fifo_sz; 1700 - engine->algs = algs; 1701 - engine->num_algs = num_algs; 1702 - engine->name = dev_name(&pdev->dev); 1672 + if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) { 1673 + engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS; 1674 + engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ; 1675 + engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ; 1676 + engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ; 1677 + engine->algs = ipsec_engine_algs; 1678 + engine->num_algs = ARRAY_SIZE(ipsec_engine_algs); 1679 + } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { 1680 + engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS; 1681 + engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ; 1682 + engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ; 1683 + engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ; 1684 + engine->algs = l2_engine_algs; 1685 + engine->num_algs = ARRAY_SIZE(l2_engine_algs); 1686 + } else { 1687 + return -EINVAL; 1688 + } 1689 + 1690 + engine->name = dev_name(&pdev->dev); 1703 1691 1704 1692 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1705 1693 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ··· 1747 1711 1748 1712 spin_lock_init(&engine->hw_lock); 1749 1713 1750 - engine->clk = clk_get(&pdev->dev, NULL); 1714 + engine->clk = clk_get(&pdev->dev, "ref"); 1751 1715 if (IS_ERR(engine->clk)) { 1752 1716 dev_info(&pdev->dev, "clk unavailable\n"); 1753 1717 device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); ··· 1836 1800 return 0; 1837 1801 } 1838 1802 1839 - static int __devinit ipsec_probe(struct platform_device *pdev) 1840 - { 1841 - return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS, 1842 - SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ, 1843 - 
SPACC_CRYPTO_IPSEC_HASH_PG_SZ, 1844 - SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs, 1845 - ARRAY_SIZE(ipsec_engine_algs)); 1846 - } 1847 - 1848 - static struct platform_driver ipsec_driver = { 1849 - .probe = ipsec_probe, 1850 - .remove = __devexit_p(spacc_remove), 1851 - .driver = { 1852 - .name = "picoxcell-ipsec", 1853 - #ifdef CONFIG_PM 1854 - .pm = &spacc_pm_ops, 1855 - #endif /* CONFIG_PM */ 1856 - }, 1803 + static const struct platform_device_id spacc_id_table[] = { 1804 + { "picochip,spacc-ipsec", }, 1805 + { "picochip,spacc-l2", }, 1857 1806 }; 1858 1807 1859 - static int __devinit l2_probe(struct platform_device *pdev) 1860 - { 1861 - return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS, 1862 - SPACC_CRYPTO_L2_CIPHER_PG_SZ, 1863 - SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ, 1864 - l2_engine_algs, ARRAY_SIZE(l2_engine_algs)); 1865 - } 1866 - 1867 - static struct platform_driver l2_driver = { 1868 - .probe = l2_probe, 1808 + static struct platform_driver spacc_driver = { 1809 + .probe = spacc_probe, 1869 1810 .remove = __devexit_p(spacc_remove), 1870 1811 .driver = { 1871 - .name = "picoxcell-l2", 1812 + .name = "picochip,spacc", 1872 1813 #ifdef CONFIG_PM 1873 1814 .pm = &spacc_pm_ops, 1874 1815 #endif /* CONFIG_PM */ 1816 + .of_match_table = spacc_of_id_table, 1875 1817 }, 1818 + .id_table = spacc_id_table, 1876 1819 }; 1877 1820 1878 1821 static int __init spacc_init(void) 1879 1822 { 1880 - int ret = platform_driver_register(&ipsec_driver); 1881 - if (ret) { 1882 - pr_err("failed to register ipsec spacc driver"); 1883 - goto out; 1884 - } 1885 - 1886 - ret = platform_driver_register(&l2_driver); 1887 - if (ret) { 1888 - pr_err("failed to register l2 spacc driver"); 1889 - goto l2_failed; 1890 - } 1891 - 1892 - return 0; 1893 - 1894 - l2_failed: 1895 - platform_driver_unregister(&ipsec_driver); 1896 - out: 1897 - return ret; 1823 + return platform_driver_register(&spacc_driver); 1898 1824 } 1899 1825 module_init(spacc_init); 1900 1826 1901 1827 static void __exit spacc_exit(void) 1902 1828 { 1903 - platform_driver_unregister(&ipsec_driver); 1904 - platform_driver_unregister(&l2_driver); 1829 + platform_driver_unregister(&spacc_driver); 1905 1830 } 1906 1831 module_exit(spacc_exit); 1907 1832
+10 -8
drivers/crypto/talitos.c
··· 416 416 /* 417 417 * locate current (offending) descriptor 418 418 */ 419 - static struct talitos_desc *current_desc(struct device *dev, int ch) 419 + static u32 current_desc_hdr(struct device *dev, int ch) 420 420 { 421 421 struct talitos_private *priv = dev_get_drvdata(dev); 422 422 int tail = priv->chan[ch].tail; ··· 428 428 tail = (tail + 1) & (priv->fifo_len - 1); 429 429 if (tail == priv->chan[ch].tail) { 430 430 dev_err(dev, "couldn't locate current descriptor\n"); 431 - return NULL; 431 + return 0; 432 432 } 433 433 } 434 434 435 - return priv->chan[ch].fifo[tail].desc; 435 + return priv->chan[ch].fifo[tail].desc->hdr; 436 436 } 437 437 438 438 /* 439 439 * user diagnostics; report root cause of error based on execution unit status 440 440 */ 441 - static void report_eu_error(struct device *dev, int ch, 442 - struct talitos_desc *desc) 441 + static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) 443 442 { 444 443 struct talitos_private *priv = dev_get_drvdata(dev); 445 444 int i; 446 445 447 - switch (desc->hdr & DESC_HDR_SEL0_MASK) { 446 + if (!desc_hdr) 447 + desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch)); 448 + 449 + switch (desc_hdr & DESC_HDR_SEL0_MASK) { 448 450 case DESC_HDR_SEL0_AFEU: 449 451 dev_err(dev, "AFEUISR 0x%08x_%08x\n", 450 452 in_be32(priv->reg + TALITOS_AFEUISR), ··· 490 488 break; 491 489 } 492 490 493 - switch (desc->hdr & DESC_HDR_SEL1_MASK) { 491 + switch (desc_hdr & DESC_HDR_SEL1_MASK) { 494 492 case DESC_HDR_SEL1_MDEUA: 495 493 case DESC_HDR_SEL1_MDEUB: 496 494 dev_err(dev, "MDEUISR 0x%08x_%08x\n", ··· 552 550 if (v_lo & TALITOS_CCPSR_LO_IEU) 553 551 dev_err(dev, "invalid execution unit error\n"); 554 552 if (v_lo & TALITOS_CCPSR_LO_EU) 555 - report_eu_error(dev, ch, current_desc(dev, ch)); 553 + report_eu_error(dev, ch, current_desc_hdr(dev, ch)); 556 554 if (v_lo & TALITOS_CCPSR_LO_GB) 557 555 dev_err(dev, "gather boundary error\n"); 558 556 if (v_lo & TALITOS_CCPSR_LO_GRL)
+2
include/crypto/algapi.h
··· 15 15 #include <linux/crypto.h> 16 16 #include <linux/list.h> 17 17 #include <linux/kernel.h> 18 + #include <linux/skbuff.h> 18 19 19 20 struct module; 20 21 struct rtattr; ··· 27 26 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); 28 27 int (*init_tfm)(struct crypto_tfm *tfm); 29 28 void (*show)(struct seq_file *m, struct crypto_alg *alg); 29 + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); 30 30 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); 31 31 32 32 unsigned int type;
+23
include/crypto/blowfish.h
··· 1 + /* 2 + * Common values for blowfish algorithms 3 + */ 4 + 5 + #ifndef _CRYPTO_BLOWFISH_H 6 + #define _CRYPTO_BLOWFISH_H 7 + 8 + #include <linux/types.h> 9 + #include <linux/crypto.h> 10 + 11 + #define BF_BLOCK_SIZE 8 12 + #define BF_MIN_KEY_SIZE 4 13 + #define BF_MAX_KEY_SIZE 56 14 + 15 + struct bf_ctx { 16 + u32 p[18]; 17 + u32 s[1024]; 18 + }; 19 + 20 + int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, 21 + unsigned int key_len); 22 + 23 + #endif
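Moving the context layout and blowfish_setkey() into a shared header lets an architecture-specific implementation reuse the generic key schedule while supplying its own encrypt/decrypt routines. A sketch of the glue a hypothetical assembler implementation might use; the asm entry points and driver name are placeholders, not symbols from this merge:

#include <crypto/blowfish.h>
#include <linux/crypto.h>
#include <linux/linkage.h>
#include <linux/module.h>

/* Placeholder assembler entry points. */
asmlinkage void bf_asm_encrypt(struct bf_ctx *ctx, u8 *dst, const u8 *src);
asmlinkage void bf_asm_decrypt(struct bf_ctx *ctx, u8 *dst, const u8 *src);

static void bf_encrypt_example(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	bf_asm_encrypt(crypto_tfm_ctx(tfm), dst, src);
}

static void bf_decrypt_example(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	bf_asm_decrypt(crypto_tfm_ctx(tfm), dst, src);
}

static struct crypto_alg bf_alg_example = {
	.cra_name		= "blowfish",
	.cra_driver_name	= "blowfish-asm-example",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= BF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct bf_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize	= BF_MIN_KEY_SIZE,
			.cia_max_keysize	= BF_MAX_KEY_SIZE,
			.cia_setkey		= blowfish_setkey, /* shared key setup */
			.cia_encrypt		= bf_encrypt_example,
			.cia_decrypt		= bf_decrypt_example,
		}
	}
};

Registration and removal would then go through the usual crypto_register_alg()/crypto_unregister_alg() module init/exit hooks.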
+5
include/crypto/sha.h
··· 82 82 u8 buf[SHA512_BLOCK_SIZE]; 83 83 }; 84 84 85 + struct shash_desc; 86 + 87 + extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data, 88 + unsigned int len); 89 + 85 90 #endif
+5
include/linux/crypto.h
··· 72 72 #define CRYPTO_ALG_TESTED 0x00000400 73 73 74 74 /* 75 + * Set if the algorithm is an instance that is built from templates. 76 + */ 77 + #define CRYPTO_ALG_INSTANCE 0x00000800 78 + 79 + /* 75 80 * Transform masks and values (for crt_flags). 76 81 */ 77 82 #define CRYPTO_TFM_REQ_MASK 0x000fff00
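The new flag marks algorithms that were instantiated from a template (for example a "cbc(...)" or "ctr(...)" wrapper) rather than registered as standalone implementations; it can be tested like any other cra_flags bit. A tiny illustrative helper, with the caller assumed to supply a registered struct crypto_alg:

#include <linux/crypto.h>
#include <linux/kernel.h>

/* Illustrative only: log whether an algorithm was built from a template. */
static void report_alg_origin(struct crypto_alg *alg)
{
	if (alg->cra_flags & CRYPTO_ALG_INSTANCE)
		pr_info("%s: template instance\n", alg->cra_driver_name);
	else
		pr_info("%s: standalone implementation\n", alg->cra_driver_name);
}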
+102
include/linux/cryptouser.h
··· 1 + /* 2 + * Crypto user configuration API. 3 + * 4 + * Copyright (C) 2011 secunet Security Networks AG 5 + * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License along with 17 + * this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 19 + */ 20 + 21 + /* Netlink configuration messages. */ 22 + enum { 23 + CRYPTO_MSG_BASE = 0x10, 24 + CRYPTO_MSG_NEWALG = 0x10, 25 + CRYPTO_MSG_DELALG, 26 + CRYPTO_MSG_UPDATEALG, 27 + CRYPTO_MSG_GETALG, 28 + __CRYPTO_MSG_MAX 29 + }; 30 + #define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) 31 + #define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE) 32 + 33 + #define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME 34 + 35 + /* Netlink message attributes. */ 36 + enum crypto_attr_type_t { 37 + CRYPTOCFGA_UNSPEC, 38 + CRYPTOCFGA_PRIORITY_VAL, /* __u32 */ 39 + CRYPTOCFGA_REPORT_LARVAL, /* struct crypto_report_larval */ 40 + CRYPTOCFGA_REPORT_HASH, /* struct crypto_report_hash */ 41 + CRYPTOCFGA_REPORT_BLKCIPHER, /* struct crypto_report_blkcipher */ 42 + CRYPTOCFGA_REPORT_AEAD, /* struct crypto_report_aead */ 43 + CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */ 44 + CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ 45 + CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ 46 + __CRYPTOCFGA_MAX 47 + 48 + #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) 49 + }; 50 + 51 + struct crypto_user_alg { 52 + char cru_name[CRYPTO_MAX_ALG_NAME]; 53 + char cru_driver_name[CRYPTO_MAX_ALG_NAME]; 54 + char cru_module_name[CRYPTO_MAX_ALG_NAME]; 55 + __u32 cru_type; 56 + __u32 cru_mask; 57 + __u32 cru_refcnt; 58 + __u32 cru_flags; 59 + }; 60 + 61 + struct crypto_report_larval { 62 + char type[CRYPTO_MAX_NAME]; 63 + }; 64 + 65 + struct crypto_report_hash { 66 + char type[CRYPTO_MAX_NAME]; 67 + unsigned int blocksize; 68 + unsigned int digestsize; 69 + }; 70 + 71 + struct crypto_report_cipher { 72 + char type[CRYPTO_MAX_ALG_NAME]; 73 + unsigned int blocksize; 74 + unsigned int min_keysize; 75 + unsigned int max_keysize; 76 + }; 77 + 78 + struct crypto_report_blkcipher { 79 + char type[CRYPTO_MAX_NAME]; 80 + char geniv[CRYPTO_MAX_NAME]; 81 + unsigned int blocksize; 82 + unsigned int min_keysize; 83 + unsigned int max_keysize; 84 + unsigned int ivsize; 85 + }; 86 + 87 + struct crypto_report_aead { 88 + char type[CRYPTO_MAX_NAME]; 89 + char geniv[CRYPTO_MAX_NAME]; 90 + unsigned int blocksize; 91 + unsigned int maxauthsize; 92 + unsigned int ivsize; 93 + }; 94 + 95 + struct crypto_report_comp { 96 + char type[CRYPTO_MAX_NAME]; 97 + }; 98 + 99 + struct crypto_report_rng { 100 + char type[CRYPTO_MAX_NAME]; 101 + unsigned int seedsize; 102 + };
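Together with the NETLINK_CRYPTO protocol number added below, this header is the userspace-visible ABI of the new crypto configuration interface. A rough userspace sketch of asking the kernel about one algorithm with CRYPTO_MSG_GETALG; it assumes the header is installed for userspace (CRYPTO_MAX_ALG_NAME is defined locally in case it is not), uses "sha1-generic" purely as an example driver name, and leaves reply parsing to the standard netlink helpers:

#include <linux/netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef CRYPTO_MAX_ALG_NAME
#define CRYPTO_MAX_ALG_NAME 64	/* matches the kernel definition */
#endif
#include <linux/cryptouser.h>

int main(void)
{
	struct {
		struct nlmsghdr hdr;
		struct crypto_user_alg alg;
	} req;
	struct sockaddr_nl nl = { .nl_family = AF_NETLINK };
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.hdr.nlmsg_type = CRYPTO_MSG_GETALG;
	req.hdr.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.alg.cru_driver_name, "sha1-generic",
		sizeof(req.alg.cru_driver_name) - 1);

	if (sendto(fd, &req, req.hdr.nlmsg_len, 0,
		   (struct sockaddr *)&nl, sizeof(nl)) < 0) {
		close(fd);
		return 1;
	}

	/* The reply carries a crypto_user_alg plus CRYPTOCFGA_* attributes
	 * (for a hash, CRYPTOCFGA_REPORT_HASH); parse it with the usual
	 * NLMSG and NLA helper macros, omitted here for brevity. */
	close(fd);
	return 0;
}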
+1
include/linux/netlink.h
··· 25 25 #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ 26 26 #define NETLINK_ECRYPTFS 19 27 27 #define NETLINK_RDMA 20 28 + #define NETLINK_CRYPTO 21 /* Crypto layer */ 28 29 29 30 #define MAX_LINKS 32 30 31