Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: aesni-intel - Fix irq_fpu_usable usage
  crypto: padlock-sha - Fix stack alignment

 arch/x86/crypto/aesni-intel_glue.c |   10 +++++-----
 drivers/crypto/padlock-sha.c       |   14 ++++++++++++--
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -82,7 +82,7 @@
 		return -EINVAL;
 	}
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -103,7 +103,7 @@
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -116,7 +116,7 @@
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -342,7 +342,7 @@
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -363,7 +363,7 @@
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
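The first commit inverts every irq_fpu_usable() test: the predicate returns true when the FPU *may* be touched in the current context, so the original code took the slow non-SSE fallback exactly when AES-NI was safe to use, and used the FPU in contexts where it was not. A condensed sketch of the corrected pattern, using aes_encrypt() as the example (aesni_enc is assumed here to be this file's AES-NI assembly helper; the body is abbreviated, not the full source):

	static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	{
		struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

		if (!irq_fpu_usable()) {
			/* FPU/SIMD state must not be touched here (e.g. an
			 * interrupt landed in FPU-using code), so fall back
			 * to the table-based C implementation. */
			crypto_aes_encrypt_x86(ctx, dst, src);
		} else {
			kernel_fpu_begin();	/* save current FPU state */
			aesni_enc(ctx, dst, src);	/* AES-NI fast path */
			kernel_fpu_end();	/* restore it */
		}
	}

The ablkcipher hunks apply the same inversion: when the FPU is unusable, the request is handed off to the cryptd worker instead, which retries it later in process context where kernel_fpu_begin() is permitted.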
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -24,6 +24,12 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 struct padlock_sha_desc {
 	struct shash_desc fallback;
 };
@@ -64,7 +70,9 @@
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha1_state state;
 	unsigned int space;
@@ -128,7 +136,9 @@
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha256_state state;
 	unsigned int space;
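The second commit works around the fact that the compiler can only guarantee the ABI-minimum stack alignment (4 bytes on i386, 16 on x86-64), so __attribute__((aligned(16))) on a stack array is not reliable, and PadLock's SHA instructions need a 16-byte-aligned digest buffer. The fix over-allocates by the worst-case slack and rounds the pointer up by hand with PTR_ALIGN. A stand-alone user-space illustration of the same trick (the PTR_ALIGN macro below is a plain-C stand-in for the kernel's, and the constants mirror the i386 case):

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGNMENT	16	/* what the hardware wants (PADLOCK_ALIGNMENT) */
	#define STACK_ALIGN	4	/* what the 32-bit ABI guarantees */

	/* Round pointer p up to the next multiple of a (a power of two). */
	#define PTR_ALIGN(p, a) \
		((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

	int main(void)
	{
		/* Over-allocate by the worst-case rounding loss so that at
		 * least 128 usable bytes remain after alignment. */
		char buf[128 + ALIGNMENT - STACK_ALIGN];
		char *result = PTR_ALIGN(&buf[0], ALIGNMENT);

		printf("buf=%p result=%p aligned=%d\n",
		       (void *)buf, (void *)result,
		       (int)(((uintptr_t)result % ALIGNMENT) == 0));
		return 0;
	}

Since the compiler does guarantee STACK_ALIGN, rounding up to the next 16-byte boundary consumes at most ALIGNMENT - STACK_ALIGN bytes, exactly the slack added to the array, so the 128-byte minimum the PadLock microcode requires is always preserved.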