crypto: aesni-intel - fix problem with packets that are not a multiple of 64 bytes

This patch fixes a problem with RFC4106 AES-GCM packets whose length is not a multiple of 64 bytes. When handling the final partial block, the assembly byte-swapped the wrong register (%xmm1 instead of the output register %xmm0) using a shuffle mask that may already have been clobbered, and the glue code never propagated the parent's context to the cryptd child transform used on the direct FPU path.

Signed-off-by: Adrian Hoban <adrian.hoban@intel.com>
Signed-off-by: Aidan O'Mahony <aidan.o.mahony@intel.com>
Signed-off-by: Gabriele Paoloni <gabriele.paoloni@intel.com>
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Tadeusz Struk, committed by Herbert Xu (commit 60af520c, parent 16c29daf).

2 files changed, 16 insertions(+), 3 deletions(-)
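A note on the "64 bytes" in the subject: the AES-NI GCM main loop is assumed to consume four 16-byte blocks (64 bytes) per iteration, so any other length falls through to the single-block code and, for the final <16 bytes, to the partial-block path patched below. A plain-C sketch of that assumed decomposition:

#include <stdio.h>

/* Hypothetical illustration of how a GCM input length is assumed to be
 * consumed: a 64-byte (4-block) main loop, then 16-byte single blocks,
 * then a <16-byte partial-block tail. */
int main(void)
{
        unsigned int len = 100;                 /* not a multiple of 64 */
        unsigned int quad_blocks = len & ~63u;  /* 64: 4-blocks-at-a-time loop */
        unsigned int single_blocks = (len & 63u) & ~15u; /* 32: one block at a time */
        unsigned int tail = len & 15u;          /* 4: the "<16 byte block" path */

        printf("%u = %u (main loop) + %u (single blocks) + %u (tail)\n",
               len, quad_blocks, single_blocks, tail);
        return 0;
}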
arch/x86/crypto/aesni-intel_asm.S (+4 -1)

···
        movdqa SHUF_MASK(%rip), %xmm10
        PSHUFB_XMM %xmm10, %xmm0

+
        ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1       # Encrypt(K, Yn)
        sub $16, %r11
        add %r13, %r11
···
        # GHASH computation for the last <16 byte block
        sub %r13, %r11
        add $16, %r11
-       PSHUFB_XMM %xmm10, %xmm1
+
+       movdqa SHUF_MASK(%rip), %xmm10
+       PSHUFB_XMM %xmm10, %xmm0

        # shuffle xmm0 back to output as ciphertext
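For readers unfamiliar with this tail path: GHASH treats each 128-bit block as big-endian, so blocks are byte-swapped with SHUF_MASK around the arithmetic. The old code shuffled %xmm1 and relied on %xmm10 still holding the mask; the fix reloads SHUF_MASK and swaps %xmm0, the register that (per the comment that follows it) is written out as ciphertext. Below is a user-space sketch of the byte-reversal itself, assuming SHUF_MASK is the usual 0x000102030405060708090A0B0C0D0E0F reversal constant; build with gcc -mssse3.

#include <stdio.h>
#include <stdint.h>
#include <tmmintrin.h>  /* _mm_shuffle_epi8 == PSHUFB (SSSE3) */

int main(void)
{
        /* Assumed equivalent of the kernel's SHUF_MASK: result byte i
         * takes source byte (15 - i), i.e. a full byte reversal of the
         * 128-bit block. */
        const __m128i shuf_mask = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                               8, 9, 10, 11, 12, 13, 14, 15);
        uint8_t blk[16], out[16];

        for (int i = 0; i < 16; i++)
                blk[i] = (uint8_t)i;

        __m128i x = _mm_loadu_si128((const __m128i *)blk);
        x = _mm_shuffle_epi8(x, shuf_mask);     /* PSHUFB x, shuf_mask */
        _mm_storeu_si128((__m128i *)out, x);

        for (int i = 0; i < 16; i++)
                printf("%02x", out[i]);         /* 0f0e0d...020100 */
        printf("\n");
        return 0;
}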
arch/x86/crypto/aesni-intel_glue.c (+12 -2)

···
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+       struct crypto_aead *cryptd_child;
+       struct aesni_rfc4106_gcm_ctx *child_ctx;
        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
+
+       cryptd_child = cryptd_aead_child(cryptd_tfm);
+       child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
+       memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
···
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+       struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+       struct aesni_rfc4106_gcm_ctx *child_ctx =
+               aesni_rfc4106_gcm_ctx_get(cryptd_child);
        u8 *new_key_mem = NULL;

        if (key_len < 4) {
···
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+       memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
        kfree(new_key_mem);
        return ret;
···
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
-       struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
···
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
+               struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
···
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
-       struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
···
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_decrypt(cryptd_req);
        } else {
+               struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.decrypt(req);
                kernel_fpu_end();
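Besides the assembly change, the glue code now keeps the cryptd child transform's private context in sync with the parent's: the child has its own struct aesni_rfc4106_gcm_ctx, and the direct path taken when irq_fpu_usable() returns true calls straight into the child, so a key set only on the parent would never reach it. A minimal user-space sketch of that sync-on-setkey pattern (hypothetical types, not the kernel crypto API):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel structures: a parent transform
 * whose context must be mirrored into the inner (child) transform that
 * the direct, FPU-usable path invokes. */
struct gcm_ctx {
        unsigned char key[16];
        unsigned int key_len;
};

struct parent_tfm {
        struct gcm_ctx ctx;     /* written by setkey on the parent */
        struct gcm_ctx child;   /* what the direct path actually uses */
};

static void setkey(struct parent_tfm *p, const unsigned char *key,
                   unsigned int key_len)
{
        memcpy(p->ctx.key, key, key_len);
        p->ctx.key_len = key_len;
        /* The crux of the glue fix: without this copy the child (and
         * hence the direct path) keeps whatever stale context it had. */
        memcpy(&p->child, &p->ctx, sizeof(p->ctx));
}

int main(void)
{
        struct parent_tfm p = {0};
        setkey(&p, (const unsigned char *)"0123456789abcdef", 16);
        printf("child key_len after sync: %u\n", p.child.key_len); /* 16 */
        return 0;
}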