Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qat - honor CRYPTO_TFM_REQ_MAY_SLEEP flag

If a request has the flag CRYPTO_TFM_REQ_MAY_SLEEP set, allocate memory
using the flag GFP_KERNEL; otherwise, use GFP_ATOMIC.

Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Giovanni Cabiddu and committed by
Herbert Xu
8fb203c6 2acbb877

+27 -14
+12 -7
drivers/crypto/qat/qat_common/qat_algs.c
··· 703 703 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, 704 704 struct scatterlist *sgl, 705 705 struct scatterlist *sglout, 706 - struct qat_crypto_request *qat_req) 706 + struct qat_crypto_request *qat_req, 707 + gfp_t flags) 707 708 { 708 709 struct device *dev = &GET_DEV(inst->accel_dev); 709 710 int i, sg_nctr = 0; ··· 724 723 qat_req->buf.sgl_dst_valid = false; 725 724 726 725 if (n > QAT_MAX_BUFF_DESC) { 727 - bufl = kzalloc_node(sz, GFP_ATOMIC, node); 726 + bufl = kzalloc_node(sz, flags, node); 728 727 if (unlikely(!bufl)) 729 728 return -ENOMEM; 730 729 } else { ··· 766 765 sg_nctr = 0; 767 766 768 767 if (n > QAT_MAX_BUFF_DESC) { 769 - buflout = kzalloc_node(sz_out, GFP_ATOMIC, node); 768 + buflout = kzalloc_node(sz_out, flags, node); 770 769 if (unlikely(!buflout)) 771 770 goto err_in; 772 771 } else { ··· 967 966 struct icp_qat_fw_la_auth_req_params *auth_param; 968 967 struct icp_qat_fw_la_bulk_req *msg; 969 968 int digst_size = crypto_aead_authsize(aead_tfm); 969 + gfp_t f = qat_algs_alloc_flags(&areq->base); 970 970 int ret; 971 971 u32 cipher_len; 972 972 ··· 975 973 if (cipher_len % AES_BLOCK_SIZE != 0) 976 974 return -EINVAL; 977 975 978 - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); 976 + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); 979 977 if (unlikely(ret)) 980 978 return ret; 981 979 ··· 1010 1008 struct qat_crypto_request *qat_req = aead_request_ctx(areq); 1011 1009 struct icp_qat_fw_la_cipher_req_params *cipher_param; 1012 1010 struct icp_qat_fw_la_auth_req_params *auth_param; 1011 + gfp_t f = qat_algs_alloc_flags(&areq->base); 1013 1012 struct icp_qat_fw_la_bulk_req *msg; 1014 1013 u8 *iv = areq->iv; 1015 1014 int ret; ··· 1018 1015 if (areq->cryptlen % AES_BLOCK_SIZE != 0) 1019 1016 return -EINVAL; 1020 1017 1021 - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); 1018 + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); 1022 
1019 if (unlikely(ret)) 1023 1020 return ret; 1024 1021 ··· 1196 1193 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); 1197 1194 struct qat_crypto_request *qat_req = skcipher_request_ctx(req); 1198 1195 struct icp_qat_fw_la_cipher_req_params *cipher_param; 1196 + gfp_t f = qat_algs_alloc_flags(&req->base); 1199 1197 struct icp_qat_fw_la_bulk_req *msg; 1200 1198 int ret; 1201 1199 1202 1200 if (req->cryptlen == 0) 1203 1201 return 0; 1204 1202 1205 - ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); 1203 + ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); 1206 1204 if (unlikely(ret)) 1207 1205 return ret; 1208 1206 ··· 1262 1258 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); 1263 1259 struct qat_crypto_request *qat_req = skcipher_request_ctx(req); 1264 1260 struct icp_qat_fw_la_cipher_req_params *cipher_param; 1261 + gfp_t f = qat_algs_alloc_flags(&req->base); 1265 1262 struct icp_qat_fw_la_bulk_req *msg; 1266 1263 int ret; 1267 1264 1268 1265 if (req->cryptlen == 0) 1269 1266 return 0; 1270 1267 1271 - ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); 1268 + ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); 1272 1269 if (unlikely(ret)) 1273 1270 return ret; 1274 1271
+10 -7
drivers/crypto/qat/qat_common/qat_asym_algs.c
··· 224 224 struct qat_asym_request *qat_req = 225 225 PTR_ALIGN(kpp_request_ctx(req), 64); 226 226 struct icp_qat_fw_pke_request *msg = &qat_req->req; 227 - int ret; 227 + gfp_t flags = qat_algs_alloc_flags(&req->base); 228 228 int n_input_params = 0; 229 229 u8 *vaddr; 230 + int ret; 230 231 231 232 if (unlikely(!ctx->xa)) 232 233 return -EINVAL; ··· 292 291 } else { 293 292 int shift = ctx->p_size - req->src_len; 294 293 295 - qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL); 294 + qat_req->src_align = kzalloc(ctx->p_size, flags); 296 295 if (unlikely(!qat_req->src_align)) 297 296 return ret; 298 297 ··· 318 317 qat_req->dst_align = NULL; 319 318 vaddr = sg_virt(req->dst); 320 319 } else { 321 - qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL); 320 + qat_req->dst_align = kzalloc(ctx->p_size, flags); 322 321 if (unlikely(!qat_req->dst_align)) 323 322 goto unmap_src; 324 323 ··· 651 650 struct qat_asym_request *qat_req = 652 651 PTR_ALIGN(akcipher_request_ctx(req), 64); 653 652 struct icp_qat_fw_pke_request *msg = &qat_req->req; 653 + gfp_t flags = qat_algs_alloc_flags(&req->base); 654 654 u8 *vaddr; 655 655 int ret; 656 656 ··· 698 696 } else { 699 697 int shift = ctx->key_sz - req->src_len; 700 698 701 - qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL); 699 + qat_req->src_align = kzalloc(ctx->key_sz, flags); 702 700 if (unlikely(!qat_req->src_align)) 703 701 return ret; 704 702 ··· 716 714 qat_req->dst_align = NULL; 717 715 vaddr = sg_virt(req->dst); 718 716 } else { 719 - qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL); 717 + qat_req->dst_align = kzalloc(ctx->key_sz, flags); 720 718 if (unlikely(!qat_req->dst_align)) 721 719 goto unmap_src; 722 720 vaddr = qat_req->dst_align; ··· 785 783 struct qat_asym_request *qat_req = 786 784 PTR_ALIGN(akcipher_request_ctx(req), 64); 787 785 struct icp_qat_fw_pke_request *msg = &qat_req->req; 786 + gfp_t flags = qat_algs_alloc_flags(&req->base); 788 787 u8 *vaddr; 789 788 int ret; 790 789 ··· 842 839 } 
else { 843 840 int shift = ctx->key_sz - req->src_len; 844 841 845 - qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL); 842 + qat_req->src_align = kzalloc(ctx->key_sz, flags); 846 843 if (unlikely(!qat_req->src_align)) 847 844 return ret; 848 845 ··· 860 857 qat_req->dst_align = NULL; 861 858 vaddr = sg_virt(req->dst); 862 859 } else { 863 - qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL); 860 + qat_req->dst_align = kzalloc(ctx->key_sz, flags); 864 861 if (unlikely(!qat_req->dst_align)) 865 862 goto unmap_src; 866 863 vaddr = qat_req->dst_align;
+5
drivers/crypto/qat/qat_common/qat_crypto.h
··· 109 109 return true; 110 110 } 111 111 112 + static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) 113 + { 114 + return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; 115 + } 116 + 112 117 #endif