Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qat - Add support for RSA algorithm

Add RSA support to QAT driver.
Remove unused RNG rings.

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Tadeusz Struk and committed by
Herbert Xu
a9905320 28cfaf67

+779 -39
+2
drivers/crypto/qat/Kconfig
··· 3 3 select CRYPTO_AEAD 4 4 select CRYPTO_AUTHENC 5 5 select CRYPTO_BLKCIPHER 6 + select CRYPTO_AKCIPHER 6 7 select CRYPTO_HMAC 7 8 select CRYPTO_SHA1 8 9 select CRYPTO_SHA256 9 10 select CRYPTO_SHA512 10 11 select FW_LOADER 12 + select ASN1 11 13 12 14 config CRYPTO_DEV_QAT_DH895xCC 13 15 tristate "Support for Intel(R) DH895xCC"
+1
drivers/crypto/qat/qat_common/.gitignore
··· 1 + *-asn1.[ch]
+5
drivers/crypto/qat/qat_common/Makefile
··· 1 + $(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h 2 + clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h 3 + 1 4 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o 2 5 intel_qat-objs := adf_cfg.o \ 3 6 adf_ctl_drv.o \ ··· 11 8 adf_transport.o \ 12 9 qat_crypto.o \ 13 10 qat_algs.o \ 11 + qat_rsakey-asn1.o \ 12 + qat_asym_algs.o \ 14 13 qat_uclo.o \ 15 14 qat_hal.o 16 15
+9 -1
drivers/crypto/qat/qat_common/adf_common_drv.h
··· 55 55 56 56 #define ADF_MAJOR_VERSION 0 57 57 #define ADF_MINOR_VERSION 1 58 - #define ADF_BUILD_VERSION 3 58 + #define ADF_BUILD_VERSION 4 59 59 #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ 60 60 __stringify(ADF_MINOR_VERSION) "." \ 61 61 __stringify(ADF_BUILD_VERSION) ··· 93 93 struct list_head list; 94 94 int admin; 95 95 }; 96 + 97 + static inline int get_current_node(void) 98 + { 99 + return cpu_data(current_thread_info()->cpu).phys_proc_id; 100 + } 96 101 97 102 int adf_service_register(struct service_hndl *service); 98 103 int adf_service_unregister(struct service_hndl *service); ··· 146 141 struct qat_crypto_instance *qat_crypto_get_instance_node(int node); 147 142 void qat_crypto_put_instance(struct qat_crypto_instance *inst); 148 143 void qat_alg_callback(void *resp); 144 + void qat_alg_asym_callback(void *resp); 149 145 int qat_algs_init(void); 150 146 void qat_algs_exit(void); 151 147 int qat_algs_register(void); 152 148 int qat_algs_unregister(void); 149 + int qat_asym_algs_register(void); 150 + void qat_asym_algs_unregister(void); 153 151 154 152 int qat_hal_init(struct adf_accel_dev *accel_dev); 155 153 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+3 -1
drivers/crypto/qat/qat_common/adf_init.c
··· 257 257 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); 258 258 set_bit(ADF_STATUS_STARTED, &accel_dev->status); 259 259 260 - if (qat_algs_register()) { 260 + if (qat_algs_register() || qat_asym_algs_register()) { 261 261 dev_err(&GET_DEV(accel_dev), 262 262 "Failed to register crypto algs\n"); 263 263 set_bit(ADF_STATUS_STARTING, &accel_dev->status); ··· 295 295 if (qat_algs_unregister()) 296 296 dev_err(&GET_DEV(accel_dev), 297 297 "Failed to unregister crypto algs\n"); 298 + 299 + qat_asym_algs_unregister(); 298 300 299 301 list_for_each(list_itr, &service_table) { 300 302 service = list_entry(list_itr, struct service_hndl, list);
+2
drivers/crypto/qat/qat_common/icp_qat_fw.h
··· 249 249 250 250 #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 251 251 #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 252 + #define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 253 + #define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 252 254 #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 253 255 #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 254 256 #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+112
drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _ICP_QAT_FW_PKE_
#define _ICP_QAT_FW_PKE_

#include "icp_qat_fw.h"

/* Content-descriptor parameters of a PKE request header.  func_id selects
 * the firmware PKE service function (e.g. the RSA exponentiation variant). */
struct icp_qat_fw_req_hdr_pke_cd_pars {
	u64 content_desc_addr;
	u32 content_desc_resrvd;
	u32 func_id;
};

/* Middle section of a PKE request: caller-opaque cookie plus the DMA
 * addresses of the input and output parameter tables. */
struct icp_qat_fw_req_pke_mid {
	u64 opaque;
	u64 src_data_addr;
	u64 dest_data_addr;
};

/* PKE request header.  Layout must match the firmware ring message format;
 * do not reorder fields. */
struct icp_qat_fw_req_pke_hdr {
	u8 resrvd1;
	u8 resrvd2;
	u8 service_type;
	u8 hdr_flags;
	u16 comn_req_flags;
	u16 resrvd4;
	struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
};

/* Complete PKE request message placed on the asym TX ring. */
struct icp_qat_fw_pke_request {
	struct icp_qat_fw_req_pke_hdr pke_hdr;
	struct icp_qat_fw_req_pke_mid pke_mid;
	u8 output_param_count;
	u8 input_param_count;
	u16 resrvd1;
	u32 resrvd2;
	u64 next_req_adr;
};

/* PKE response header returned by firmware on the asym RX ring. */
struct icp_qat_fw_resp_pke_hdr {
	u8 resrvd1;
	u8 resrvd2;
	u8 response_type;
	u8 hdr_flags;
	u16 comn_resp_flags;
	u16 resrvd4;
};

/* Complete PKE response message; opaque echoes the request's cookie. */
struct icp_qat_fw_pke_resp {
	struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
	u64 opaque;
	u64 src_data_addr;
	u64 dest_data_addr;
};

#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1

/* Extract the PKE status bit from a response's comn_resp_flags word. */
#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
	QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
		       ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
		       QAT_COMN_RESP_PKE_STATUS_BITPOS, \
		       QAT_COMN_RESP_PKE_STATUS_MASK)

/* Set the "valid" flag in a request header's hdr_flags field. */
#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
	QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
		      ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
		      ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
#endif
-5
drivers/crypto/qat/qat_common/qat_algs.c
··· 129 129 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ 130 130 }; 131 131 132 - static int get_current_node(void) 133 - { 134 - return cpu_data(current_thread_info()->cpu).phys_proc_id; 135 - } 136 - 137 132 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) 138 133 { 139 134 switch (qat_hash_alg) {
+639
drivers/crypto/qat/qat_common/qat_asym_algs.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include "qat_rsakey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

/*
 * DMA addresses of the RSA operation inputs, laid out as the flat
 * parameter table the firmware expects.  The enc and dec views alias the
 * same storage; in_tab gives raw table access (firmware reads up to 8
 * entries, unused slots must be zeroed by the caller).
 */
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;	/* message */
			dma_addr_t e;	/* public exponent */
			dma_addr_t n;	/* modulus */
		} enc;
		struct {
			dma_addr_t c;	/* ciphertext */
			dma_addr_t d;	/* private exponent */
			dma_addr_t n;	/* modulus */
		} dec;
		u64 in_tab[8];
	};
} __packed __aligned(64);

/* DMA address of the single RSA output buffer (ciphertext for encrypt,
 * message for decrypt); out_tab gives raw table access as above. */
struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

/*
 * Per-tfm RSA context.  n/e/d are key components in DMA-coherent buffers
 * of key_sz bytes each (dma_* hold the device addresses); key_sz is the
 * modulus length in bytes.
 */
struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	unsigned int key_sz;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

/*
 * Per-request state, carved out of the akcipher request context
 * (reqsize reserves sizeof(*this) + 64 so it can be 64-byte aligned).
 * src_align is non-NULL when the source had to be copied into a
 * zero-padded bounce buffer.
 */
struct qat_rsa_request {
	struct qat_rsa_input_params in;
	struct qat_rsa_output_params out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	struct icp_qat_fw_pke_request req;
	struct qat_rsa_ctx *ctx;
	int err;	/* NOTE(review): never read or written here — confirm before removing */
} __aligned(64);
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) 112 + { 113 + struct akcipher_request *areq = (void *)(__force long)resp->opaque; 114 + struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); 115 + struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); 116 + int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( 117 + resp->pke_resp_hdr.comn_resp_flags); 118 + char *ptr = areq->dst; 119 + 120 + err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; 121 + 122 + if (req->src_align) 123 + dma_free_coherent(dev, req->ctx->key_sz, req->src_align, 124 + req->in.enc.m); 125 + else 126 + dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, 127 + DMA_TO_DEVICE); 128 + 129 + dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, 130 + DMA_FROM_DEVICE); 131 + dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), 132 + DMA_TO_DEVICE); 133 + dma_unmap_single(dev, req->phy_out, 134 + sizeof(struct qat_rsa_output_params), 135 + DMA_TO_DEVICE); 136 + 137 + areq->dst_len = req->ctx->key_sz; 138 + /* Need to set the corect length of the output */ 139 + while (!(*ptr) && areq->dst_len) { 140 + areq->dst_len--; 141 + ptr++; 142 + } 143 + 144 + if (areq->dst_len != req->ctx->key_sz) 145 + memcpy(areq->dst, ptr, areq->dst_len); 146 + 147 + akcipher_request_complete(areq, err); 148 + } 149 + 150 + void qat_alg_asym_callback(void *_resp) 151 + { 152 + struct icp_qat_fw_pke_resp *resp = _resp; 153 + 154 + qat_rsa_cb(resp); 155 + } 156 + 157 + #define PKE_RSA_EP_512 0x1c161b21 158 + #define PKE_RSA_EP_1024 0x35111bf7 159 + #define PKE_RSA_EP_1536 0x4d111cdc 160 + #define PKE_RSA_EP_2048 0x6e111dba 161 + #define PKE_RSA_EP_3072 0x7d111ea3 162 + #define PKE_RSA_EP_4096 0xa5101f7e 163 + 164 + static unsigned long qat_rsa_enc_fn_id(unsigned int len) 165 + { 166 + unsigned int bitslen = len << 3; 167 + 168 + switch (bitslen) { 169 + case 512: 170 + return PKE_RSA_EP_512; 171 + case 1024: 172 + return PKE_RSA_EP_1024; 173 + case 1536: 174 + return 
PKE_RSA_EP_1536; 175 + case 2048: 176 + return PKE_RSA_EP_2048; 177 + case 3072: 178 + return PKE_RSA_EP_3072; 179 + case 4096: 180 + return PKE_RSA_EP_4096; 181 + default: 182 + return 0; 183 + }; 184 + } 185 + 186 + #define PKE_RSA_DP1_512 0x1c161b3c 187 + #define PKE_RSA_DP1_1024 0x35111c12 188 + #define PKE_RSA_DP1_1536 0x4d111cf7 189 + #define PKE_RSA_DP1_2048 0x6e111dda 190 + #define PKE_RSA_DP1_3072 0x7d111ebe 191 + #define PKE_RSA_DP1_4096 0xa5101f98 192 + 193 + static unsigned long qat_rsa_dec_fn_id(unsigned int len) 194 + { 195 + unsigned int bitslen = len << 3; 196 + 197 + switch (bitslen) { 198 + case 512: 199 + return PKE_RSA_DP1_512; 200 + case 1024: 201 + return PKE_RSA_DP1_1024; 202 + case 1536: 203 + return PKE_RSA_DP1_1536; 204 + case 2048: 205 + return PKE_RSA_DP1_2048; 206 + case 3072: 207 + return PKE_RSA_DP1_3072; 208 + case 4096: 209 + return PKE_RSA_DP1_4096; 210 + default: 211 + return 0; 212 + }; 213 + } 214 + 215 + static int qat_rsa_enc(struct akcipher_request *req) 216 + { 217 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 218 + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 219 + struct qat_crypto_instance *inst = ctx->inst; 220 + struct device *dev = &GET_DEV(inst->accel_dev); 221 + struct qat_rsa_request *qat_req = 222 + PTR_ALIGN(akcipher_request_ctx(req), 64); 223 + struct icp_qat_fw_pke_request *msg = &qat_req->req; 224 + int ret, ctr = 0; 225 + 226 + if (unlikely(!ctx->n || !ctx->e)) 227 + return -EINVAL; 228 + 229 + if (req->dst_len < ctx->key_sz) { 230 + req->dst_len = ctx->key_sz; 231 + return -EOVERFLOW; 232 + } 233 + memset(msg, '\0', sizeof(*msg)); 234 + ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, 235 + ICP_QAT_FW_COMN_REQ_FLAG_SET); 236 + msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz); 237 + if (unlikely(!msg->pke_hdr.cd_pars.func_id)) 238 + return -EINVAL; 239 + 240 + qat_req->ctx = ctx; 241 + msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; 242 + msg->pke_hdr.comn_req_flags = 
243 + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, 244 + QAT_COMN_CD_FLD_TYPE_64BIT_ADR); 245 + 246 + qat_req->in.enc.e = ctx->dma_e; 247 + qat_req->in.enc.n = ctx->dma_n; 248 + ret = -ENOMEM; 249 + 250 + /* 251 + * src can be of any size in valid range, but HW expects it to be the 252 + * same as modulo n so in case it is different we need to allocate a 253 + * new buf and copy src data. 254 + * In other case we just need to map the user provided buffer. 255 + */ 256 + if (req->src_len < ctx->key_sz) { 257 + int shift = ctx->key_sz - req->src_len; 258 + 259 + qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 260 + &qat_req->in.enc.m, 261 + GFP_KERNEL); 262 + if (unlikely(!qat_req->src_align)) 263 + return ret; 264 + 265 + memcpy(qat_req->src_align + shift, req->src, req->src_len); 266 + } else { 267 + qat_req->src_align = NULL; 268 + qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len, 269 + DMA_TO_DEVICE); 270 + } 271 + qat_req->in.in_tab[3] = 0; 272 + qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len, 273 + DMA_FROM_DEVICE); 274 + qat_req->out.out_tab[1] = 0; 275 + qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, 276 + sizeof(struct qat_rsa_input_params), 277 + DMA_TO_DEVICE); 278 + qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, 279 + sizeof(struct qat_rsa_output_params), 280 + DMA_TO_DEVICE); 281 + 282 + if (unlikely((!qat_req->src_align && 283 + dma_mapping_error(dev, qat_req->in.enc.m)) || 284 + dma_mapping_error(dev, qat_req->out.enc.c) || 285 + dma_mapping_error(dev, qat_req->phy_in) || 286 + dma_mapping_error(dev, qat_req->phy_out))) 287 + goto unmap; 288 + 289 + msg->pke_mid.src_data_addr = qat_req->phy_in; 290 + msg->pke_mid.dest_data_addr = qat_req->phy_out; 291 + msg->pke_mid.opaque = (uint64_t)(__force long)req; 292 + msg->input_param_count = 3; 293 + msg->output_param_count = 1; 294 + do { 295 + ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); 296 + } while (ret == -EBUSY && 
ctr++ < 100); 297 + 298 + if (!ret) 299 + return -EINPROGRESS; 300 + unmap: 301 + if (qat_req->src_align) 302 + dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, 303 + qat_req->in.enc.m); 304 + else 305 + if (!dma_mapping_error(dev, qat_req->in.enc.m)) 306 + dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, 307 + DMA_TO_DEVICE); 308 + if (!dma_mapping_error(dev, qat_req->out.enc.c)) 309 + dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, 310 + DMA_FROM_DEVICE); 311 + if (!dma_mapping_error(dev, qat_req->phy_in)) 312 + dma_unmap_single(dev, qat_req->phy_in, 313 + sizeof(struct qat_rsa_input_params), 314 + DMA_TO_DEVICE); 315 + if (!dma_mapping_error(dev, qat_req->phy_out)) 316 + dma_unmap_single(dev, qat_req->phy_out, 317 + sizeof(struct qat_rsa_output_params), 318 + DMA_TO_DEVICE); 319 + return ret; 320 + } 321 + 322 + static int qat_rsa_dec(struct akcipher_request *req) 323 + { 324 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 325 + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 326 + struct qat_crypto_instance *inst = ctx->inst; 327 + struct device *dev = &GET_DEV(inst->accel_dev); 328 + struct qat_rsa_request *qat_req = 329 + PTR_ALIGN(akcipher_request_ctx(req), 64); 330 + struct icp_qat_fw_pke_request *msg = &qat_req->req; 331 + int ret, ctr = 0; 332 + 333 + if (unlikely(!ctx->n || !ctx->d)) 334 + return -EINVAL; 335 + 336 + if (req->dst_len < ctx->key_sz) { 337 + req->dst_len = ctx->key_sz; 338 + return -EOVERFLOW; 339 + } 340 + memset(msg, '\0', sizeof(*msg)); 341 + ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, 342 + ICP_QAT_FW_COMN_REQ_FLAG_SET); 343 + msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz); 344 + if (unlikely(!msg->pke_hdr.cd_pars.func_id)) 345 + return -EINVAL; 346 + 347 + qat_req->ctx = ctx; 348 + msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; 349 + msg->pke_hdr.comn_req_flags = 350 + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, 351 + QAT_COMN_CD_FLD_TYPE_64BIT_ADR); 352 + 353 + 
qat_req->in.dec.d = ctx->dma_d; 354 + qat_req->in.dec.n = ctx->dma_n; 355 + ret = -ENOMEM; 356 + 357 + /* 358 + * src can be of any size in valid range, but HW expects it to be the 359 + * same as modulo n so in case it is different we need to allocate a 360 + * new buf and copy src data. 361 + * In other case we just need to map the user provided buffer. 362 + */ 363 + if (req->src_len < ctx->key_sz) { 364 + int shift = ctx->key_sz - req->src_len; 365 + 366 + qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 367 + &qat_req->in.dec.c, 368 + GFP_KERNEL); 369 + if (unlikely(!qat_req->src_align)) 370 + return ret; 371 + 372 + memcpy(qat_req->src_align + shift, req->src, req->src_len); 373 + } else { 374 + qat_req->src_align = NULL; 375 + qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len, 376 + DMA_TO_DEVICE); 377 + } 378 + qat_req->in.in_tab[3] = 0; 379 + qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len, 380 + DMA_FROM_DEVICE); 381 + qat_req->out.out_tab[1] = 0; 382 + qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, 383 + sizeof(struct qat_rsa_input_params), 384 + DMA_TO_DEVICE); 385 + qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, 386 + sizeof(struct qat_rsa_output_params), 387 + DMA_TO_DEVICE); 388 + 389 + if (unlikely((!qat_req->src_align && 390 + dma_mapping_error(dev, qat_req->in.dec.c)) || 391 + dma_mapping_error(dev, qat_req->out.dec.m) || 392 + dma_mapping_error(dev, qat_req->phy_in) || 393 + dma_mapping_error(dev, qat_req->phy_out))) 394 + goto unmap; 395 + 396 + msg->pke_mid.src_data_addr = qat_req->phy_in; 397 + msg->pke_mid.dest_data_addr = qat_req->phy_out; 398 + msg->pke_mid.opaque = (uint64_t)(__force long)req; 399 + msg->input_param_count = 3; 400 + msg->output_param_count = 1; 401 + do { 402 + ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); 403 + } while (ret == -EBUSY && ctr++ < 100); 404 + 405 + if (!ret) 406 + return -EINPROGRESS; 407 + unmap: 408 + if (qat_req->src_align) 409 + 
dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, 410 + qat_req->in.dec.c); 411 + else 412 + if (!dma_mapping_error(dev, qat_req->in.dec.c)) 413 + dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, 414 + DMA_TO_DEVICE); 415 + if (!dma_mapping_error(dev, qat_req->out.dec.m)) 416 + dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, 417 + DMA_FROM_DEVICE); 418 + if (!dma_mapping_error(dev, qat_req->phy_in)) 419 + dma_unmap_single(dev, qat_req->phy_in, 420 + sizeof(struct qat_rsa_input_params), 421 + DMA_TO_DEVICE); 422 + if (!dma_mapping_error(dev, qat_req->phy_out)) 423 + dma_unmap_single(dev, qat_req->phy_out, 424 + sizeof(struct qat_rsa_output_params), 425 + DMA_TO_DEVICE); 426 + return ret; 427 + } 428 + 429 + int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag, 430 + const void *value, size_t vlen) 431 + { 432 + struct qat_rsa_ctx *ctx = context; 433 + struct qat_crypto_instance *inst = ctx->inst; 434 + struct device *dev = &GET_DEV(inst->accel_dev); 435 + const char *ptr = value; 436 + int ret; 437 + 438 + while (!*ptr && vlen) { 439 + ptr++; 440 + vlen--; 441 + } 442 + 443 + ctx->key_sz = vlen; 444 + ret = -EINVAL; 445 + /* In FIPS mode only allow key size 2K & 3K */ 446 + if (fips_enabled && (ctx->key_sz != 256 || ctx->key_sz != 384)) { 447 + pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); 448 + goto err; 449 + } 450 + /* invalid key size provided */ 451 + if (!qat_rsa_enc_fn_id(ctx->key_sz)) 452 + goto err; 453 + 454 + ret = -ENOMEM; 455 + ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 456 + if (!ctx->n) 457 + goto err; 458 + 459 + memcpy(ctx->n, ptr, ctx->key_sz); 460 + return 0; 461 + err: 462 + ctx->key_sz = 0; 463 + ctx->n = NULL; 464 + return ret; 465 + } 466 + 467 + int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, 468 + const void *value, size_t vlen) 469 + { 470 + struct qat_rsa_ctx *ctx = context; 471 + struct qat_crypto_instance *inst = ctx->inst; 472 + struct device *dev 
= &GET_DEV(inst->accel_dev); 473 + const char *ptr = value; 474 + 475 + while (!*ptr && vlen) { 476 + ptr++; 477 + vlen--; 478 + } 479 + 480 + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { 481 + ctx->e = NULL; 482 + return -EINVAL; 483 + } 484 + 485 + ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 486 + if (!ctx->e) { 487 + ctx->e = NULL; 488 + return -ENOMEM; 489 + } 490 + memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); 491 + return 0; 492 + } 493 + 494 + int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, 495 + const void *value, size_t vlen) 496 + { 497 + struct qat_rsa_ctx *ctx = context; 498 + struct qat_crypto_instance *inst = ctx->inst; 499 + struct device *dev = &GET_DEV(inst->accel_dev); 500 + const char *ptr = value; 501 + int ret; 502 + 503 + while (!*ptr && vlen) { 504 + ptr++; 505 + vlen--; 506 + } 507 + 508 + ret = -EINVAL; 509 + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) 510 + goto err; 511 + 512 + /* In FIPS mode only allow key size 2K & 3K */ 513 + if (fips_enabled && (vlen != 256 || vlen != 384)) { 514 + pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); 515 + goto err; 516 + } 517 + 518 + ret = -ENOMEM; 519 + ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 520 + if (!ctx->n) 521 + goto err; 522 + 523 + memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen); 524 + return 0; 525 + err: 526 + ctx->d = NULL; 527 + return ret; 528 + } 529 + 530 + static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, 531 + unsigned int keylen) 532 + { 533 + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 534 + struct device *dev = &GET_DEV(ctx->inst->accel_dev); 535 + int ret; 536 + 537 + /* Free the old key if any */ 538 + if (ctx->n) 539 + dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); 540 + if (ctx->e) 541 + dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); 542 + if (ctx->d) { 543 + memset(ctx->d, '\0', ctx->key_sz); 544 + dma_free_coherent(dev, 
ctx->key_sz, ctx->d, ctx->dma_d); 545 + } 546 + 547 + ctx->n = NULL; 548 + ctx->e = NULL; 549 + ctx->d = NULL; 550 + ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen); 551 + if (ret < 0) 552 + goto free; 553 + 554 + if (!ctx->n || !ctx->e) { 555 + /* invalid key provided */ 556 + ret = -EINVAL; 557 + goto free; 558 + } 559 + 560 + return 0; 561 + free: 562 + if (ctx->d) { 563 + memset(ctx->d, '\0', ctx->key_sz); 564 + dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); 565 + ctx->d = NULL; 566 + } 567 + if (ctx->e) { 568 + dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); 569 + ctx->e = NULL; 570 + } 571 + if (ctx->n) { 572 + dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); 573 + ctx->n = NULL; 574 + ctx->key_sz = 0; 575 + } 576 + return ret; 577 + } 578 + 579 + static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) 580 + { 581 + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 582 + struct qat_crypto_instance *inst = 583 + qat_crypto_get_instance_node(get_current_node()); 584 + 585 + if (!inst) 586 + return -EINVAL; 587 + 588 + ctx->key_sz = 0; 589 + ctx->inst = inst; 590 + return 0; 591 + } 592 + 593 + static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) 594 + { 595 + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 596 + struct device *dev = &GET_DEV(ctx->inst->accel_dev); 597 + 598 + if (ctx->n) 599 + dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); 600 + if (ctx->e) 601 + dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); 602 + if (ctx->d) { 603 + memset(ctx->d, '\0', ctx->key_sz); 604 + dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); 605 + } 606 + qat_crypto_put_instance(ctx->inst); 607 + ctx->n = NULL; 608 + ctx->d = NULL; 609 + ctx->d = NULL; 610 + } 611 + 612 + static struct akcipher_alg rsa = { 613 + .encrypt = qat_rsa_enc, 614 + .decrypt = qat_rsa_dec, 615 + .sign = qat_rsa_dec, 616 + .verify = qat_rsa_enc, 617 + .setkey = qat_rsa_setkey, 618 + .init = qat_rsa_init_tfm, 619 + .exit = 
qat_rsa_exit_tfm, 620 + .reqsize = sizeof(struct qat_rsa_request) + 64, 621 + .base = { 622 + .cra_name = "rsa", 623 + .cra_driver_name = "qat-rsa", 624 + .cra_priority = 1000, 625 + .cra_module = THIS_MODULE, 626 + .cra_ctxsize = sizeof(struct qat_rsa_ctx), 627 + }, 628 + }; 629 + 630 + int qat_asym_algs_register(void) 631 + { 632 + rsa.base.cra_flags = 0; 633 + return crypto_register_akcipher(&rsa); 634 + } 635 + 636 + void qat_asym_algs_unregister(void) 637 + { 638 + crypto_unregister_akcipher(&rsa); 639 + }
+1 -18
drivers/crypto/qat/qat_common/qat_crypto.c
··· 88 88 if (inst->pke_rx) 89 89 adf_remove_ring(inst->pke_rx); 90 90 91 - if (inst->rnd_tx) 92 - adf_remove_ring(inst->rnd_tx); 93 - 94 - if (inst->rnd_rx) 95 - adf_remove_ring(inst->rnd_rx); 96 - 97 91 list_del(list_ptr); 98 92 kfree(inst); 99 93 } ··· 196 202 msg_size, key, NULL, 0, &inst->sym_tx)) 197 203 goto err; 198 204 199 - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); 200 - if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, 201 - msg_size, key, NULL, 0, &inst->rnd_tx)) 202 - goto err; 203 - 204 205 msg_size = msg_size >> 1; 205 206 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); 206 207 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, ··· 209 220 &inst->sym_rx)) 210 221 goto err; 211 222 212 - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); 213 - if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, 214 - msg_size, key, qat_alg_callback, 0, 215 - &inst->rnd_rx)) 216 - goto err; 217 - 218 223 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); 219 224 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, 220 - msg_size, key, qat_alg_callback, 0, 225 + msg_size, key, qat_alg_asym_callback, 0, 221 226 &inst->pke_rx)) 222 227 goto err; 223 228 }
-2
drivers/crypto/qat/qat_common/qat_crypto.h
··· 57 57 struct adf_etr_ring_data *sym_rx; 58 58 struct adf_etr_ring_data *pke_tx; 59 59 struct adf_etr_ring_data *pke_rx; 60 - struct adf_etr_ring_data *rnd_tx; 61 - struct adf_etr_ring_data *rnd_rx; 62 60 struct adf_accel_dev *accel_dev; 63 61 struct list_head list; 64 62 unsigned long state;
+5
drivers/crypto/qat/qat_common/qat_rsakey.asn1
-- RSA key as decoded by qat_rsa_setkey(): modulus n, public exponent e,
-- private exponent d.  Each action callback copies its INTEGER value into
-- the driver context.
RsaKey ::= SEQUENCE {
	n INTEGER ({ qat_rsa_get_n }),
	e INTEGER ({ qat_rsa_get_e }),
	d INTEGER ({ qat_rsa_get_d })
}
-12
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
··· 167 167 key, (void *)&val, ADF_DEC)) 168 168 goto err; 169 169 170 - val = 4; 171 - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); 172 - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, 173 - key, (void *)&val, ADF_DEC)) 174 - goto err; 175 - 176 170 val = 8; 177 171 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); 178 172 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ··· 175 181 176 182 val = 10; 177 183 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); 178 - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, 179 - key, (void *)&val, ADF_DEC)) 180 - goto err; 181 - 182 - val = 12; 183 - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); 184 184 if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, 185 185 key, (void *)&val, ADF_DEC)) 186 186 goto err;