/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static LIST_HEAD(ablkcipher_algs);

static void qce_ablkcipher_done(void *data)
{
        struct crypto_async_request *async_req = data;
        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        enum dma_data_direction dir_src, dir_dst;
        u32 status;
        int error;
        bool diff_dst;

        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        error = qce_dma_terminate_all(&qce->dma);
        if (error)
                dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
                        error);

        if (diff_dst)
                dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

        sg_free_table(&rctx->dst_tbl);

        error = qce_check_status(qce, &status);
        if (error < 0)
                dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

        qce->async_req_done(tmpl->qce, error);
}
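/*
 * Build the DMA view of a request: the caller's destination scatterlist
 * gets the per-device result buffer appended to it (hence the extra
 * dst_nents entry below), source and destination are mapped (with a
 * single bidirectional mapping when they alias), both lists are handed
 * to the DMA channel, and the engine is kicked off with qce_start().
 * qce_ablkcipher_done() above runs as the DMA completion callback and
 * unwinds everything in reverse order.
 */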
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        enum dma_data_direction dir_src, dir_dst;
        struct scatterlist *sg;
        bool diff_dst;
        gfp_t gfp;
        int ret;

        rctx->iv = req->info;
        rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        rctx->cryptlen = req->nbytes;

        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (diff_dst)
                rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
        else
                rctx->dst_nents = rctx->src_nents;
        if (rctx->src_nents < 0) {
                dev_err(qce->dev, "Invalid number of src SG entries\n");
                return rctx->src_nents;
        }
        if (rctx->dst_nents < 0) {
                dev_err(qce->dev, "Invalid number of dst SG entries\n");
                return rctx->dst_nents;
        }

        /* extra entry for the result buffer */
        rctx->dst_nents += 1;

        gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                                                GFP_KERNEL : GFP_ATOMIC;

        ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
        if (ret)
                return ret;

        sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

        sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg_mark_end(sg);
        rctx->dst_sg = rctx->dst_tbl.sgl;

        /* dma_map_sg() returns 0 on failure, never a negative value */
        ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
        if (!ret) {
                ret = -EIO;
                goto error_free;
        }

        if (diff_dst) {
                ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
                if (!ret) {
                        ret = -EIO;
                        goto error_unmap_dst;
                }
                rctx->src_sg = req->src;
        } else {
                rctx->src_sg = rctx->dst_sg;
        }

        ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
                               rctx->dst_sg, rctx->dst_nents,
                               qce_ablkcipher_done, async_req);
        if (ret)
                goto error_unmap_src;

        qce_dma_issue_pending(&qce->dma);

        ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
        if (ret)
                goto error_terminate;

        return 0;

error_terminate:
        qce_dma_terminate_all(&qce->dma);
error_unmap_src:
        if (diff_dst)
                dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
        sg_free_table(&rctx->dst_tbl);
        return ret;
}

static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
                                 unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
        int ret;

        if (!key || !keylen)
                return -EINVAL;

        if (IS_AES(flags)) {
                switch (keylen) {
                case AES_KEYSIZE_128:
                case AES_KEYSIZE_256:
                        break;
                default:
                        goto fallback;
                }
        } else if (IS_DES(flags)) {
                u32 tmp[DES_EXPKEY_WORDS];

                ret = des_ekey(tmp, key);
                if (!ret && (crypto_ablkcipher_get_flags(ablk) &
                             CRYPTO_TFM_REQ_WEAK_KEY))
                        goto weakkey;
        }

        ctx->enc_keylen = keylen;
        memcpy(ctx->enc_key, key, keylen);
        return 0;
fallback:
        ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
        if (!ret)
                ctx->enc_keylen = keylen;
        return ret;
weakkey:
        crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
        return -EINVAL;
}
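/*
 * Only 128- and 256-bit AES keys are handled by the engine; any other
 * AES key length (i.e. AES-192) was routed to the software fallback by
 * qce_ablkcipher_setkey() above, so re-check the cached key length here
 * and run such requests synchronously on the fallback skcipher.
 */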
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
        struct crypto_tfm *tfm =
                        crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
        int ret;

        rctx->flags = tmpl->alg_flags;
        rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

        if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
            ctx->enc_keylen != AES_KEYSIZE_256) {
                SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->nbytes, req->info);
                ret = encrypt ? crypto_skcipher_encrypt(subreq) :
                                crypto_skcipher_decrypt(subreq);
                skcipher_request_zero(subreq);
                return ret;
        }

        return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        return qce_ablkcipher_crypt(req, 1);
}

static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        return qce_ablkcipher_crypt(req, 0);
}

static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

        memset(ctx, 0, sizeof(*ctx));
        tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

        ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
                                              CRYPTO_ALG_ASYNC |
                                              CRYPTO_ALG_NEED_FALLBACK);
        return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(ctx->fallback);
}

struct qce_ablkcipher_def {
        unsigned long flags;
        const char *name;
        const char *drv_name;
        unsigned int blocksize;
        unsigned int ivsize;
        unsigned int min_keysize;
        unsigned int max_keysize;
};

static const struct qce_ablkcipher_def ablkcipher_def[] = {
        {
                .flags = QCE_ALG_AES | QCE_MODE_ECB,
                .name = "ecb(aes)",
                .drv_name = "ecb-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_CBC,
                .name = "cbc(aes)",
                .drv_name = "cbc-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_CTR,
                .name = "ctr(aes)",
                .drv_name = "ctr-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_XTS,
                .name = "xts(aes)",
                .drv_name = "xts-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                /* XTS consumes two concatenated AES keys */
                .min_keysize = AES_MIN_KEY_SIZE * 2,
                .max_keysize = AES_MAX_KEY_SIZE * 2,
        },
        {
                .flags = QCE_ALG_DES | QCE_MODE_ECB,
                .name = "ecb(des)",
                .drv_name = "ecb-des-qce",
                .blocksize = DES_BLOCK_SIZE,
                .ivsize = 0,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_DES | QCE_MODE_CBC,
                .name = "cbc(des)",
                .drv_name = "cbc-des-qce",
                .blocksize = DES_BLOCK_SIZE,
                .ivsize = DES_BLOCK_SIZE,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_3DES | QCE_MODE_ECB,
                .name = "ecb(des3_ede)",
                .drv_name = "ecb-3des-qce",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .ivsize = 0,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_3DES | QCE_MODE_CBC,
                .name = "cbc(des3_ede)",
                .drv_name = "cbc-3des-qce",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .ivsize = DES3_EDE_BLOCK_SIZE,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
        },
};
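/*
 * Register one crypto_alg per table entry above. Each registered
 * algorithm carries a qce_alg_template that links it back to the qce
 * device and is kept on ablkcipher_algs so it can be unregistered and
 * freed again on driver removal.
 */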
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
                                       struct qce_device *qce)
{
        struct qce_alg_template *tmpl;
        struct crypto_alg *alg;
        int ret;

        tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
        if (!tmpl)
                return -ENOMEM;

        alg = &tmpl->alg.crypto;

        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);

        alg->cra_blocksize = def->blocksize;
        alg->cra_ablkcipher.ivsize = def->ivsize;
        alg->cra_ablkcipher.min_keysize = def->min_keysize;
        alg->cra_ablkcipher.max_keysize = def->max_keysize;
        alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
        alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
        alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

        alg->cra_priority = 300;
        alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_NEED_FALLBACK;
        alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
        alg->cra_alignmask = 0;
        alg->cra_type = &crypto_ablkcipher_type;
        alg->cra_module = THIS_MODULE;
        alg->cra_init = qce_ablkcipher_init;
        alg->cra_exit = qce_ablkcipher_exit;
        INIT_LIST_HEAD(&alg->cra_list);

        INIT_LIST_HEAD(&tmpl->entry);
        tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
        tmpl->alg_flags = def->flags;
        tmpl->qce = qce;

        ret = crypto_register_alg(alg);
        if (ret) {
                kfree(tmpl);
                dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
                return ret;
        }

        list_add_tail(&tmpl->entry, &ablkcipher_algs);
        dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
        return 0;
}

static void qce_ablkcipher_unregister(struct qce_device *qce)
{
        struct qce_alg_template *tmpl, *n;

        list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
                crypto_unregister_alg(&tmpl->alg.crypto);
                list_del(&tmpl->entry);
                kfree(tmpl);
        }
}

static int qce_ablkcipher_register(struct qce_device *qce)
{
        int ret, i;

        for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
                ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
                if (ret)
                        goto err;
        }

        return 0;
err:
        qce_ablkcipher_unregister(qce);
        return ret;
}

const struct qce_algo_ops ablkcipher_ops = {
        .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
        .register_algs = qce_ablkcipher_register,
        .unregister_algs = qce_ablkcipher_unregister,
        .async_req_handle = qce_ablkcipher_async_req_handle,
};