// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zeros that need to be
 *                                stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

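/*
 * Worked example for caam_rsa_count_leading_zeros() (hypothetical values):
 * with a 4096-bit key (n_sz = 512) and a 514-byte source whose first two
 * bytes are zero, the caller passes nbytes = 2 and the function returns 2,
 * so both padding bytes are skipped before the hardware sees the input.
 */

/*
 * rsa_edesc_alloc() makes a single allocation holding, in order, struct
 * rsa_edesc, the hardware Job Descriptor (desclen bytes) and the sec4 S/G
 * link table; edesc->sec4_sg points just past the descriptor. When the
 * input is shorter than the modulus, the first S/G entry references the
 * pre-mapped zero buffer, so the hardware sees a left-padded operand.
 */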
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the modulus n,
		 * so it will be zero-padded
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

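/*
 * Crypto-engine callback for backlogged requests: mark the extended
 * descriptor as backlogged and push the prepared Job Descriptor to the
 * Job Ring; the request then completes from the JR interrupt path via
 * req_ctx->akcipher_op_done.
 */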
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

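/*
 * Fill the RSA encrypt Protocol Data Block: map n and e, point f (input)
 * and g (output) at either the single mapped segment or the sec4 S/G
 * table, and pack the key sizes as (e_sz << RSA_PDB_E_SHIFT) | n_sz into
 * the sgf word alongside the S/G flags.
 */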
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

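/*
 * Form 1 decrypt PDB: only the modulus n and the private exponent d are
 * needed (no CRT parameters).
 */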
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

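/*
 * Form 2 decrypt PDB: private exponent d plus the primes p and q; tmp1
 * and tmp2 are scratch buffers the accelerator also writes, hence the
 * DMA_BIDIRECTIONAL mappings.
 */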
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

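/*
 * Form 3 (CRT) decrypt PDB: p, q, dp, dq and qinv; the private exponent d
 * is not used, so sgf carries only n_sz.
 */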
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to the crypto engine; the others
	 * can be handled by CAAM directly when a Job Ring slot is free,
	 * especially since the JR has up to 1024 entries (far more than
	 * the 10 entries of the crypto engine queue).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

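/*
 * Decrypt dispatch: prefer the CRT form when all CRT members were accepted
 * at setkey time (FORM3), fall back to FORM2 (d, p, q) and finally to
 * FORM1 (n, d).
 */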
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length, as
 * the BER-encoding requires that the minimum number of bytes be used to encode
 * the integer. dP, dQ, qInv decoded values have to be zero-padded to
 * appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
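
/*
 * Example (hypothetical sizes): a 63-byte dP paired with a 64-byte p is
 * copied to dst + 1, so the kzalloc()'ed buffer keeps one leading zero
 * byte and the member reaches the length the hardware expects.
 */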

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;
	unsigned int aligned_size;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
	rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
	rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
				       CRYPTO_DMA_PADDING,
		},
	}
};
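
/*
 * Usage sketch (assumptions, not part of this driver): a kernel caller
 * reaches this implementation through the generic akcipher API; names such
 * as ber_key, in_buf and out_buf below are hypothetical:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req;
 *	struct scatterlist src, dst;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	err = crypto_akcipher_set_pub_key(tfm, ber_key, ber_key_len);
 *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&src, in_buf, in_len);
 *	sg_init_one(&dst, out_buf, out_len);
 *	akcipher_request_set_crypt(req, &src, &dst, in_len, out_len);
 *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
 *
 * With cra_priority 3000, "rsa-caam" normally wins the "rsa" lookup over
 * the software implementation when the PKHA hardware is present.
 */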

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If
		 * this is the case, the number is non-zero, but this bit is
		 * set to indicate that no encryption or decryption is
		 * supported. Only signing and verifying are supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}