// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

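/*
 * Each RSA job descriptor is a header command plus a protocol operation
 * command (hence 2 * CAAM_CMD_SZ) wrapped around the protocol data block.
 */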
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handlers */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

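/*
 * Count the leading zero bytes of the (big-endian) integer held in the
 * scatterlist, so that the caller can strip them before handing the value
 * to the accelerator.
 */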
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

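/*
 * Allocate the extended descriptor as one contiguous block: the rsa_edesc
 * itself, the hardware job descriptor, and (when either the source or the
 * destination scatterlist has more than one entry) a SEC4 S/G link table.
 */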
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

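/*
 * Fill the protocol data block for the public-key operation: map n and e
 * for the device, and point f (input) and g (output) either at the single
 * DMA-mapped segment or at the corresponding part of the SEC4 S/G table,
 * flagging S/G usage via the sgf field.
 */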
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

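/*
 * RSA public-key operation: build an extended descriptor, fill the PDB,
 * construct the job descriptor and enqueue it on the job ring. On success
 * the request completes asynchronously in rsa_pub_done(), so -EINPROGRESS
 * is returned here.
 */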
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

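/*
 * The three private-key decrypt paths below are identical in shape; they
 * differ only in which protocol data block they fill and which job
 * descriptor they build for the corresponding key form.
 */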
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

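	/*
	 * Pick the private-key form the key was imported with: FORM3 is the
	 * CRT representation (p, q, dP, dQ, qInv), FORM2 is (d, p, q), and
	 * FORM1 falls back to the plain (n, d) pair.
	 */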
	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	/* Check the remaining length before dereferencing, so an all-zero
	 * buffer cannot trigger a one-byte read past the end.
	 */
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p, q
 * length, as BER encoding requires that the minimum number of bytes be
 * used to encode the integer. The decoded dP, dQ and qInv values therefore
 * have to be zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

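/*
 * The key-length check is in bits: callers pass raw_key.n_sz << 3, and the
 * 4096-bit ceiling matches the largest modulus the PKHA unit supports.
 */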
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

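/*
 * Build the richest private-key form the imported key allows: start from
 * FORM1 (n, d), upgrade to FORM2 once p, q and the scratch buffers are in
 * place, and to FORM3 once dP, dQ and qInv have been decoded as well. Any
 * allocation failure simply leaves the key at the last form reached.
 */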
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session PKC driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session PKC driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

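/*
 * Only the raw RSA primitive is exposed: .sign reuses the private-key
 * (decrypt) path and .verify the public-key (encrypt) path, with padding
 * expected to be layered on top by a wrapping template such as pkcs1pad.
 */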
static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");