// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
 */
#include <linux/device.h>
#include <asm/unaligned.h>
#include <linux/iopoll.h>
#include "rk3288_crypto.h"

/*
 * The IC cannot hash a zero-length message, so return the precomputed
 * hash of the empty message whenever one is requested.
 */

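/*
 * The DMA engine only copes with scatterlist entries that are 32-bit
 * aligned and a multiple of four bytes long; anything else is punted
 * to the software fallback.
 */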
static bool rk_ahash_need_fallback(struct ahash_request *req)
{
	struct scatterlist *sg;

	sg = req->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		if (sg->length % 4)
			return true;
		sg = sg_next(sg);
	}
	return false;
}

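/* Run the whole digest on the software fallback and account for it in the stats. */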
static int rk_ahash_digest_fb(struct ahash_request *areq)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	algt->stat_fb++;

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	return crypto_ahash_digest(&rctx->fallback_req);
}

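/*
 * Zero-length messages never touch the hardware: copy the well-known
 * hash of the empty message for the requested digest size.
 */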
static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

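/*
 * Flush the hash unit, clear the digest output registers, enable the
 * receive-DMA interrupts and program the hash mode, byte swapping and
 * total message length for a new request.
 */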
static void rk_ahash_reg_init(struct ahash_request *req,
			      struct rk_crypto_info *dev)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	int reg_status;

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
	reg_status &= (~RK_CRYPTO_HASH_FLUSH);
	reg_status |= _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
					    RK_CRYPTO_HRDMA_DONE_ENA);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
					  RK_CRYPTO_BYTESWAP_BRFIFO |
					  RK_CRYPTO_BYTESWAP_BTFIFO);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
}

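/*
 * Only digest() runs on the hardware.  The init/update/final/finup/
 * import/export entry points mirror the request onto the software
 * fallback tfm, so incremental hashing is handled entirely by the
 * fallback.
 */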
static int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

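/*
 * Hardware entry point: fall back for scatterlists the DMA engine cannot
 * handle, short-circuit empty messages, and otherwise hand the request to
 * the crypto engine of the selected device.
 */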
static int rk_ahash_digest(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct rk_crypto_info *dev;
	struct crypto_engine *engine;

	if (rk_ahash_need_fallback(req))
		return rk_ahash_digest_fb(req);

	if (!req->nbytes)
		return zero_message_process(req);

	dev = get_rk_crypto();

	rctx->dev = dev;
	engine = dev->engine;

	return crypto_transfer_hash_request_to_engine(engine, req);
}

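/* Program the source address and length (in 32-bit words), then start hashing. */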
static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
					  (RK_CRYPTO_HASH_START << 16));
}

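/* Map the source scatterlist for DMA before the engine runs the request. */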
static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct rk_crypto_info *rkc = rctx->dev;
	int ret;

	ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (ret <= 0)
		return -EINVAL;

	rctx->nrsg = ret;

	return 0;
}

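/* Undo the DMA mapping set up by rk_hash_prepare(). */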
static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct rk_crypto_info *rkc = rctx->dev;

	dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
	return 0;
}

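/*
 * Crypto engine callback that drives the hardware: select the hash mode,
 * program the registers, feed each scatterlist entry through DMA, wait
 * for the engine to drain, then read the digest from the DOUT registers.
 */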
static int rk_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
	struct scatterlist *sg = areq->src;
	struct rk_crypto_info *rkc = rctx->dev;
	int err = 0;
	int i;
	u32 v;

	err = pm_runtime_resume_and_get(rkc->dev);
	if (err)
		return err;

	rctx->mode = 0;

	algt->stat_req++;
	rkc->nreq++;

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		err = -EINVAL;
		goto theend;
	}

	rk_ahash_reg_init(areq, rkc);

	while (sg) {
		reinit_completion(&rkc->complete);
		rkc->status = 0;
		crypto_ahash_dma_start(rkc, sg);
		wait_for_completion_interruptible_timeout(&rkc->complete,
							  msecs_to_jiffies(2000));
		if (!rkc->status) {
			dev_err(rkc->dev, "DMA timeout\n");
			err = -EFAULT;
			goto theend;
		}
		sg = sg_next(sg);
	}

	/*
	 * The hardware needs some time to digest the data after the last
	 * DMA transfer.  That time depends on the length of the final
	 * transfer, so a fixed delay does not work here.  Polling every
	 * 10us keeps us from hammering the status register while still
	 * reacting quickly once the engine is done.
	 */
	readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);

	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
		v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
		put_unaligned_le32(v, areq->result + i * 4);
	}

theend:
	pm_runtime_put_autosuspend(rkc->dev);

	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

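/*
 * Allocate the software fallback tfm, size the request context so it can
 * embed the fallback's own request, and wire up the crypto engine callbacks.
 */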
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	/* for fallback */
	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(algt->dev->dev, "Could not load fallback driver.\n");
		return PTR_ERR(tctx->fallback_tfm);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	tctx->enginectx.op.do_one_request = rk_hash_run;
	tctx->enginectx.op.prepare_request = rk_hash_prepare;
	tctx->enginectx.op.unprepare_request = rk_hash_unprepare;

	return 0;
}

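/* Release the fallback tfm allocated in rk_cra_hash_init(). */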
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback_tfm);
}

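/* Algorithm templates picked up by the core driver, one per supported hash. */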
struct rk_crypto_tmp rk_ahash_sha1 = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "rk-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_alignmask = 3,
				.cra_init = rk_cra_hash_init,
				.cra_exit = rk_cra_hash_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

struct rk_crypto_tmp rk_ahash_sha256 = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "rk-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_alignmask = 3,
				.cra_init = rk_cra_hash_init,
				.cra_exit = rk_cra_hash_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

struct rk_crypto_tmp rk_ahash_md5 = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "rk-md5",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_alignmask = 3,
				.cra_init = rk_cra_hash_init,
				.cra_exit = rk_cra_hash_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};