// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8

struct cc_hash_handle {
	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
	cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static u64 cc_sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static u64 cc_sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
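
/*
 * The larval (initial) digest words above are listed high word first
 * (H7..H0 / H4..H0), i.e. reversed with respect to the order given in
 * the hash specifications; this matches the word order in which the
 * CryptoCell engine loads its intermediate state from SRAM.
 */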

static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};
/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the origin digest; the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}
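
/*
 * Result endianness handling: MD5 produces a little-endian digest and
 * the SHA-384/512 state is built from 64-bit words, so those modes use
 * the engine's byte-swap option on write-out; all other modes take the
 * HASH_DIGEST_RESULT_LITTLE_ENDIAN configuration path instead.
 */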

static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}
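
/*
 * cc_init_req() seeds the per-request state: for HMAC it copies the
 * precomputed ipad digest from the session context along with a length
 * constant saying one block (64 or 128 bytes) has already been hashed,
 * and stages the opad digest for the outer pass; for a plain hash it
 * copies the larval digest of the selected mode.
 */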

static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest buffer %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}
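
/*
 * The three completion callbacks above differ only in which DMA
 * mappings they tear down: an update has no result buffer to unmap,
 * while the digest/final paths also unmap (and thereby copy back) the
 * digest result. -EINPROGRESS signals a backlog notification, in which
 * case the request is still in flight and nothing is unmapped yet.
 */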

static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}
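
/*
 * HMAC refresher (RFC 2104): HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 * cc_fin_hmac() implements the outer pass: it spills the inner digest
 * H((K ^ ipad) || m) to the request context, reloads the precomputed
 * opad state as if one block had already been hashed, and feeds the
 * inner digest back through the engine.
 */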

static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_digest_addr =
		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
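
/*
 * Illustrative usage sketch (not part of this driver): callers reach
 * cc_hash_digest() through the generic ahash API. Names such as
 * my_done_cb, my_ctx and sgl below are hypothetical.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sgl, digest_out, data_len);
 *	rc = crypto_ahash_digest(req);	// -EINPROGRESS: async completion
 *
 * Whether "sha256" resolves to sha256-ccree depends on algorithm
 * priorities and on the CryptoCell hardware being present.
 */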

static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}
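
/*
 * cc_restore_hash() rebuilds engine state between submissions: update
 * and finup/final flows cannot assume the engine still holds this
 * request's intermediate digest, so each submission reloads the saved
 * digest and running byte count from the request context before
 * streaming more data.
 */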

static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x does not require HW update\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}

static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}

static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}

static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d\n", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* A zero keylen means plain HASH; any non-zero keylen selects the
	 * HMAC flow.
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, (void *)ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kzfree(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kzfree(ctx->key_params.key);

	return rc;
}
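
/*
 * Key setup follows RFC 2104: a key longer than the block size is first
 * hashed down, shorter keys are zero-padded to a full block, and the
 * two-iteration loop above XORs the padded key block with the ipad
 * (0x36..36) and opad (0x5c..5c) constants and runs each through one
 * compression, caching the resulting intermediate digests so that
 * per-request processing never touches the raw key again.
 */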

static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kzfree(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kzfree(ctx->key_params.key);

	return rc;
}
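
/*
 * XCBC-MAC key derivation (RFC 3566): the three subkeys are produced by
 * ECB-encrypting the constant blocks 0x01..01, 0x02..02 and 0x03..03
 * with the user key; K1 keys the CBC-MAC itself, while K2 and K3 are
 * mixed into the final block depending on whether it is complete or
 * padded. The descriptors above compute all three in one pass and park
 * them at fixed offsets in opad_tmp_keys_buff.
 */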

static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	return 0;
}
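
/*
 * Unlike XCBC, CMAC (NIST SP 800-38B) derives its subkeys K1/K2 from
 * the cipher itself, so setkey only stashes the raw AES key for the
 * engine; a 192-bit key is zero-extended to the maximum AES key size,
 * matching the key_size the MAC paths below program for it.
 */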

static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, (void *)ctx->digest_buff,
			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}

static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;
	else
		return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit\n");
	cc_free_ctx(ctx);
}

static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x does not require HW update\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_update_complete;
	cc_req.user_arg = (void *)req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
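
/*
 * The ECB-decrypt sequence above covers the corner case where the data
 * consumed so far ended exactly on a block boundary (rem_cnt == 0 after
 * at least one update): the engine has already chained the last block
 * in, so the state is decrypted with K1 back to the previous state XOR
 * M[n]; the zero-data block fed in afterwards then lets the engine redo
 * that block through the proper final-block path.
 */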

static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc (%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to cc_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_digest_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u8 *curr_buff = cc_hash_buf(state);
	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
	const u32 tmp = CC_EXPORT_MAGIC;

	memcpy(out, &tmp, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, state->digest_buff, ctx->inter_digestsize);
	out += ctx->inter_digestsize;

	memcpy(out, state->digest_bytes_len, ctx->hash_len);
	out += ctx->hash_len;

	memcpy(out, &curr_buff_cnt, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, curr_buff, curr_buff_cnt);

	return 0;
}
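
/*
 * Export/import state layout (sized by CC_STATE_SIZE below): a magic
 * word, the intermediate digest, the running byte count, the staging
 * buffer fill level, and the staging buffer itself. Import validates
 * only the magic and the buffer fill level; everything else is opaque
 * engine state copied back verbatim.
 */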

static int cc_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u32 tmp;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC)
		return -EINVAL;
	in += sizeof(u32);

	cc_init_req(dev, state, ctx);

	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	memcpy(state->digest_bytes_len, in, ctx->hash_len);
	in += ctx->hash_len;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > CC_MAX_HASH_BLCK_SIZE)
		return -EINVAL;
	in += sizeof(u32);

	state->buf_cnt[0] = tmp;
	memcpy(state->buffers[0], in, tmp);

	return 0;
}

struct cc_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char mac_name[CRYPTO_MAX_ALG_NAME];
	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	bool is_mac;
	bool synchronize;
	struct ahash_alg template_ahash;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	u32 min_hw_rev;
	enum cc_std_body std_body;
};

#define CC_STATE_SIZE(_x) \
	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))

/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
	/* Asynchronous hash template */
1576 {
1577 .name = "sha1",
1578 .driver_name = "sha1-ccree",
1579 .mac_name = "hmac(sha1)",
1580 .mac_driver_name = "hmac-sha1-ccree",
1581 .blocksize = SHA1_BLOCK_SIZE,
1582 .is_mac = true,
1583 .synchronize = false,
1584 .template_ahash = {
1585 .init = cc_hash_init,
1586 .update = cc_hash_update,
1587 .final = cc_hash_final,
1588 .finup = cc_hash_finup,
1589 .digest = cc_hash_digest,
1590 .export = cc_hash_export,
1591 .import = cc_hash_import,
1592 .setkey = cc_hash_setkey,
1593 .halg = {
1594 .digestsize = SHA1_DIGEST_SIZE,
1595 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1596 },
1597 },
1598 .hash_mode = DRV_HASH_SHA1,
1599 .hw_mode = DRV_HASH_HW_SHA1,
1600 .inter_digestsize = SHA1_DIGEST_SIZE,
1601 .min_hw_rev = CC_HW_REV_630,
1602 .std_body = CC_STD_NIST,
1603 },
1604 {
1605 .name = "sha256",
1606 .driver_name = "sha256-ccree",
1607 .mac_name = "hmac(sha256)",
1608 .mac_driver_name = "hmac-sha256-ccree",
1609 .blocksize = SHA256_BLOCK_SIZE,
1610 .is_mac = true,
1611 .template_ahash = {
1612 .init = cc_hash_init,
1613 .update = cc_hash_update,
1614 .final = cc_hash_final,
1615 .finup = cc_hash_finup,
1616 .digest = cc_hash_digest,
1617 .export = cc_hash_export,
1618 .import = cc_hash_import,
1619 .setkey = cc_hash_setkey,
1620 .halg = {
1621 .digestsize = SHA256_DIGEST_SIZE,
1622 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1623 },
1624 },
1625 .hash_mode = DRV_HASH_SHA256,
1626 .hw_mode = DRV_HASH_HW_SHA256,
1627 .inter_digestsize = SHA256_DIGEST_SIZE,
1628 .min_hw_rev = CC_HW_REV_630,
1629 .std_body = CC_STD_NIST,
1630 },
1631 {
1632 .name = "sha224",
1633 .driver_name = "sha224-ccree",
1634 .mac_name = "hmac(sha224)",
1635 .mac_driver_name = "hmac-sha224-ccree",
1636 .blocksize = SHA224_BLOCK_SIZE,
1637 .is_mac = true,
1638 .template_ahash = {
1639 .init = cc_hash_init,
1640 .update = cc_hash_update,
1641 .final = cc_hash_final,
1642 .finup = cc_hash_finup,
1643 .digest = cc_hash_digest,
1644 .export = cc_hash_export,
1645 .import = cc_hash_import,
1646 .setkey = cc_hash_setkey,
1647 .halg = {
1648 .digestsize = SHA224_DIGEST_SIZE,
1649 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1650 },
1651 },
1652 .hash_mode = DRV_HASH_SHA224,
1653 .hw_mode = DRV_HASH_HW_SHA256,
1654 .inter_digestsize = SHA256_DIGEST_SIZE,
1655 .min_hw_rev = CC_HW_REV_630,
1656 .std_body = CC_STD_NIST,
1657 },
1658 {
1659 .name = "sha384",
1660 .driver_name = "sha384-ccree",
1661 .mac_name = "hmac(sha384)",
1662 .mac_driver_name = "hmac-sha384-ccree",
1663 .blocksize = SHA384_BLOCK_SIZE,
1664 .is_mac = true,
1665 .template_ahash = {
1666 .init = cc_hash_init,
1667 .update = cc_hash_update,
1668 .final = cc_hash_final,
1669 .finup = cc_hash_finup,
1670 .digest = cc_hash_digest,
1671 .export = cc_hash_export,
1672 .import = cc_hash_import,
1673 .setkey = cc_hash_setkey,
1674 .halg = {
1675 .digestsize = SHA384_DIGEST_SIZE,
1676 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1677 },
1678 },
1679 .hash_mode = DRV_HASH_SHA384,
1680 .hw_mode = DRV_HASH_HW_SHA512,
1681 .inter_digestsize = SHA512_DIGEST_SIZE,
1682 .min_hw_rev = CC_HW_REV_712,
1683 .std_body = CC_STD_NIST,
1684 },
1685 {
1686 .name = "sha512",
1687 .driver_name = "sha512-ccree",
1688 .mac_name = "hmac(sha512)",
1689 .mac_driver_name = "hmac-sha512-ccree",
1690 .blocksize = SHA512_BLOCK_SIZE,
1691 .is_mac = true,
1692 .template_ahash = {
1693 .init = cc_hash_init,
1694 .update = cc_hash_update,
1695 .final = cc_hash_final,
1696 .finup = cc_hash_finup,
1697 .digest = cc_hash_digest,
1698 .export = cc_hash_export,
1699 .import = cc_hash_import,
1700 .setkey = cc_hash_setkey,
1701 .halg = {
1702 .digestsize = SHA512_DIGEST_SIZE,
1703 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1704 },
1705 },
1706 .hash_mode = DRV_HASH_SHA512,
1707 .hw_mode = DRV_HASH_HW_SHA512,
1708 .inter_digestsize = SHA512_DIGEST_SIZE,
1709 .min_hw_rev = CC_HW_REV_712,
1710 .std_body = CC_STD_NIST,
1711 },
1712 {
1713 .name = "md5",
1714 .driver_name = "md5-ccree",
1715 .mac_name = "hmac(md5)",
1716 .mac_driver_name = "hmac-md5-ccree",
1717 .blocksize = MD5_HMAC_BLOCK_SIZE,
1718 .is_mac = true,
1719 .template_ahash = {
1720 .init = cc_hash_init,
1721 .update = cc_hash_update,
1722 .final = cc_hash_final,
1723 .finup = cc_hash_finup,
1724 .digest = cc_hash_digest,
1725 .export = cc_hash_export,
1726 .import = cc_hash_import,
1727 .setkey = cc_hash_setkey,
1728 .halg = {
1729 .digestsize = MD5_DIGEST_SIZE,
1730 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1731 },
1732 },
1733 .hash_mode = DRV_HASH_MD5,
1734 .hw_mode = DRV_HASH_HW_MD5,
1735 .inter_digestsize = MD5_DIGEST_SIZE,
1736 .min_hw_rev = CC_HW_REV_630,
1737 .std_body = CC_STD_NIST,
1738 },
1739 {
1740 .name = "sm3",
1741 .driver_name = "sm3-ccree",
1742 .blocksize = SM3_BLOCK_SIZE,
1743 .is_mac = false,
1744 .template_ahash = {
1745 .init = cc_hash_init,
1746 .update = cc_hash_update,
1747 .final = cc_hash_final,
1748 .finup = cc_hash_finup,
1749 .digest = cc_hash_digest,
1750 .export = cc_hash_export,
1751 .import = cc_hash_import,
1752 .setkey = cc_hash_setkey,
1753 .halg = {
1754 .digestsize = SM3_DIGEST_SIZE,
1755 .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
1756 },
1757 },
1758 .hash_mode = DRV_HASH_SM3,
1759 .hw_mode = DRV_HASH_HW_SM3,
1760 .inter_digestsize = SM3_DIGEST_SIZE,
1761 .min_hw_rev = CC_HW_REV_713,
1762 .std_body = CC_STD_OSCCA,
1763 },
1764 {
1765 .mac_name = "xcbc(aes)",
1766 .mac_driver_name = "xcbc-aes-ccree",
1767 .blocksize = AES_BLOCK_SIZE,
1768 .is_mac = true,
1769 .template_ahash = {
1770 .init = cc_hash_init,
1771 .update = cc_mac_update,
1772 .final = cc_mac_final,
1773 .finup = cc_mac_finup,
1774 .digest = cc_mac_digest,
1775 .setkey = cc_xcbc_setkey,
1776 .export = cc_hash_export,
1777 .import = cc_hash_import,
1778 .halg = {
1779 .digestsize = AES_BLOCK_SIZE,
1780 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1781 },
1782 },
1783 .hash_mode = DRV_HASH_NULL,
1784 .hw_mode = DRV_CIPHER_XCBC_MAC,
1785 .inter_digestsize = AES_BLOCK_SIZE,
1786 .min_hw_rev = CC_HW_REV_630,
1787 .std_body = CC_STD_NIST,
1788 },
1789 {
1790 .mac_name = "cmac(aes)",
1791 .mac_driver_name = "cmac-aes-ccree",
1792 .blocksize = AES_BLOCK_SIZE,
1793 .is_mac = true,
1794 .template_ahash = {
1795 .init = cc_hash_init,
1796 .update = cc_mac_update,
1797 .final = cc_mac_final,
1798 .finup = cc_mac_finup,
1799 .digest = cc_mac_digest,
1800 .setkey = cc_cmac_setkey,
1801 .export = cc_hash_export,
1802 .import = cc_hash_import,
1803 .halg = {
1804 .digestsize = AES_BLOCK_SIZE,
1805 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1806 },
1807 },
1808 .hash_mode = DRV_HASH_NULL,
1809 .hw_mode = DRV_CIPHER_CMAC,
1810 .inter_digestsize = AES_BLOCK_SIZE,
1811 .min_hw_rev = CC_HW_REV_630,
1812 .std_body = CC_STD_NIST,
1813 },
1814};
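
/*
 * These templates are exposed through the generic crypto API. As an
 * illustrative sketch only (error handling elided), a kernel consumer
 * would reach the HMAC-SHA256 flavour with:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 * which binds to "hmac-sha256-ccree" whenever this driver wins the
 * cra_priority based algorithm selection (CC_CRA_PRIO).
 */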
1815
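/*
 * Allocate a cc_hash_alg and populate it from the given template. The keyed
 * (HMAC/MAC) flavour uses mac_name/mac_driver_name; the plain hash flavour
 * clears setkey and uses name/driver_name. The caller owns the allocation.
 */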
1816static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1817 struct device *dev, bool keyed)
1818{
1819 struct cc_hash_alg *t_crypto_alg;
1820 struct crypto_alg *alg;
1821 struct ahash_alg *halg;
1822
1823 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1824 if (!t_crypto_alg)
1825 return ERR_PTR(-ENOMEM);
1826
1827 t_crypto_alg->ahash_alg = template->template_ahash;
1828 halg = &t_crypto_alg->ahash_alg;
1829 alg = &halg->halg.base;
1830
1831 if (keyed) {
1832 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1833 template->mac_name);
1834 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1835 template->mac_driver_name);
1836 } else {
1837 halg->setkey = NULL;
1838 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1839 template->name);
1840 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1841 template->driver_name);
1842 }
1843 alg->cra_module = THIS_MODULE;
1844 alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1845 alg->cra_priority = CC_CRA_PRIO;
1846 alg->cra_blocksize = template->blocksize;
1847 alg->cra_alignmask = 0;
1848 alg->cra_exit = cc_cra_exit;
1849
1850 alg->cra_init = cc_cra_init;
1851 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1852
1853 t_crypto_alg->hash_mode = template->hash_mode;
1854 t_crypto_alg->hw_mode = template->hw_mode;
1855 t_crypto_alg->inter_digestsize = template->inter_digestsize;
1856
1857 return t_crypto_alg;
1858}
1859
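/*
 * Program the digest-length constants and the larval (initial) digests
 * into SRAM. The resulting layout, which cc_larval_digest_addr() and
 * cc_digest_len_addr() mirror, is:
 *
 *	digest-len | [digest-len-sha512] | MD5 | SHA1 | SHA224 | SHA256 |
 *	[SM3] | [SHA384 | SHA512]
 *
 * where bracketed entries exist only on HW revisions that support them
 * (>= 712 for the large SHA variants, >= 713 for SM3).
 */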
1860int cc_init_hash_sram(struct cc_drvdata *drvdata)
1861{
1862 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1863 cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1864 unsigned int larval_seq_len = 0;
1865 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1866 bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1867 bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
1868 int rc = 0;
1869
1870 /* Copy-to-sram digest-len */
1871 cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
1872 ARRAY_SIZE(cc_digest_len_init), larval_seq,
1873 &larval_seq_len);
1874 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1875 if (rc)
1876 goto init_digest_const_err;
1877
1878 sram_buff_ofs += sizeof(cc_digest_len_init);
1879 larval_seq_len = 0;
1880
1881 if (large_sha_supported) {
1882 /* Copy-to-sram digest-len for sha384/512 */
1883 cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
1884 ARRAY_SIZE(cc_digest_len_sha512_init),
1885 larval_seq, &larval_seq_len);
1886 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1887 if (rc)
1888 goto init_digest_const_err;
1889
1890 sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
1891 larval_seq_len = 0;
1892 }
1893
1894 /* The initial digests offset */
1895 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1896
	/* Copy the initial (larval) MD5 and SHA* digests to SRAM */
1898 cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
1899 larval_seq, &larval_seq_len);
1900 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1901 if (rc)
1902 goto init_digest_const_err;
1903 sram_buff_ofs += sizeof(cc_md5_init);
1904 larval_seq_len = 0;
1905
1906 cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
1907 ARRAY_SIZE(cc_sha1_init), larval_seq,
1908 &larval_seq_len);
1909 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1910 if (rc)
1911 goto init_digest_const_err;
1912 sram_buff_ofs += sizeof(cc_sha1_init);
1913 larval_seq_len = 0;
1914
1915 cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
1916 ARRAY_SIZE(cc_sha224_init), larval_seq,
1917 &larval_seq_len);
1918 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1919 if (rc)
1920 goto init_digest_const_err;
1921 sram_buff_ofs += sizeof(cc_sha224_init);
1922 larval_seq_len = 0;
1923
1924 cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
1925 ARRAY_SIZE(cc_sha256_init), larval_seq,
1926 &larval_seq_len);
1927 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1928 if (rc)
1929 goto init_digest_const_err;
1930 sram_buff_ofs += sizeof(cc_sha256_init);
1931 larval_seq_len = 0;
1932
1933 if (sm3_supported) {
1934 cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
1935 ARRAY_SIZE(cc_sm3_init), larval_seq,
1936 &larval_seq_len);
1937 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1938 if (rc)
1939 goto init_digest_const_err;
1940 sram_buff_ofs += sizeof(cc_sm3_init);
1941 larval_seq_len = 0;
1942 }
1943
1944 if (large_sha_supported) {
1945 cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
1946 (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
1947 &larval_seq_len);
1948 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1949 if (rc)
1950 goto init_digest_const_err;
1951 sram_buff_ofs += sizeof(cc_sha384_init);
1952 larval_seq_len = 0;
1953
1954 cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
1955 (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
1956 &larval_seq_len);
1957 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1958 if (rc)
1959 goto init_digest_const_err;
1960 }
1961
1962init_digest_const_err:
1963 return rc;
1964}
1965
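/*
 * Swap each pair of adjacent 32-bit words in @buf. @size is given in u32
 * words and is assumed even, which always holds for the 64-bit wide
 * SHA384/SHA512 larval digests this is applied to.
 */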
1966static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1967{
1968 int i;
1969 u32 tmp;
1970
1971 for (i = 0; i < size; i += 2) {
1972 tmp = buf[i];
1973 buf[i] = buf[i + 1];
1974 buf[i + 1] = tmp;
1975 }
1976}
1977
/*
 * Due to the way the HW works, we need to swap every
 * double word in the SHA384 and SHA512 larval hashes:
 * the engine expects the two 32-bit halves of each 64-bit
 * initial value in the opposite order to the CPU layout.
 */
1982void __init cc_hash_global_init(void)
1983{
1984 cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
1985 cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
1986}
1987
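/*
 * Allocate the hash handle, carve the needed constants area out of SRAM,
 * program it via cc_init_hash_sram() and register every template the HW
 * revision and standards body support: the MAC flavour first where
 * applicable, then the plain hash flavour (skipped for XCBC/CMAC, which
 * have no plain hash counterpart).
 */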
1988int cc_hash_alloc(struct cc_drvdata *drvdata)
1989{
1990 struct cc_hash_handle *hash_handle;
1991 cc_sram_addr_t sram_buff;
1992 u32 sram_size_to_alloc;
1993 struct device *dev = drvdata_to_dev(drvdata);
1994 int rc = 0;
1995 int alg;
1996
1997 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
1998 if (!hash_handle)
1999 return -ENOMEM;
2000
2001 INIT_LIST_HEAD(&hash_handle->hash_list);
2002 drvdata->hash_handle = hash_handle;
2003
2004 sram_size_to_alloc = sizeof(cc_digest_len_init) +
2005 sizeof(cc_md5_init) +
2006 sizeof(cc_sha1_init) +
2007 sizeof(cc_sha224_init) +
2008 sizeof(cc_sha256_init);
2009
2010 if (drvdata->hw_rev >= CC_HW_REV_713)
2011 sram_size_to_alloc += sizeof(cc_sm3_init);
2012
2013 if (drvdata->hw_rev >= CC_HW_REV_712)
2014 sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
2015 sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
2016
2017 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
2018 if (sram_buff == NULL_SRAM_ADDR) {
2019 dev_err(dev, "SRAM pool exhausted\n");
2020 rc = -ENOMEM;
2021 goto fail;
2022 }
2023
2024 /* The initial digest-len offset */
2025 hash_handle->digest_len_sram_addr = sram_buff;
2026
	/* Must be set before the alg registration, as it is used there */
2028 rc = cc_init_hash_sram(drvdata);
2029 if (rc) {
2030 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2031 goto fail;
2032 }
2033
2034 /* ahash registration */
2035 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2036 struct cc_hash_alg *t_alg;
2037 int hw_mode = driver_hash[alg].hw_mode;
2038
2039 /* Check that the HW revision and variants are suitable */
2040 if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
2041 !(drvdata->std_bodies & driver_hash[alg].std_body))
2042 continue;
2043
2044 if (driver_hash[alg].is_mac) {
2045 /* register hmac version */
2046 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
2047 if (IS_ERR(t_alg)) {
2048 rc = PTR_ERR(t_alg);
2049 dev_err(dev, "%s alg allocation failed\n",
2050 driver_hash[alg].driver_name);
2051 goto fail;
2052 }
2053 t_alg->drvdata = drvdata;
2054
2055 rc = crypto_register_ahash(&t_alg->ahash_alg);
2056 if (rc) {
2057 dev_err(dev, "%s alg registration failed\n",
2058 driver_hash[alg].driver_name);
2059 kfree(t_alg);
2060 goto fail;
2061 } else {
2062 list_add_tail(&t_alg->entry,
2063 &hash_handle->hash_list);
2064 }
2065 }
2066 if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2067 hw_mode == DRV_CIPHER_CMAC)
2068 continue;
2069
2070 /* register hash version */
2071 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2072 if (IS_ERR(t_alg)) {
2073 rc = PTR_ERR(t_alg);
2074 dev_err(dev, "%s alg allocation failed\n",
2075 driver_hash[alg].driver_name);
2076 goto fail;
2077 }
2078 t_alg->drvdata = drvdata;
2079
2080 rc = crypto_register_ahash(&t_alg->ahash_alg);
2081 if (rc) {
2082 dev_err(dev, "%s alg registration failed\n",
2083 driver_hash[alg].driver_name);
2084 kfree(t_alg);
2085 goto fail;
2086 } else {
2087 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2088 }
2089 }
2090
2091 return 0;
2092
2093fail:
	/* Unwind: unregister any already-registered algs and free the handle */
	cc_hash_free(drvdata);
2096 return rc;
2097}
2098
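/* Unregister all previously registered hash algs and free the handle */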
2099int cc_hash_free(struct cc_drvdata *drvdata)
2100{
2101 struct cc_hash_alg *t_hash_alg, *hash_n;
2102 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2103
2104 if (hash_handle) {
2105 list_for_each_entry_safe(t_hash_alg, hash_n,
2106 &hash_handle->hash_list, entry) {
2107 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2108 list_del(&t_hash_alg->entry);
2109 kfree(t_hash_alg);
2110 }
2111
2112 kfree(hash_handle);
2113 drvdata->hash_handle = NULL;
2114 }
2115 return 0;
2116}
2117
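/*
 * Build the AES-XCBC-MAC setup sequence: load the derived key K1 as the
 * AES key, K2 and K3 into the state registers, then the running MAC state.
 */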
2118static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2119 unsigned int *seq_size)
2120{
2121 unsigned int idx = *seq_size;
2122 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2123 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2124 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2125
2126 /* Setup XCBC MAC K1 */
2127 hw_desc_init(&desc[idx]);
2128 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2129 XCBC_MAC_K1_OFFSET),
2130 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2131 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2132 set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
2133 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2134 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2135 set_flow_mode(&desc[idx], S_DIN_to_AES);
2136 idx++;
2137
2138 /* Setup XCBC MAC K2 */
2139 hw_desc_init(&desc[idx]);
2140 set_din_type(&desc[idx], DMA_DLLI,
2141 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2142 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2143 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2144 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2145 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2146 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2147 set_flow_mode(&desc[idx], S_DIN_to_AES);
2148 idx++;
2149
2150 /* Setup XCBC MAC K3 */
2151 hw_desc_init(&desc[idx]);
2152 set_din_type(&desc[idx], DMA_DLLI,
2153 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2154 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2155 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2156 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2157 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2158 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2159 set_flow_mode(&desc[idx], S_DIN_to_AES);
2160 idx++;
2161
2162 /* Loading MAC state */
2163 hw_desc_init(&desc[idx]);
2164 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2165 CC_AES_BLOCK_SIZE, NS_BIT);
2166 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2167 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2168 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2169 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2170 set_flow_mode(&desc[idx], S_DIN_to_AES);
2171 idx++;
2172 *seq_size = idx;
2173}
2174
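/*
 * Build the AES-CMAC setup sequence: load the user key, then the running
 * MAC state. A 24-byte (192-bit) key is loaded as a full AES_MAX_KEY_SIZE
 * buffer, relying on the tail having been prepared at setkey time.
 */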
2175static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2176 unsigned int *seq_size)
2177{
2178 unsigned int idx = *seq_size;
2179 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2180 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2181 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2182
2183 /* Setup CMAC Key */
2184 hw_desc_init(&desc[idx]);
2185 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2186 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2187 ctx->key_params.keylen), NS_BIT);
2188 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2189 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2190 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2191 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2192 set_flow_mode(&desc[idx], S_DIN_to_AES);
2193 idx++;
2194
2195 /* Load MAC state */
2196 hw_desc_init(&desc[idx]);
2197 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2198 CC_AES_BLOCK_SIZE, NS_BIT);
2199 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2200 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2201 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2202 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2203 set_flow_mode(&desc[idx], S_DIN_to_AES);
2204 idx++;
2205 *seq_size = idx;
2206}
2207
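/*
 * Append the data-processing descriptors for the request: a single DLLI
 * descriptor for contiguous data, or a BYPASS descriptor that copies the
 * MLLI table into SRAM followed by an MLLI processing descriptor otherwise.
 * A NULL buffer type builds nothing; is_not_last_data flags the final
 * descriptor as not carrying the last of the data.
 */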
2208static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2209 struct cc_hash_ctx *ctx, unsigned int flow_mode,
2210 struct cc_hw_desc desc[], bool is_not_last_data,
2211 unsigned int *seq_size)
2212{
2213 unsigned int idx = *seq_size;
2214 struct device *dev = drvdata_to_dev(ctx->drvdata);
2215
2216 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2217 hw_desc_init(&desc[idx]);
2218 set_din_type(&desc[idx], DMA_DLLI,
2219 sg_dma_address(areq_ctx->curr_sg),
2220 areq_ctx->curr_sg->length, NS_BIT);
2221 set_flow_mode(&desc[idx], flow_mode);
2222 idx++;
2223 } else {
2224 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			dev_dbg(dev, "NULL mode\n");
2226 /* nothing to build */
2227 return;
2228 }
2229 /* bypass */
2230 hw_desc_init(&desc[idx]);
2231 set_din_type(&desc[idx], DMA_DLLI,
2232 areq_ctx->mlli_params.mlli_dma_addr,
2233 areq_ctx->mlli_params.mlli_len, NS_BIT);
2234 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2235 areq_ctx->mlli_params.mlli_len);
2236 set_flow_mode(&desc[idx], BYPASS);
2237 idx++;
2238 /* process */
2239 hw_desc_init(&desc[idx]);
2240 set_din_type(&desc[idx], DMA_MLLI,
2241 ctx->drvdata->mlli_sram_addr,
2242 areq_ctx->mlli_nents, NS_BIT);
2243 set_flow_mode(&desc[idx], flow_mode);
2244 idx++;
2245 }
2246 if (is_not_last_data)
2247 set_din_not_last_indication(&desc[(idx - 1)]);
2248 /* return updated desc sequence size */
2249 *seq_size = idx;
2250}
2251
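/*
 * Return the larval (initial) digest constants for the given hash mode,
 * falling back to the MD5 constants on an invalid mode, mirroring the
 * SRAM-address fallback in cc_larval_digest_addr().
 */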
2252static const void *cc_larval_digest(struct device *dev, u32 mode)
2253{
2254 switch (mode) {
2255 case DRV_HASH_MD5:
2256 return cc_md5_init;
2257 case DRV_HASH_SHA1:
2258 return cc_sha1_init;
2259 case DRV_HASH_SHA224:
2260 return cc_sha224_init;
2261 case DRV_HASH_SHA256:
2262 return cc_sha256_init;
2263 case DRV_HASH_SHA384:
2264 return cc_sha384_init;
2265 case DRV_HASH_SHA512:
2266 return cc_sha512_init;
2267 case DRV_HASH_SM3:
2268 return cc_sm3_init;
2269 default:
2270 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2271 return cc_md5_init;
2272 }
2273}
2274
/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata The driver private context (struct cc_drvdata *)
 * \param mode The hash mode. Supported modes:
 *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
 *
 * \return cc_sram_addr_t The address of the initial digest in SRAM
 */
2284cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2285{
2286 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2287 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2288 struct device *dev = drvdata_to_dev(_drvdata);
2289 bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
2290 cc_sram_addr_t addr;
2291
2292 switch (mode) {
2293 case DRV_HASH_NULL:
		break; /* Ignore */
2295 case DRV_HASH_MD5:
2296 return (hash_handle->larval_digest_sram_addr);
2297 case DRV_HASH_SHA1:
2298 return (hash_handle->larval_digest_sram_addr +
2299 sizeof(cc_md5_init));
2300 case DRV_HASH_SHA224:
2301 return (hash_handle->larval_digest_sram_addr +
2302 sizeof(cc_md5_init) +
2303 sizeof(cc_sha1_init));
2304 case DRV_HASH_SHA256:
2305 return (hash_handle->larval_digest_sram_addr +
2306 sizeof(cc_md5_init) +
2307 sizeof(cc_sha1_init) +
2308 sizeof(cc_sha224_init));
2309 case DRV_HASH_SM3:
2310 return (hash_handle->larval_digest_sram_addr +
2311 sizeof(cc_md5_init) +
2312 sizeof(cc_sha1_init) +
2313 sizeof(cc_sha224_init) +
2314 sizeof(cc_sha256_init));
2315 case DRV_HASH_SHA384:
2316 addr = (hash_handle->larval_digest_sram_addr +
2317 sizeof(cc_md5_init) +
2318 sizeof(cc_sha1_init) +
2319 sizeof(cc_sha224_init) +
2320 sizeof(cc_sha256_init));
2321 if (sm3_supported)
2322 addr += sizeof(cc_sm3_init);
2323 return addr;
2324 case DRV_HASH_SHA512:
2325 addr = (hash_handle->larval_digest_sram_addr +
2326 sizeof(cc_md5_init) +
2327 sizeof(cc_sha1_init) +
2328 sizeof(cc_sha224_init) +
2329 sizeof(cc_sha256_init) +
2330 sizeof(cc_sha384_init));
2331 if (sm3_supported)
2332 addr += sizeof(cc_sm3_init);
2333 return addr;
2334 default:
2335 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2336 }
2337
	/* Invalid mode: return a valid (if wrong) address to avoid a kernel crash */
2339 return hash_handle->larval_digest_sram_addr;
2340}
2341
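/*
 * Return the SRAM address of the digest-length constant for the given hash
 * mode; SHA384/SHA512 use the second constant, stored right after the
 * default one. Invalid modes fall back to the default address.
 */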
2342cc_sram_addr_t
2343cc_digest_len_addr(void *drvdata, u32 mode)
2344{
2345 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2346 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2347 cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2348
2349 switch (mode) {
2350 case DRV_HASH_SHA1:
2351 case DRV_HASH_SHA224:
2352 case DRV_HASH_SHA256:
2353 case DRV_HASH_MD5:
2354 return digest_len_addr;
2355 case DRV_HASH_SHA384:
2356 case DRV_HASH_SHA512:
2357 return digest_len_addr + sizeof(cc_digest_len_init);
2358 default:
		return digest_len_addr; /* Invalid mode: avoid a crash */
2360 }
2361}