// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8

struct cc_hash_handle {
	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
	cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
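/*
 * Note: md5_init below deliberately reuses the SHA-1 constants. MD5's
 * initial state (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476) is
 * word-for-word identical to SHA-1's H0..H3, loaded here in reverse order.
 */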
static const u32 md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static u64 sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static u64 sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
static const u32 sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };

static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};

/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the origin digest; the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for an HMAC key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

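/*
 * cc_map_result() - DMA-map the buffer that receives the final digest.
 * Mapped bidirectionally because the engine writes the result back into it.
 */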
static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

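/*
 * cc_init_req() - reset the per-request state. For HMAC, the precomputed
 * ipad digest, initial length and opad digest are copied in from the
 * session context; for a plain hash, the mode's larval (initial) digest
 * is used instead.
 */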
static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len, digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}

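/*
 * cc_map_req() - DMA-map the request state buffers: the intermediate
 * digest, the running byte count (skipped for XCBC, which keeps none) and,
 * for HMAC, the opad digest. Partial mappings are unwound on failure.
 */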
static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

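/*
 * cc_fin_hmac() - append the outer-hash descriptors for HMAC finalization:
 * save the inner digest, reload the opad-XORed key state and length, then
 * hash the inner digest again, yielding H(K ^ opad || H(K ^ ipad || m)).
 */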
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}

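/*
 * cc_hash_digest() - one-shot hash/HMAC of a whole request: map the state,
 * result and source buffers, load the initial (or ipad) digest and length,
 * stream the data through the engine, then finalize and DMA the digest out.
 */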
static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_digest_addr =
		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

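/*
 * cc_restore_hash() - reload a previously saved intermediate digest and
 * running byte count into the engine and queue the data descriptors; used
 * by update/final/finup to resume a multi-part hash.
 */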
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}

static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x does not require HW update\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}

static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}

static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}

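/*
 * cc_hash_setkey() - precompute the HMAC ipad/opad digests for a new key,
 * following the usual RFC 2104 scheme: a key longer than the block size is
 * hashed first, the key block is zero-padded, and the block XORed with the
 * ipad/opad constants is hashed once; both resulting digests are cached in
 * the context so each request starts from the precomputed state. A zero
 * keylen falls back to the plain-hash flow.
 */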
static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
	 * any NON-ZERO value utilizes HMAC flow
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, (void *)ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kzfree(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (rc)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kzfree(ctx->key_params.key);

	return rc;
}

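/*
 * cc_xcbc_setkey() - derive the three AES-XCBC-MAC subkeys of RFC 3566,
 * K1 = AES-K(0x01^16), K2 = AES-K(0x02^16), K3 = AES-K(0x03^16), using the
 * engine's ECB encrypt flow, and store them at fixed offsets in the
 * opad_tmp_keys buffer.
 */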
static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kzfree(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	if (rc)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kzfree(ctx->key_params.key);

	return rc;
}

static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	return 0;
}

static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, (void *)ctx->digest_buff,
			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}

static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;
	else
		return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit\n");
	cc_free_ctx(ctx);
}

static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x does not require HW update\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_update_complete;
	cc_req.user_arg = (void *)req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

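/*
 * cc_mac_final() - finalize an AES-XCBC/CMAC computation. If previous
 * updates consumed the data on an exact block boundary (rem_cnt == 0), the
 * saved block state is first ECB-decrypted with K1 to recover the previous
 * block_state-XOR-M[n] value, so the last block can be re-processed with
 * the proper final-block padding.
 */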
static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_digest_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

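/*
 * cc_hash_export()/cc_hash_import() - (de)serialize the request state as
 * [magic | intermediate digest | byte count | buffered-byte count |
 * buffered data]. The layout matches the CC_STATE_SIZE() statesize
 * advertised below; import rejects a bad magic or an oversized buffer.
 */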
1499static int cc_hash_export(struct ahash_request *req, void *out)
1500{
1501 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1502 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1503 struct ahash_req_ctx *state = ahash_request_ctx(req);
1504 u8 *curr_buff = cc_hash_buf(state);
1505 u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1506 const u32 tmp = CC_EXPORT_MAGIC;
1507
1508 memcpy(out, &tmp, sizeof(u32));
1509 out += sizeof(u32);
1510
1511 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1512 out += ctx->inter_digestsize;
1513
1514 memcpy(out, state->digest_bytes_len, ctx->hash_len);
1515 out += ctx->hash_len;
1516
1517 memcpy(out, &curr_buff_cnt, sizeof(u32));
1518 out += sizeof(u32);
1519
1520 memcpy(out, curr_buff, curr_buff_cnt);
1521
1522 return 0;
1523}
1524
1525static int cc_hash_import(struct ahash_request *req, const void *in)
1526{
1527 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1528 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1529 struct device *dev = drvdata_to_dev(ctx->drvdata);
1530 struct ahash_req_ctx *state = ahash_request_ctx(req);
1531 u32 tmp;
1532
1533 memcpy(&tmp, in, sizeof(u32));
1534 if (tmp != CC_EXPORT_MAGIC)
1535 return -EINVAL;
1536 in += sizeof(u32);
1537
1538 cc_init_req(dev, state, ctx);
1539
1540 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1541 in += ctx->inter_digestsize;
1542
1543 memcpy(state->digest_bytes_len, in, ctx->hash_len);
1544 in += ctx->hash_len;
1545
1546 /* Sanity check the data as much as possible */
1547 memcpy(&tmp, in, sizeof(u32));
1548 if (tmp > CC_MAX_HASH_BLCK_SIZE)
1549 return -EINVAL;
1550 in += sizeof(u32);
1551
1552 state->buf_cnt[0] = tmp;
1553 memcpy(state->buffers[0], in, tmp);
1554
1555 return 0;
1556}
1557
1558struct cc_hash_template {
1559 char name[CRYPTO_MAX_ALG_NAME];
1560 char driver_name[CRYPTO_MAX_ALG_NAME];
1561 char mac_name[CRYPTO_MAX_ALG_NAME];
1562 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1563 unsigned int blocksize;
1564 bool is_mac;
1565 bool synchronize;
1566 struct ahash_alg template_ahash;
1567 int hash_mode;
1568 int hw_mode;
1569 int inter_digestsize;
1570 struct cc_drvdata *drvdata;
1571 u32 min_hw_rev;
1572 enum cc_std_body std_body;
1573};
1574
1575#define CC_STATE_SIZE(_x) \
1576 ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1577
1578/* hash descriptors */
1579static struct cc_hash_template driver_hash[] = {
1580 //Asynchronize hash template
1581 {
1582 .name = "sha1",
1583 .driver_name = "sha1-ccree",
1584 .mac_name = "hmac(sha1)",
1585 .mac_driver_name = "hmac-sha1-ccree",
1586 .blocksize = SHA1_BLOCK_SIZE,
1587 .is_mac = true,
1588 .synchronize = false,
1589 .template_ahash = {
1590 .init = cc_hash_init,
1591 .update = cc_hash_update,
1592 .final = cc_hash_final,
1593 .finup = cc_hash_finup,
1594 .digest = cc_hash_digest,
1595 .export = cc_hash_export,
1596 .import = cc_hash_import,
1597 .setkey = cc_hash_setkey,
1598 .halg = {
1599 .digestsize = SHA1_DIGEST_SIZE,
1600 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1601 },
1602 },
1603 .hash_mode = DRV_HASH_SHA1,
1604 .hw_mode = DRV_HASH_HW_SHA1,
1605 .inter_digestsize = SHA1_DIGEST_SIZE,
1606 .min_hw_rev = CC_HW_REV_630,
1607 .std_body = CC_STD_NIST,
1608 },
1609 {
1610 .name = "sha256",
1611 .driver_name = "sha256-ccree",
1612 .mac_name = "hmac(sha256)",
1613 .mac_driver_name = "hmac-sha256-ccree",
1614 .blocksize = SHA256_BLOCK_SIZE,
1615 .is_mac = true,
1616 .template_ahash = {
1617 .init = cc_hash_init,
1618 .update = cc_hash_update,
1619 .final = cc_hash_final,
1620 .finup = cc_hash_finup,
1621 .digest = cc_hash_digest,
1622 .export = cc_hash_export,
1623 .import = cc_hash_import,
1624 .setkey = cc_hash_setkey,
1625 .halg = {
1626 .digestsize = SHA256_DIGEST_SIZE,
1627 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1628 },
1629 },
1630 .hash_mode = DRV_HASH_SHA256,
1631 .hw_mode = DRV_HASH_HW_SHA256,
1632 .inter_digestsize = SHA256_DIGEST_SIZE,
1633 .min_hw_rev = CC_HW_REV_630,
1634 .std_body = CC_STD_NIST,
1635 },
1636 {
1637 .name = "sha224",
1638 .driver_name = "sha224-ccree",
1639 .mac_name = "hmac(sha224)",
1640 .mac_driver_name = "hmac-sha224-ccree",
1641 .blocksize = SHA224_BLOCK_SIZE,
1642 .is_mac = true,
1643 .template_ahash = {
1644 .init = cc_hash_init,
1645 .update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha384",
		.driver_name = "sha384-ccree",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-ccree",
		.blocksize = SHA384_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-ccree",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-ccree",
		.blocksize = SHA512_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "md5",
		.driver_name = "md5-ccree",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-ccree",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sm3",
		.driver_name = "sm3-ccree",
		.blocksize = SM3_BLOCK_SIZE,
		.is_mac = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SM3_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SM3,
		.hw_mode = DRV_HASH_HW_SM3,
		.inter_digestsize = SM3_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
	},
	{
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_xcbc_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_cmac_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};

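/*!
 * Allocates a cc_hash_alg instance and populates it from the given
 * driver template, either as the keyed (HMAC/MAC) variant using the
 * template's MAC names, or as the plain hash variant with setkey
 * removed.
 *
 * \param template The driver hash template to instantiate
 * \param dev Associated device (currently unused)
 * \param keyed Select the keyed (MAC) or plain hash variant
 *
 * \return struct cc_hash_alg* The allocated algorithm or an ERR_PTR
 */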
static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
					     struct device *dev, bool keyed)
{
	struct cc_hash_alg *t_crypto_alg;
	struct crypto_alg *alg;
	struct ahash_alg *halg;

	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
	if (!t_crypto_alg)
		return ERR_PTR(-ENOMEM);

	t_crypto_alg->ahash_alg = template->template_ahash;
	halg = &t_crypto_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
	} else {
		halg->setkey = NULL;
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
	alg->cra_priority = CC_CRA_PRIO;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_exit = cc_cra_exit;

	alg->cra_init = cc_cra_init;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_crypto_alg->hash_mode = template->hash_mode;
	t_crypto_alg->hw_mode = template->hw_mode;
	t_crypto_alg->inter_digestsize = template->inter_digestsize;

	return t_crypto_alg;
}

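/*!
 * Loads the digest-length constants and the larval (initial) digest
 * values into SRAM, covering SHA384/SHA512 and SM3 only when the HW
 * revision supports them. Also records the larval digest base address
 * in the hash handle.
 *
 * \param drvdata Driver private context
 *
 * \return int 0 on success, or the error of a failed init request
 */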
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
	unsigned int larval_seq_len = 0;
	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
	int rc = 0;

	/* Copy-to-sram digest-len */
	cc_set_sram_desc(digest_len_init, sram_buff_ofs,
			 ARRAY_SIZE(digest_len_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_init);
	larval_seq_len = 0;

	if (large_sha_supported) {
		/* Copy-to-sram digest-len for sha384/512 */
		cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
				 ARRAY_SIZE(digest_len_sha512_init),
				 larval_seq, &larval_seq_len);
		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
		if (rc)
			goto init_digest_const_err;

		sram_buff_ofs += sizeof(digest_len_sha512_init);
		larval_seq_len = 0;
	}

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-sram initial SHA* digests */
	cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
			 larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(md5_init);
	larval_seq_len = 0;

	cc_set_sram_desc(sha1_init, sram_buff_ofs,
			 ARRAY_SIZE(sha1_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha1_init);
	larval_seq_len = 0;

	cc_set_sram_desc(sha224_init, sram_buff_ofs,
			 ARRAY_SIZE(sha224_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha224_init);
	larval_seq_len = 0;

	cc_set_sram_desc(sha256_init, sram_buff_ofs,
			 ARRAY_SIZE(sha256_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha256_init);
	larval_seq_len = 0;

	if (sm3_supported) {
		cc_set_sram_desc(sm3_init, sram_buff_ofs,
				 ARRAY_SIZE(sm3_init), larval_seq,
				 &larval_seq_len);
		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
		if (rc)
			goto init_digest_const_err;
		sram_buff_ofs += sizeof(sm3_init);
		larval_seq_len = 0;
	}

	if (large_sha_supported) {
		cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
				 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
				 &larval_seq_len);
		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
		if (rc)
			goto init_digest_const_err;
		sram_buff_ofs += sizeof(sha384_init);
		larval_seq_len = 0;

		cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
				 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
				 &larval_seq_len);
		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
		if (rc)
			goto init_digest_const_err;
	}

init_digest_const_err:
	return rc;
}

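/* Swaps each pair of adjacent 32-bit words in a buffer of 'size' words */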
static void __init cc_swap_dwords(u32 *buf, unsigned long size)
{
	int i;
	u32 tmp;

	for (i = 0; i < size; i += 2) {
		tmp = buf[i];
		buf[i] = buf[i + 1];
		buf[i + 1] = tmp;
	}
}

/*
 * Due to the way the HW works, we need to swap every
 * double word in the SHA384 and SHA512 larval hashes
 */
void __init cc_hash_global_init(void)
{
	cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
	cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
}

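/*!
 * Allocates the hash handle, reserves SRAM for the digest-length and
 * larval digest constants of the supported modes, initializes that
 * SRAM and registers every hash/MAC algorithm that fits the HW
 * revision and standards body.
 *
 * \param drvdata Driver private context
 *
 * \return int 0 on success, -ENOMEM or a registration error otherwise
 */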
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle;
	cc_sram_addr_t sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	sram_size_to_alloc = sizeof(digest_len_init) +
			     sizeof(md5_init) +
			     sizeof(sha1_init) +
			     sizeof(sha224_init) +
			     sizeof(sha256_init);

	if (drvdata->hw_rev >= CC_HW_REV_713)
		sram_size_to_alloc += sizeof(sm3_init);

	if (drvdata->hw_rev >= CC_HW_REV_712)
		sram_size_to_alloc += sizeof(digest_len_sha512_init) +
			sizeof(sha384_init) + sizeof(sha512_init);

	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* Must be set before the alg registration, as it is used there */
	rc = cc_init_hash_sram(drvdata);
	if (rc) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct cc_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* Check that the HW revision and variants are suitable */
		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & driver_hash[alg].std_body))
			continue;

		if (driver_hash[alg].is_mac) {
			/* register hmac version */
			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
			if (IS_ERR(t_alg)) {
				rc = PTR_ERR(t_alg);
				dev_err(dev, "%s alg allocation failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}
			t_alg->drvdata = drvdata;

			rc = crypto_register_ahash(&t_alg->ahash_alg);
			if (rc) {
				dev_err(dev, "%s alg registration failed\n",
					driver_hash[alg].driver_name);
				kfree(t_alg);
				goto fail;
			} else {
				list_add_tail(&t_alg->entry,
					      &hash_handle->hash_list);
			}
		}
		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
		    hw_mode == DRV_CIPHER_CMAC)
			continue;

		/* register hash version */
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}
	}

	return 0;

fail:
	kfree(drvdata->hash_handle);
	drvdata->hash_handle = NULL;
	return rc;
}

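/*!
 * Unregisters all previously registered hash/MAC algorithms and frees
 * the hash handle.
 *
 * \param drvdata Driver private context
 *
 * \return int Always 0
 */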
int cc_hash_free(struct cc_drvdata *drvdata)
{
	struct cc_hash_alg *t_hash_alg, *hash_n;
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;

	if (hash_handle) {
		list_for_each_entry_safe(t_hash_alg, hash_n,
					 &hash_handle->hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}

		kfree(hash_handle);
		drvdata->hash_handle = NULL;
	}
	return 0;
}

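/*!
 * Appends the descriptors that load the three derived XCBC-MAC keys
 * (K1 as the AES key, K2 and K3 into state registers) and the current
 * MAC state into the AES engine.
 *
 * \param areq The async hash request
 * \param desc The HW descriptor sequence to append to
 * \param seq_size In/out: current (and updated) sequence length
 */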
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

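/*!
 * Appends the descriptors that load the CMAC key and the current MAC
 * state into the AES engine. A 192-bit key is DMA-ed at the full
 * AES_MAX_KEY_SIZE length of its buffer.
 *
 * \param areq The async hash request
 * \param desc The HW descriptor sequence to append to
 * \param seq_size In/out: current (and updated) sequence length
 */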
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup CMAC Key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

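/*!
 * Appends the data-processing descriptors for the current request:
 * a single DLLI descriptor for contiguous data, or a BYPASS copy of
 * the MLLI table into SRAM followed by an MLLI descriptor otherwise.
 * Builds nothing for a NULL (empty) data buffer.
 *
 * \param areq_ctx The async request context
 * \param ctx The hash session context
 * \param flow_mode The data flow mode (hash or AES engine)
 * \param desc The HW descriptor sequence to append to
 * \param is_not_last_data Mark the final descriptor as not-last-data
 * \param seq_size In/out: current (and updated) sequence length
 */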
static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
			struct cc_hash_ctx *ctx, unsigned int flow_mode,
			struct cc_hw_desc desc[], bool is_not_last_data,
			unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	} else {
		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			dev_dbg(dev, " NULL mode\n");
			/* nothing to build */
			return;
		}
		/* bypass */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
		idx++;
		/* process */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	}
	if (is_not_last_data)
		set_din_not_last_indication(&desc[(idx - 1)]);
	/* return updated desc sequence size */
	*seq_size = idx;
}

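/*!
 * Returns the host copy of the larval (initial) digest for the given
 * hash mode, falling back to the MD5 value on an invalid mode.
 *
 * \param dev Device context (for error reporting)
 * \param mode The hash mode
 *
 * \return const void* Pointer to the larval digest constant
 */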
static const void *cc_larval_digest(struct device *dev, u32 mode)
{
	switch (mode) {
	case DRV_HASH_MD5:
		return md5_init;
	case DRV_HASH_SHA1:
		return sha1_init;
	case DRV_HASH_SHA224:
		return sha224_init;
	case DRV_HASH_SHA256:
		return sha256_init;
	case DRV_HASH_SHA384:
		return sha384_init;
	case DRV_HASH_SHA512:
		return sha512_init;
	case DRV_HASH_SM3:
		return sm3_init;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
		return md5_init;
	}
}

/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The hash mode. Supported modes:
 *	       MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
 *
 * \return cc_sram_addr_t The address of the initial digest in SRAM
 */
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);
	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
	cc_sram_addr_t addr;

	switch (mode) {
	case DRV_HASH_NULL:
		break; /*Ignore*/
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init));
	case DRV_HASH_SM3:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init));
	case DRV_HASH_SHA384:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init));
		if (sm3_supported)
			addr += sizeof(sm3_init);
		return addr;
	case DRV_HASH_SHA512:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init) +
			sizeof(sha384_init));
		if (sm3_supported)
			addr += sizeof(sm3_init);
		return addr;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/* Return a valid (though incorrect) address to avoid a kernel crash */
	return hash_handle->larval_digest_sram_addr;
}

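/*!
 * Gets the address of the initial digest-length constant in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The hash mode. Supported modes:
 *	       MD5/SHA1/SHA224/SHA256 and SHA384/SHA512 when available
 *
 * \return cc_sram_addr_t The address of the digest-length constant
 */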
cc_sram_addr_t
cc_digest_len_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;

	switch (mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA224:
	case DRV_HASH_SHA256:
	case DRV_HASH_MD5:
		return digest_len_addr;
#if (CC_DEV_SHA_MAX > 256)
	case DRV_HASH_SHA384:
	case DRV_HASH_SHA512:
		return digest_len_addr + sizeof(digest_len_init);
#endif
	default:
		return digest_len_addr; /*to avoid kernel crash*/
	}
}