1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <crypto/internal/aead.h>
5#include <crypto/authenc.h>
6#include <crypto/scatterwalk.h>
7#include <linux/dmapool.h>
8#include <linux/dma-mapping.h>
9
10#include "cc_buffer_mgr.h"
11#include "cc_lli_defs.h"
12#include "cc_cipher.h"
13#include "cc_hash.h"
14#include "cc_aead.h"
15
16enum dma_buffer_type {
17 DMA_NULL_TYPE = -1,
18 DMA_SGL_TYPE = 1,
19 DMA_BUFF_TYPE = 2,
20};
21
22struct buff_mgr_handle {
23 struct dma_pool *mlli_buffs_pool;
24};
25
26union buffer_array_entry {
27 struct scatterlist *sgl;
28 dma_addr_t buffer_dma;
29};
30
31struct buffer_array {
32 unsigned int num_of_buffers;
33 union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
34 unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
35 int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
36 int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
37 enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
38 bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
39 u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
40};
41
42static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
43{
44 switch (type) {
45 case CC_DMA_BUF_NULL:
46 return "BUF_NULL";
47 case CC_DMA_BUF_DLLI:
48 return "BUF_DLLI";
49 case CC_DMA_BUF_MLLI:
50 return "BUF_MLLI";
51 default:
52 return "BUF_INVALID";
53 }
54}
55
56/**
57 * cc_copy_mac() - Copy MAC to temporary location
58 *
59 * @dev: device object
60 * @req: aead request object
61 * @dir: [IN] copy from/to sgl
62 */
63static void cc_copy_mac(struct device *dev, struct aead_request *req,
64 enum cc_sg_cpy_direct dir)
65{
66 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
67 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
68 u32 skip = req->assoclen + req->cryptlen;
69
70 if (areq_ctx->is_gcm4543)
71 skip += crypto_aead_ivsize(tfm);
72
73 cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
74 (skip - areq_ctx->req_authsize), skip, dir);
75}
76
/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the number of bytes in the last entry
 * @is_chained: [OUT] Set to true if the SG list contains chained entries
 */
84static unsigned int cc_get_sgl_nents(struct device *dev,
85 struct scatterlist *sg_list,
86 unsigned int nbytes, u32 *lbytes,
87 bool *is_chained)
88{
89 unsigned int nents = 0;
90
91 while (nbytes && sg_list) {
92 if (sg_list->length) {
93 nents++;
94 /* get the number of bytes in the last entry */
95 *lbytes = nbytes;
96 nbytes -= (sg_list->length > nbytes) ?
97 nbytes : sg_list->length;
98 sg_list = sg_next(sg_list);
99 } else {
100 sg_list = (struct scatterlist *)sg_page(sg_list);
101 if (is_chained)
102 *is_chained = true;
103 }
104 }
105 dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
106 return nents;
107}
108
/**
 * cc_zero_sgl() - Zero scatter list data.
 *
 * @sgl: SG list to zero
 * @data_len: Number of bytes to zero, starting from the head of the list
 */
114void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
115{
116 struct scatterlist *current_sg = sgl;
117 int sg_index = 0;
118
119 while (sg_index <= data_len) {
120 if (!current_sg) {
121 /* reached the end of the sgl --> just return back */
122 return;
123 }
124 memset(sg_virt(current_sg), 0, current_sg->length);
125 sg_index += current_sg->length;
126 current_sg = sg_next(current_sg);
127 }
128}
129
/**
 * cc_copy_sg_portion() - Copy scatter list data, from to_skip to end,
 * to dest and vice versa.
 *
 * @dev: device object
 * @dest: contiguous (flat) buffer to copy to/from
 * @sg: SG list to copy from/to
 * @to_skip: offset in bytes into the SG list at which the copy starts
 * @end: offset in bytes into the SG list at which the copy ends
 * @direct: CC_SG_TO_BUF copies from the SG list into dest,
 *          CC_SG_FROM_BUF copies from dest into the SG list
 */
140void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
141 u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
142{
143 u32 nents, lbytes;
144
145 nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
146 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
147 (direct == CC_SG_TO_BUF));
148}
149
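/*
 * cc_render_buff_to_mlli() - Render a contiguous DMA buffer into MLLI entries.
 *
 * Splits the buffer into chunks of at most CC_MAX_MLLI_ENTRY_SIZE bytes,
 * writing one LLI entry (an address/size word pair) per chunk at
 * *mlli_entry_pp and advancing both *mlli_entry_pp and *curr_nents.
 *
 * Return: 0 on success, -ENOMEM if the resulting number of entries would
 * exceed MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
 */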
150static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
151 u32 buff_size, u32 *curr_nents,
152 u32 **mlli_entry_pp)
153{
154 u32 *mlli_entry_p = *mlli_entry_pp;
155 u32 new_nents;
156
157 /* Verify there is no memory overflow*/
158 new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
159 if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
160 dev_err(dev, "Too many mlli entries. current %d max %d\n",
161 new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
162 return -ENOMEM;
163 }
164
 /* handle a buffer longer than 64 KB */
166 while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
167 cc_lli_set_addr(mlli_entry_p, buff_dma);
168 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
169 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
170 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
171 mlli_entry_p[LLI_WORD1_OFFSET]);
172 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
173 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
174 mlli_entry_p = mlli_entry_p + 2;
175 (*curr_nents)++;
176 }
177 /*Last entry */
178 cc_lli_set_addr(mlli_entry_p, buff_dma);
179 cc_lli_set_size(mlli_entry_p, buff_size);
180 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
181 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
182 mlli_entry_p[LLI_WORD1_OFFSET]);
183 mlli_entry_p = mlli_entry_p + 2;
184 *mlli_entry_pp = mlli_entry_p;
185 (*curr_nents)++;
186 return 0;
187}
188
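/*
 * cc_render_sg_to_mlli() - Render a mapped scatterlist into MLLI entries.
 *
 * Walks the SGL, starting sgl_offset bytes into the first entry, and feeds
 * each entry's DMA address and length to cc_render_buff_to_mlli() until
 * sgl_data_len bytes have been rendered.
 */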
189static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
190 u32 sgl_data_len, u32 sgl_offset,
191 u32 *curr_nents, u32 **mlli_entry_pp)
192{
193 struct scatterlist *curr_sgl = sgl;
194 u32 *mlli_entry_p = *mlli_entry_pp;
195 s32 rc = 0;
196
197 for ( ; (curr_sgl && sgl_data_len);
198 curr_sgl = sg_next(curr_sgl)) {
199 u32 entry_data_len =
200 (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
201 sg_dma_len(curr_sgl) - sgl_offset :
202 sgl_data_len;
203 sgl_data_len -= entry_data_len;
204 rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
205 sgl_offset, entry_data_len,
206 curr_nents, &mlli_entry_p);
207 if (rc)
208 return rc;
209
210 sgl_offset = 0;
211 }
212 *mlli_entry_pp = mlli_entry_p;
213 return 0;
214}
215
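/*
 * cc_generate_mlli() - Build a single MLLI table from the accumulated
 * buffer_array entries.
 *
 * Allocates the table from the DMA pool pointed to by mlli_params->curr_pool
 * and renders each DMA_SGL_TYPE/DMA_BUFF_TYPE entry into it. Illustrative
 * call sequence (a sketch only; variable names are hypothetical):
 *
 *        struct buffer_array sg_data;
 *
 *        sg_data.num_of_buffers = 0;
 *        cc_add_sg_entry(dev, &sg_data, nents, sgl, len, 0, true, &mlli_nents);
 *        mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 *        rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 */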
216static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
217 struct mlli_params *mlli_params, gfp_t flags)
218{
219 u32 *mlli_p;
220 u32 total_nents = 0, prev_total_nents = 0;
221 int rc = 0, i;
222
223 dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
224
225 /* Allocate memory from the pointed pool */
226 mlli_params->mlli_virt_addr =
227 dma_pool_alloc(mlli_params->curr_pool, flags,
228 &mlli_params->mlli_dma_addr);
229 if (!mlli_params->mlli_virt_addr) {
230 dev_err(dev, "dma_pool_alloc() failed\n");
231 rc = -ENOMEM;
232 goto build_mlli_exit;
233 }
234 /* Point to start of MLLI */
235 mlli_p = (u32 *)mlli_params->mlli_virt_addr;
 /* go over all SGs and link them into one MLLI table */
237 for (i = 0; i < sg_data->num_of_buffers; i++) {
238 union buffer_array_entry *entry = &sg_data->entry[i];
239 u32 tot_len = sg_data->total_data_len[i];
240 u32 offset = sg_data->offset[i];
241
242 if (sg_data->type[i] == DMA_SGL_TYPE)
243 rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
244 offset, &total_nents,
245 &mlli_p);
246 else /*DMA_BUFF_TYPE*/
247 rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
248 tot_len, &total_nents,
249 &mlli_p);
250 if (rc)
251 return rc;
252
253 /* set last bit in the current table */
254 if (sg_data->mlli_nents[i]) {
255 /*Calculate the current MLLI table length for the
256 *length field in the descriptor
257 */
258 *sg_data->mlli_nents[i] +=
259 (total_nents - prev_total_nents);
260 prev_total_nents = total_nents;
261 }
262 }
263
264 /* Set MLLI size for the bypass operation */
265 mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
266
267 dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
268 mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
269 mlli_params->mlli_len);
270
271build_mlli_exit:
272 return rc;
273}
274
275static void cc_add_buffer_entry(struct device *dev,
276 struct buffer_array *sgl_data,
277 dma_addr_t buffer_dma, unsigned int buffer_len,
278 bool is_last_entry, u32 *mlli_nents)
279{
280 unsigned int index = sgl_data->num_of_buffers;
281
282 dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
283 index, &buffer_dma, buffer_len, is_last_entry);
284 sgl_data->nents[index] = 1;
285 sgl_data->entry[index].buffer_dma = buffer_dma;
286 sgl_data->offset[index] = 0;
287 sgl_data->total_data_len[index] = buffer_len;
288 sgl_data->type[index] = DMA_BUFF_TYPE;
289 sgl_data->is_last[index] = is_last_entry;
290 sgl_data->mlli_nents[index] = mlli_nents;
291 if (sgl_data->mlli_nents[index])
292 *sgl_data->mlli_nents[index] = 0;
293 sgl_data->num_of_buffers++;
294}
295
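/*
 * cc_add_sg_entry() - Add a mapped scatterlist to the buffer_array, to be
 * rendered later into the MLLI table by cc_generate_mlli(). If @mlli_nents
 * is provided it is reset here and later accumulates the number of MLLI
 * entries created for this buffer.
 */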
296static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
297 unsigned int nents, struct scatterlist *sgl,
298 unsigned int data_len, unsigned int data_offset,
299 bool is_last_table, u32 *mlli_nents)
300{
301 unsigned int index = sgl_data->num_of_buffers;
302
303 dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
304 index, nents, sgl, data_len, is_last_table);
305 sgl_data->nents[index] = nents;
306 sgl_data->entry[index].sgl = sgl;
307 sgl_data->offset[index] = data_offset;
308 sgl_data->total_data_len[index] = data_len;
309 sgl_data->type[index] = DMA_SGL_TYPE;
310 sgl_data->is_last[index] = is_last_table;
311 sgl_data->mlli_nents[index] = mlli_nents;
312 if (sgl_data->mlli_nents[index])
313 *sgl_data->mlli_nents[index] = 0;
314 sgl_data->num_of_buffers++;
315}
316
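/*
 * cc_dma_map_sg() - DMA-map a (possibly chained) scatterlist one entry at a
 * time. Returns the number of requested entries on success, or 0 on failure
 * after unmapping whatever was already mapped.
 */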
317static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
318 enum dma_data_direction direction)
319{
320 u32 i, j;
321 struct scatterlist *l_sg = sg;
322
323 for (i = 0; i < nents; i++) {
324 if (!l_sg)
325 break;
326 if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
327 dev_err(dev, "dma_map_page() sg buffer failed\n");
328 goto err;
329 }
330 l_sg = sg_next(l_sg);
331 }
332 return nents;
333
334err:
335 /* Restore mapped parts */
336 for (j = 0; j < i; j++) {
337 if (!sg)
338 break;
339 dma_unmap_sg(dev, sg, 1, direction);
340 sg = sg_next(sg);
341 }
342 return 0;
343}
344
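/*
 * cc_map_sg() - Count and DMA-map the entries of an SGL.
 *
 * On return, *nents holds the number of SGL entries covering @nbytes,
 * *lbytes the number of bytes in the last entry and *mapped_nents the
 * number of entries actually mapped (chained SGLs are mapped entry by
 * entry via cc_dma_map_sg()). Returns 0 on success or -ENOMEM.
 */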
345static int cc_map_sg(struct device *dev, struct scatterlist *sg,
346 unsigned int nbytes, int direction, u32 *nents,
347 u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
348{
349 bool is_chained = false;
350
351 if (sg_is_last(sg)) {
352 /* One entry only case -set to DLLI */
353 if (dma_map_sg(dev, sg, 1, direction) != 1) {
354 dev_err(dev, "dma_map_sg() single buffer failed\n");
355 return -ENOMEM;
356 }
357 dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
358 &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
359 sg->offset, sg->length);
360 *lbytes = nbytes;
361 *nents = 1;
362 *mapped_nents = 1;
 } else { /* !sg_is_last */
364 *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
365 &is_chained);
366 if (*nents > max_sg_nents) {
367 *nents = 0;
368 dev_err(dev, "Too many fragments. current %d max %d\n",
369 *nents, max_sg_nents);
370 return -ENOMEM;
371 }
372 if (!is_chained) {
 /* In case an MMU is in use, the number of mapped nents
  * might differ from the original SGL nents
  */
376 *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
377 if (*mapped_nents == 0) {
378 *nents = 0;
379 dev_err(dev, "dma_map_sg() sg buffer failed\n");
380 return -ENOMEM;
381 }
382 } else {
383 /*In this case the driver maps entry by entry so it
384 * must have the same nents before and after map
385 */
386 *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
387 direction);
388 if (*mapped_nents != *nents) {
389 *nents = *mapped_nents;
390 dev_err(dev, "dma_map_sg() sg buffer failed\n");
391 return -ENOMEM;
392 }
393 }
394 }
395
396 return 0;
397}
398
399static int
400cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
401 u8 *config_data, struct buffer_array *sg_data,
402 unsigned int assoclen)
403{
404 dev_dbg(dev, " handle additional data config set to DLLI\n");
405 /* create sg for the current buffer */
406 sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
407 AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
408 if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
409 dev_err(dev, "dma_map_sg() config buffer failed\n");
410 return -ENOMEM;
411 }
412 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
413 &sg_dma_address(&areq_ctx->ccm_adata_sg),
414 sg_page(&areq_ctx->ccm_adata_sg),
415 sg_virt(&areq_ctx->ccm_adata_sg),
416 areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
417 /* prepare for case of MLLI */
418 if (assoclen > 0) {
419 cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
420 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
421 0, false, NULL);
422 }
423 return 0;
424}
425
426static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
427 u8 *curr_buff, u32 curr_buff_cnt,
428 struct buffer_array *sg_data)
429{
430 dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
431 /* create sg for the current buffer */
432 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
433 if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
434 dev_err(dev, "dma_map_sg() src buffer failed\n");
435 return -ENOMEM;
436 }
437 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
438 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
439 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
440 areq_ctx->buff_sg->length);
441 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
442 areq_ctx->curr_sg = areq_ctx->buff_sg;
443 areq_ctx->in_nents = 0;
444 /* prepare for case of MLLI */
445 cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
446 false, NULL);
447 return 0;
448}
449
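/*
 * cc_unmap_cipher_request() - Undo the DMA mappings and MLLI allocation done
 * by cc_map_cipher_request() for the IV, source and destination SGLs.
 */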
450void cc_unmap_cipher_request(struct device *dev, void *ctx,
451 unsigned int ivsize, struct scatterlist *src,
452 struct scatterlist *dst)
453{
454 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
455
456 if (req_ctx->gen_ctx.iv_dma_addr) {
457 dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
458 &req_ctx->gen_ctx.iv_dma_addr, ivsize);
459 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
460 ivsize, DMA_TO_DEVICE);
461 }
462 /* Release pool */
463 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
464 req_ctx->mlli_params.mlli_virt_addr) {
465 dma_pool_free(req_ctx->mlli_params.curr_pool,
466 req_ctx->mlli_params.mlli_virt_addr,
467 req_ctx->mlli_params.mlli_dma_addr);
468 }
469
470 dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
471 dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
472
473 if (src != dst) {
474 dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
475 dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
476 }
477}
478
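/*
 * cc_map_cipher_request() - Map the IV, source and destination buffers of a
 * cipher request for DMA and, when more than one entry is mapped, build the
 * MLLI table describing them. A minimal sketch of the expected call pattern
 * from a cipher request handler (names below are hypothetical):
 *
 *        rc = cc_map_cipher_request(drvdata, req_ctx, ivsize, nbytes, iv,
 *                                   req->src, req->dst, flags);
 *        if (rc)
 *                return rc;
 *        ... queue HW descriptors ...
 *        cc_unmap_cipher_request(dev, req_ctx, ivsize, req->src, req->dst);
 */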
479int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
480 unsigned int ivsize, unsigned int nbytes,
481 void *info, struct scatterlist *src,
482 struct scatterlist *dst, gfp_t flags)
483{
484 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
485 struct mlli_params *mlli_params = &req_ctx->mlli_params;
486 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
487 struct device *dev = drvdata_to_dev(drvdata);
488 struct buffer_array sg_data;
489 u32 dummy = 0;
490 int rc = 0;
491 u32 mapped_nents = 0;
492
493 req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
494 mlli_params->curr_pool = NULL;
495 sg_data.num_of_buffers = 0;
496
497 /* Map IV buffer */
498 if (ivsize) {
499 dump_byte_array("iv", (u8 *)info, ivsize);
500 req_ctx->gen_ctx.iv_dma_addr =
501 dma_map_single(dev, (void *)info,
502 ivsize, DMA_TO_DEVICE);
503 if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
504 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
505 ivsize, info);
506 return -ENOMEM;
507 }
508 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
509 ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
510 } else {
511 req_ctx->gen_ctx.iv_dma_addr = 0;
512 }
513
514 /* Map the src SGL */
515 rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
516 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
517 if (rc)
518 goto cipher_exit;
519 if (mapped_nents > 1)
520 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
521
522 if (src == dst) {
523 /* Handle inplace operation */
524 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
525 req_ctx->out_nents = 0;
526 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
527 nbytes, 0, true,
528 &req_ctx->in_mlli_nents);
529 }
530 } else {
531 /* Map the dst sg */
532 rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
533 &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
534 &dummy, &mapped_nents);
535 if (rc)
536 goto cipher_exit;
537 if (mapped_nents > 1)
538 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
539
540 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
541 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
542 nbytes, 0, true,
543 &req_ctx->in_mlli_nents);
544 cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
545 nbytes, 0, true,
546 &req_ctx->out_mlli_nents);
547 }
548 }
549
550 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
551 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
552 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
553 if (rc)
554 goto cipher_exit;
555 }
556
557 dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
558 cc_dma_buf_type(req_ctx->dma_buf_type));
559
560 return 0;
561
562cipher_exit:
563 cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
564 return rc;
565}
566
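/*
 * cc_unmap_aead_request() - Release all DMA mappings taken by
 * cc_map_aead_request(): the MAC buffer, the CCM/GCM helper blocks, the IV,
 * the MLLI table and the source/destination SGLs. On coherent platforms
 * performing an in-place decrypt, the MAC is also copied back from its
 * temporary location.
 */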
567void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
568{
569 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
570 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
571 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
572 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
573 u32 dummy;
574 bool chained;
575 u32 size_to_unmap = 0;
576
577 if (areq_ctx->mac_buf_dma_addr) {
578 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
579 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
580 }
581
582 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
583 if (areq_ctx->hkey_dma_addr) {
584 dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
585 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
586 }
587
588 if (areq_ctx->gcm_block_len_dma_addr) {
589 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
590 AES_BLOCK_SIZE, DMA_TO_DEVICE);
591 }
592
593 if (areq_ctx->gcm_iv_inc1_dma_addr) {
594 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
595 AES_BLOCK_SIZE, DMA_TO_DEVICE);
596 }
597
598 if (areq_ctx->gcm_iv_inc2_dma_addr) {
599 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
600 AES_BLOCK_SIZE, DMA_TO_DEVICE);
601 }
602 }
603
604 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
605 if (areq_ctx->ccm_iv0_dma_addr) {
606 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
607 AES_BLOCK_SIZE, DMA_TO_DEVICE);
608 }
609
610 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
611 }
612 if (areq_ctx->gen_ctx.iv_dma_addr) {
613 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
614 hw_iv_size, DMA_BIDIRECTIONAL);
615 }
616
617 /* Release pool */
618 if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
619 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
620 (areq_ctx->mlli_params.mlli_virt_addr)) {
621 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
622 &areq_ctx->mlli_params.mlli_dma_addr,
623 areq_ctx->mlli_params.mlli_virt_addr);
624 dma_pool_free(areq_ctx->mlli_params.curr_pool,
625 areq_ctx->mlli_params.mlli_virt_addr,
626 areq_ctx->mlli_params.mlli_dma_addr);
627 }
628
629 dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
630 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
631 req->assoclen, req->cryptlen);
632 size_to_unmap = req->assoclen + req->cryptlen;
633 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
634 size_to_unmap += areq_ctx->req_authsize;
635 if (areq_ctx->is_gcm4543)
636 size_to_unmap += crypto_aead_ivsize(tfm);
637
638 dma_unmap_sg(dev, req->src,
639 cc_get_sgl_nents(dev, req->src, size_to_unmap,
640 &dummy, &chained),
641 DMA_BIDIRECTIONAL);
642 if (req->src != req->dst) {
643 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
644 sg_virt(req->dst));
645 dma_unmap_sg(dev, req->dst,
646 cc_get_sgl_nents(dev, req->dst, size_to_unmap,
647 &dummy, &chained),
648 DMA_BIDIRECTIONAL);
649 }
650 if (drvdata->coherent &&
651 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
652 req->src == req->dst) {
 /* Copy the MAC back from its temporary location to deal with
  * possible data overwrites caused by a cache coherency problem.
  */
657 cc_copy_mac(dev, req, CC_SG_FROM_BUF);
658 }
659}
660
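/*
 * cc_get_aead_icv_nents() - Determine how many SGL entries hold the ICV and
 * whether it is fragmented across entries.
 *
 * Returns 0 or 1 when the ICV is contiguous, 1 or 2 when it is fragmented,
 * and a negative value when the ICV spans more than
 * MAX_ICV_NENTS_SUPPORTED entries.
 */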
661static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
662 unsigned int sgl_nents, unsigned int authsize,
663 u32 last_entry_data_size,
664 bool *is_icv_fragmented)
665{
666 unsigned int icv_max_size = 0;
667 unsigned int icv_required_size = authsize > last_entry_data_size ?
668 (authsize - last_entry_data_size) :
669 authsize;
670 unsigned int nents;
671 unsigned int i;
672
673 if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
674 *is_icv_fragmented = false;
675 return 0;
676 }
677
678 for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
679 if (!sgl)
680 break;
681 sgl = sg_next(sgl);
682 }
683
684 if (sgl)
685 icv_max_size = sgl->length;
686
687 if (last_entry_data_size > authsize) {
688 /* ICV attached to data in last entry (not fragmented!) */
689 nents = 0;
690 *is_icv_fragmented = false;
691 } else if (last_entry_data_size == authsize) {
692 /* ICV placed in whole last entry (not fragmented!) */
693 nents = 1;
694 *is_icv_fragmented = false;
695 } else if (icv_max_size > icv_required_size) {
696 nents = 1;
697 *is_icv_fragmented = true;
698 } else if (icv_max_size == icv_required_size) {
699 nents = 2;
700 *is_icv_fragmented = true;
701 } else {
702 dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
703 MAX_ICV_NENTS_SUPPORTED);
704 nents = -1; /*unsupported*/
705 }
706 dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
707 (*is_icv_fragmented ? "true" : "false"), nents);
708
709 return nents;
710}
711
712static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
713 struct aead_request *req,
714 struct buffer_array *sg_data,
715 bool is_last, bool do_chain)
716{
717 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
718 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
719 struct device *dev = drvdata_to_dev(drvdata);
720 int rc = 0;
721
722 if (!req->iv) {
723 areq_ctx->gen_ctx.iv_dma_addr = 0;
724 goto chain_iv_exit;
725 }
726
727 areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
728 hw_iv_size,
729 DMA_BIDIRECTIONAL);
730 if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
731 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
732 hw_iv_size, req->iv);
733 rc = -ENOMEM;
734 goto chain_iv_exit;
735 }
736
737 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
738 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
739 // TODO: what about CTR?? ask Ron
740 if (do_chain && areq_ctx->plaintext_authenticate_only) {
741 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
742 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
743 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
744 /* Chain to given list */
745 cc_add_buffer_entry(dev, sg_data,
746 (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
747 iv_size_to_authenc, is_last,
748 &areq_ctx->assoc.mlli_nents);
749 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
750 }
751
752chain_iv_exit:
753 return rc;
754}
755
756static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
757 struct aead_request *req,
758 struct buffer_array *sg_data,
759 bool is_last, bool do_chain)
760{
761 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
762 int rc = 0;
763 u32 mapped_nents = 0;
764 struct scatterlist *current_sg = req->src;
765 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
766 unsigned int sg_index = 0;
767 u32 size_of_assoc = req->assoclen;
768 struct device *dev = drvdata_to_dev(drvdata);
769
770 if (areq_ctx->is_gcm4543)
771 size_of_assoc += crypto_aead_ivsize(tfm);
772
773 if (!sg_data) {
774 rc = -EINVAL;
775 goto chain_assoc_exit;
776 }
777
778 if (req->assoclen == 0) {
779 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
780 areq_ctx->assoc.nents = 0;
781 areq_ctx->assoc.mlli_nents = 0;
782 dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
783 cc_dma_buf_type(areq_ctx->assoc_buff_type),
784 areq_ctx->assoc.nents);
785 goto chain_assoc_exit;
786 }
787
 //iterate over the sgl to see how many entries are for associated data
 //it is assumed that if we reach here, the sgl is already mapped
790 sg_index = current_sg->length;
791 //the first entry in the scatter list contains all the associated data
792 if (sg_index > size_of_assoc) {
793 mapped_nents++;
794 } else {
795 while (sg_index <= size_of_assoc) {
796 current_sg = sg_next(current_sg);
 /* if we have reached the end of the sgl, then this is
  * unexpected
  */
800 if (!current_sg) {
801 dev_err(dev, "reached end of sg list. unexpected\n");
802 return -EINVAL;
803 }
804 sg_index += current_sg->length;
805 mapped_nents++;
806 }
807 }
808 if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
809 dev_err(dev, "Too many fragments. current %d max %d\n",
810 mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
811 return -ENOMEM;
812 }
813 areq_ctx->assoc.nents = mapped_nents;
814
815 /* in CCM case we have additional entry for
816 * ccm header configurations
817 */
818 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
819 if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
821 (areq_ctx->assoc.nents + 1),
822 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
823 rc = -ENOMEM;
824 goto chain_assoc_exit;
825 }
826 }
827
828 if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
829 areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
830 else
831 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
832
833 if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
834 dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
835 cc_dma_buf_type(areq_ctx->assoc_buff_type),
836 areq_ctx->assoc.nents);
837 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
838 req->assoclen, 0, is_last,
839 &areq_ctx->assoc.mlli_nents);
840 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
841 }
842
843chain_assoc_exit:
844 return rc;
845}
846
847static void cc_prepare_aead_data_dlli(struct aead_request *req,
848 u32 *src_last_bytes, u32 *dst_last_bytes)
849{
850 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
851 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
852 unsigned int authsize = areq_ctx->req_authsize;
853
854 areq_ctx->is_icv_fragmented = false;
855 if (req->src == req->dst) {
856 /*INPLACE*/
857 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
858 (*src_last_bytes - authsize);
859 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
860 (*src_last_bytes - authsize);
861 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
862 /*NON-INPLACE and DECRYPT*/
863 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
864 (*src_last_bytes - authsize);
865 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
866 (*src_last_bytes - authsize);
867 } else {
868 /*NON-INPLACE and ENCRYPT*/
869 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
870 (*dst_last_bytes - authsize);
871 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
872 (*dst_last_bytes - authsize);
873 }
874}
875
876static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
877 struct aead_request *req,
878 struct buffer_array *sg_data,
879 u32 *src_last_bytes, u32 *dst_last_bytes,
880 bool is_last_table)
881{
882 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
883 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
884 unsigned int authsize = areq_ctx->req_authsize;
885 int rc = 0, icv_nents;
886 struct device *dev = drvdata_to_dev(drvdata);
887 struct scatterlist *sg;
888
889 if (req->src == req->dst) {
890 /*INPLACE*/
891 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
892 areq_ctx->src_sgl, areq_ctx->cryptlen,
893 areq_ctx->src_offset, is_last_table,
894 &areq_ctx->src.mlli_nents);
895
896 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
897 areq_ctx->src.nents,
898 authsize, *src_last_bytes,
899 &areq_ctx->is_icv_fragmented);
900 if (icv_nents < 0) {
901 rc = -ENOTSUPP;
902 goto prepare_data_mlli_exit;
903 }
904
905 if (areq_ctx->is_icv_fragmented) {
 /* A backup is needed only when the ICV is fragmented; ICV
  * verification is then done by a CPU compare in order to
  * simplify MAC verification upon request completion
  */
910 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 /* On coherent platforms (e.g. ACP) the ICV has
  * already been copied for any in-place decrypt
  * operation, so the copy must be skipped here.
  */
916 if (!drvdata->coherent)
917 cc_copy_mac(dev, req, CC_SG_TO_BUF);
918
919 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
920 } else {
921 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
922 areq_ctx->icv_dma_addr =
923 areq_ctx->mac_buf_dma_addr;
924 }
925 } else { /* Contig. ICV */
926 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
 /* Should handle the case where the SG is not contiguous. */
928 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
929 (*src_last_bytes - authsize);
930 areq_ctx->icv_virt_addr = sg_virt(sg) +
931 (*src_last_bytes - authsize);
932 }
933
934 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
935 /*NON-INPLACE and DECRYPT*/
936 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
937 areq_ctx->src_sgl, areq_ctx->cryptlen,
938 areq_ctx->src_offset, is_last_table,
939 &areq_ctx->src.mlli_nents);
940 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
941 areq_ctx->dst_sgl, areq_ctx->cryptlen,
942 areq_ctx->dst_offset, is_last_table,
943 &areq_ctx->dst.mlli_nents);
944
945 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
946 areq_ctx->src.nents,
947 authsize, *src_last_bytes,
948 &areq_ctx->is_icv_fragmented);
949 if (icv_nents < 0) {
950 rc = -ENOTSUPP;
951 goto prepare_data_mlli_exit;
952 }
953
954 /* Backup happens only when ICV is fragmented, ICV
955 * verification is made by CPU compare in order to simplify
956 * MAC verification upon request completion
957 */
958 if (areq_ctx->is_icv_fragmented) {
959 cc_copy_mac(dev, req, CC_SG_TO_BUF);
960 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
961
962 } else { /* Contig. ICV */
963 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
 /* Should handle the case where the SG is not contiguous. */
965 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
966 (*src_last_bytes - authsize);
967 areq_ctx->icv_virt_addr = sg_virt(sg) +
968 (*src_last_bytes - authsize);
969 }
970
971 } else {
972 /*NON-INPLACE and ENCRYPT*/
973 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
974 areq_ctx->dst_sgl, areq_ctx->cryptlen,
975 areq_ctx->dst_offset, is_last_table,
976 &areq_ctx->dst.mlli_nents);
977 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
978 areq_ctx->src_sgl, areq_ctx->cryptlen,
979 areq_ctx->src_offset, is_last_table,
980 &areq_ctx->src.mlli_nents);
981
982 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
983 areq_ctx->dst.nents,
984 authsize, *dst_last_bytes,
985 &areq_ctx->is_icv_fragmented);
986 if (icv_nents < 0) {
987 rc = -ENOTSUPP;
988 goto prepare_data_mlli_exit;
989 }
990
991 if (!areq_ctx->is_icv_fragmented) {
992 sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
993 /* Contig. ICV */
994 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
995 (*dst_last_bytes - authsize);
996 areq_ctx->icv_virt_addr = sg_virt(sg) +
997 (*dst_last_bytes - authsize);
998 } else {
999 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1000 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1001 }
1002 }
1003
1004prepare_data_mlli_exit:
1005 return rc;
1006}
1007
1008static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1009 struct aead_request *req,
1010 struct buffer_array *sg_data,
1011 bool is_last_table, bool do_chain)
1012{
1013 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1014 struct device *dev = drvdata_to_dev(drvdata);
1015 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1016 unsigned int authsize = areq_ctx->req_authsize;
1017 unsigned int src_last_bytes = 0, dst_last_bytes = 0;
1018 int rc = 0;
1019 u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1020 u32 offset = 0;
1021 /* non-inplace mode */
1022 unsigned int size_for_map = req->assoclen + req->cryptlen;
1023 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1024 u32 sg_index = 0;
1025 bool chained = false;
1026 bool is_gcm4543 = areq_ctx->is_gcm4543;
1027 u32 size_to_skip = req->assoclen;
1028
1029 if (is_gcm4543)
1030 size_to_skip += crypto_aead_ivsize(tfm);
1031
1032 offset = size_to_skip;
1033
1034 if (!sg_data)
1035 return -EINVAL;
1036
1037 areq_ctx->src_sgl = req->src;
1038 areq_ctx->dst_sgl = req->dst;
1039
1040 if (is_gcm4543)
1041 size_for_map += crypto_aead_ivsize(tfm);
1042
1043 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1044 authsize : 0;
1045 src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
1046 &src_last_bytes, &chained);
1047 sg_index = areq_ctx->src_sgl->length;
1048 //check where the data starts
1049 while (sg_index <= size_to_skip) {
1050 offset -= areq_ctx->src_sgl->length;
1051 areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
 //if we have reached the end of the sgl, then this is unexpected
1053 if (!areq_ctx->src_sgl) {
1054 dev_err(dev, "reached end of sg list. unexpected\n");
1055 return -EINVAL;
1056 }
1057 sg_index += areq_ctx->src_sgl->length;
1058 src_mapped_nents--;
1059 }
1060 if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1061 dev_err(dev, "Too many fragments. current %d max %d\n",
1062 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1063 return -ENOMEM;
1064 }
1065
1066 areq_ctx->src.nents = src_mapped_nents;
1067
1068 areq_ctx->src_offset = offset;
1069
1070 if (req->src != req->dst) {
1071 size_for_map = req->assoclen + req->cryptlen;
1072 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1073 authsize : 0;
1074 if (is_gcm4543)
1075 size_for_map += crypto_aead_ivsize(tfm);
1076
1077 rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
1078 &areq_ctx->dst.nents,
1079 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1080 &dst_mapped_nents);
1081 if (rc)
1082 goto chain_data_exit;
1083 }
1084
1085 dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
1086 &dst_last_bytes, &chained);
1087 sg_index = areq_ctx->dst_sgl->length;
1088 offset = size_to_skip;
1089
1090 //check where the data starts
1091 while (sg_index <= size_to_skip) {
1092 offset -= areq_ctx->dst_sgl->length;
1093 areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
 //if we have reached the end of the sgl, then this is unexpected
1095 if (!areq_ctx->dst_sgl) {
1096 dev_err(dev, "reached end of sg list. unexpected\n");
1097 return -EINVAL;
1098 }
1099 sg_index += areq_ctx->dst_sgl->length;
1100 dst_mapped_nents--;
1101 }
1102 if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1103 dev_err(dev, "Too many fragments. current %d max %d\n",
1104 dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1105 return -ENOMEM;
1106 }
1107 areq_ctx->dst.nents = dst_mapped_nents;
1108 areq_ctx->dst_offset = offset;
1109 if (src_mapped_nents > 1 ||
1110 dst_mapped_nents > 1 ||
1111 do_chain) {
1112 areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
1113 rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
1114 &src_last_bytes,
1115 &dst_last_bytes, is_last_table);
1116 } else {
1117 areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
1118 cc_prepare_aead_data_dlli(req, &src_last_bytes,
1119 &dst_last_bytes);
1120 }
1121
1122chain_data_exit:
1123 return rc;
1124}
1125
1126static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
1127 struct aead_request *req)
1128{
1129 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1130 u32 curr_mlli_size = 0;
1131
1132 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
1133 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1134 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1135 LLI_ENTRY_BYTE_SIZE;
1136 }
1137
1138 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 /* In-place case: dst nents equals src nents */
1140 if (req->src == req->dst) {
1141 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1142 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1143 curr_mlli_size;
1144 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1145 if (!areq_ctx->is_single_pass)
1146 areq_ctx->assoc.mlli_nents +=
1147 areq_ctx->src.mlli_nents;
1148 } else {
1149 if (areq_ctx->gen_ctx.op_type ==
1150 DRV_CRYPTO_DIRECTION_DECRYPT) {
1151 areq_ctx->src.sram_addr =
1152 drvdata->mlli_sram_addr +
1153 curr_mlli_size;
1154 areq_ctx->dst.sram_addr =
1155 areq_ctx->src.sram_addr +
1156 areq_ctx->src.mlli_nents *
1157 LLI_ENTRY_BYTE_SIZE;
1158 if (!areq_ctx->is_single_pass)
1159 areq_ctx->assoc.mlli_nents +=
1160 areq_ctx->src.mlli_nents;
1161 } else {
1162 areq_ctx->dst.sram_addr =
1163 drvdata->mlli_sram_addr +
1164 curr_mlli_size;
1165 areq_ctx->src.sram_addr =
1166 areq_ctx->dst.sram_addr +
1167 areq_ctx->dst.mlli_nents *
1168 LLI_ENTRY_BYTE_SIZE;
1169 if (!areq_ctx->is_single_pass)
1170 areq_ctx->assoc.mlli_nents +=
1171 areq_ctx->dst.mlli_nents;
1172 }
1173 }
1174 }
1175}
1176
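/*
 * cc_map_aead_request() - Map all buffers of an AEAD request for DMA: the MAC
 * buffer, CCM/GCM helper blocks, IV, associated data and the source and
 * destination data, then build the MLLI table(s) when any of them need one.
 * cc_unmap_aead_request() undoes the mapping and is also invoked here on any
 * failure path.
 */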
1177int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1178{
1179 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1180 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1181 struct device *dev = drvdata_to_dev(drvdata);
1182 struct buffer_array sg_data;
1183 unsigned int authsize = areq_ctx->req_authsize;
1184 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1185 int rc = 0;
1186 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1187 bool is_gcm4543 = areq_ctx->is_gcm4543;
1188 dma_addr_t dma_addr;
1189 u32 mapped_nents = 0;
1190 u32 dummy = 0; /*used for the assoc data fragments */
1191 u32 size_to_map = 0;
1192 gfp_t flags = cc_gfp_flags(&req->base);
1193
1194 mlli_params->curr_pool = NULL;
1195 sg_data.num_of_buffers = 0;
1196
 /* copy the mac to a temporary location to deal with possible
  * data overwrites caused by a cache coherency problem.
  */
1200 if (drvdata->coherent &&
1201 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1202 req->src == req->dst)
1203 cc_copy_mac(dev, req, CC_SG_TO_BUF);
1204
 /* calculate the size for the cipher: remove the ICV in decrypt */
1206 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1207 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1208 req->cryptlen :
1209 (req->cryptlen - authsize);
1210
1211 dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1212 DMA_BIDIRECTIONAL);
1213 if (dma_mapping_error(dev, dma_addr)) {
1214 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1215 MAX_MAC_SIZE, areq_ctx->mac_buf);
1216 rc = -ENOMEM;
1217 goto aead_map_failure;
1218 }
1219 areq_ctx->mac_buf_dma_addr = dma_addr;
1220
1221 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1222 void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1223
1224 dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1225 DMA_TO_DEVICE);
1226
1227 if (dma_mapping_error(dev, dma_addr)) {
1228 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1229 AES_BLOCK_SIZE, addr);
1230 areq_ctx->ccm_iv0_dma_addr = 0;
1231 rc = -ENOMEM;
1232 goto aead_map_failure;
1233 }
1234 areq_ctx->ccm_iv0_dma_addr = dma_addr;
1235
1236 rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1237 &sg_data, req->assoclen);
1238 if (rc)
1239 goto aead_map_failure;
1240 }
1241
1242 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1243 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1244 DMA_BIDIRECTIONAL);
1245 if (dma_mapping_error(dev, dma_addr)) {
1246 dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1247 AES_BLOCK_SIZE, areq_ctx->hkey);
1248 rc = -ENOMEM;
1249 goto aead_map_failure;
1250 }
1251 areq_ctx->hkey_dma_addr = dma_addr;
1252
1253 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1254 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1255 if (dma_mapping_error(dev, dma_addr)) {
1256 dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1257 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1258 rc = -ENOMEM;
1259 goto aead_map_failure;
1260 }
1261 areq_ctx->gcm_block_len_dma_addr = dma_addr;
1262
1263 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1264 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1265
1266 if (dma_mapping_error(dev, dma_addr)) {
1267 dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1268 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1269 areq_ctx->gcm_iv_inc1_dma_addr = 0;
1270 rc = -ENOMEM;
1271 goto aead_map_failure;
1272 }
1273 areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1274
1275 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1276 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1277
1278 if (dma_mapping_error(dev, dma_addr)) {
1279 dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1280 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1281 areq_ctx->gcm_iv_inc2_dma_addr = 0;
1282 rc = -ENOMEM;
1283 goto aead_map_failure;
1284 }
1285 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1286 }
1287
1288 size_to_map = req->cryptlen + req->assoclen;
1289 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1290 size_to_map += authsize;
1291
1292 if (is_gcm4543)
1293 size_to_map += crypto_aead_ivsize(tfm);
1294 rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1295 &areq_ctx->src.nents,
1296 (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1297 LLI_MAX_NUM_OF_DATA_ENTRIES),
1298 &dummy, &mapped_nents);
1299 if (rc)
1300 goto aead_map_failure;
1301
1302 if (areq_ctx->is_single_pass) {
1303 /*
1304 * Create MLLI table for:
1305 * (1) Assoc. data
1306 * (2) Src/Dst SGLs
 * Note: IV is a contiguous buffer (not an SGL)
1308 */
1309 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1310 if (rc)
1311 goto aead_map_failure;
1312 rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1313 if (rc)
1314 goto aead_map_failure;
1315 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1316 if (rc)
1317 goto aead_map_failure;
1318 } else { /* DOUBLE-PASS flow */
1319 /*
1320 * Prepare MLLI table(s) in this order:
1321 *
1322 * If ENCRYPT/DECRYPT (inplace):
1323 * (1) MLLI table for assoc
1324 * (2) IV entry (chained right after end of assoc)
1325 * (3) MLLI for src/dst (inplace operation)
1326 *
1327 * If ENCRYPT (non-inplace)
1328 * (1) MLLI table for assoc
1329 * (2) IV entry (chained right after end of assoc)
1330 * (3) MLLI for dst
1331 * (4) MLLI for src
1332 *
1333 * If DECRYPT (non-inplace)
1334 * (1) MLLI table for assoc
1335 * (2) IV entry (chained right after end of assoc)
1336 * (3) MLLI for src
1337 * (4) MLLI for dst
1338 */
1339 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1340 if (rc)
1341 goto aead_map_failure;
1342 rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1343 if (rc)
1344 goto aead_map_failure;
1345 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1346 if (rc)
1347 goto aead_map_failure;
1348 }
1349
 /* MLLI support - start building the MLLI according to the above
  * results
  */
1353 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1354 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1355 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1356 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1357 if (rc)
1358 goto aead_map_failure;
1359
1360 cc_update_aead_mlli_nents(drvdata, req);
1361 dev_dbg(dev, "assoc params mn %d\n",
1362 areq_ctx->assoc.mlli_nents);
1363 dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1364 dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1365 }
1366 return 0;
1367
1368aead_map_failure:
1369 cc_unmap_aead_request(dev, req);
1370 return rc;
1371}
1372
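/*
 * cc_map_hash_request_final() - Map the buffers for a final/finup hash
 * operation: the data still cached in the request context buffer plus, when
 * @do_update is set, the new data in @src. Builds an MLLI table when more
 * than a single DLLI entry is needed.
 */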
1373int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1374 struct scatterlist *src, unsigned int nbytes,
1375 bool do_update, gfp_t flags)
1376{
1377 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1378 struct device *dev = drvdata_to_dev(drvdata);
1379 u8 *curr_buff = cc_hash_buf(areq_ctx);
1380 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1381 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1382 struct buffer_array sg_data;
1383 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1384 int rc = 0;
1385 u32 dummy = 0;
1386 u32 mapped_nents = 0;
1387
1388 dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1389 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1390 /* Init the type of the dma buffer */
1391 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1392 mlli_params->curr_pool = NULL;
1393 sg_data.num_of_buffers = 0;
1394 areq_ctx->in_nents = 0;
1395
1396 if (nbytes == 0 && *curr_buff_cnt == 0) {
1397 /* nothing to do */
1398 return 0;
1399 }
1400
 /* TODO: copy data in case the buffer is large enough for the operation */
1402 /* map the previous buffer */
1403 if (*curr_buff_cnt) {
1404 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1405 &sg_data);
1406 if (rc)
1407 return rc;
1408 }
1409
1410 if (src && nbytes > 0 && do_update) {
1411 rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1412 &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1413 &dummy, &mapped_nents);
1414 if (rc)
1415 goto unmap_curr_buff;
1416 if (src && mapped_nents == 1 &&
1417 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1418 memcpy(areq_ctx->buff_sg, src,
1419 sizeof(struct scatterlist));
1420 areq_ctx->buff_sg->length = nbytes;
1421 areq_ctx->curr_sg = areq_ctx->buff_sg;
1422 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1423 } else {
1424 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1425 }
1426 }
1427
1428 /*build mlli */
1429 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1430 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1431 /* add the src data to the sg_data */
1432 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1433 0, true, &areq_ctx->mlli_nents);
1434 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1435 if (rc)
1436 goto fail_unmap_din;
1437 }
1438 /* change the buffer index for the unmap function */
1439 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1440 dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1441 cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1442 return 0;
1443
1444fail_unmap_din:
1445 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1446
1447unmap_curr_buff:
1448 if (*curr_buff_cnt)
1449 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1450
1451 return rc;
1452}
1453
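/*
 * cc_map_hash_request_update() - Map the buffers for a hash update. Only
 * whole blocks are sent to the engine; the residue is copied to the "next"
 * buffer for a later update/final call. For example (illustrative numbers
 * only), with block_size = 64, *curr_buff_cnt = 10 and nbytes = 100:
 * total_in_len = 110, *next_buff_cnt = 110 & 63 = 46 and
 * update_data_len = 64, so one block is hashed and 46 bytes are carried over.
 *
 * Returns 1 when all the data was buffered and no HW operation is needed.
 */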
1454int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1455 struct scatterlist *src, unsigned int nbytes,
1456 unsigned int block_size, gfp_t flags)
1457{
1458 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1459 struct device *dev = drvdata_to_dev(drvdata);
1460 u8 *curr_buff = cc_hash_buf(areq_ctx);
1461 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1462 u8 *next_buff = cc_next_buf(areq_ctx);
1463 u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1464 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1465 unsigned int update_data_len;
1466 u32 total_in_len = nbytes + *curr_buff_cnt;
1467 struct buffer_array sg_data;
1468 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1469 unsigned int swap_index = 0;
1470 int rc = 0;
1471 u32 dummy = 0;
1472 u32 mapped_nents = 0;
1473
1474 dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1475 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1476 /* Init the type of the dma buffer */
1477 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1478 mlli_params->curr_pool = NULL;
1479 areq_ctx->curr_sg = NULL;
1480 sg_data.num_of_buffers = 0;
1481 areq_ctx->in_nents = 0;
1482
1483 if (total_in_len < block_size) {
1484 dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1485 curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1486 areq_ctx->in_nents =
1487 cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
1488 sg_copy_to_buffer(src, areq_ctx->in_nents,
1489 &curr_buff[*curr_buff_cnt], nbytes);
1490 *curr_buff_cnt += nbytes;
1491 return 1;
1492 }
1493
1494 /* Calculate the residue size*/
1495 *next_buff_cnt = total_in_len & (block_size - 1);
1496 /* update data len */
1497 update_data_len = total_in_len - *next_buff_cnt;
1498
1499 dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1500 *next_buff_cnt, update_data_len);
1501
1502 /* Copy the new residue to next buffer */
1503 if (*next_buff_cnt) {
1504 dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1505 next_buff, (update_data_len - *curr_buff_cnt),
1506 *next_buff_cnt);
1507 cc_copy_sg_portion(dev, next_buff, src,
1508 (update_data_len - *curr_buff_cnt),
1509 nbytes, CC_SG_TO_BUF);
1510 /* change the buffer index for next operation */
1511 swap_index = 1;
1512 }
1513
1514 if (*curr_buff_cnt) {
1515 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1516 &sg_data);
1517 if (rc)
1518 return rc;
1519 /* change the buffer index for next operation */
1520 swap_index = 1;
1521 }
1522
1523 if (update_data_len > *curr_buff_cnt) {
1524 rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1525 DMA_TO_DEVICE, &areq_ctx->in_nents,
1526 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1527 &mapped_nents);
1528 if (rc)
1529 goto unmap_curr_buff;
1530 if (mapped_nents == 1 &&
1531 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1532 /* only one entry in the SG and no previous data */
1533 memcpy(areq_ctx->buff_sg, src,
1534 sizeof(struct scatterlist));
1535 areq_ctx->buff_sg->length = update_data_len;
1536 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1537 areq_ctx->curr_sg = areq_ctx->buff_sg;
1538 } else {
1539 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1540 }
1541 }
1542
1543 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1544 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1545 /* add the src data to the sg_data */
1546 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1547 (update_data_len - *curr_buff_cnt), 0, true,
1548 &areq_ctx->mlli_nents);
1549 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1550 if (rc)
1551 goto fail_unmap_din;
1552 }
1553 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1554
1555 return 0;
1556
1557fail_unmap_din:
1558 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1559
1560unmap_curr_buff:
1561 if (*curr_buff_cnt)
1562 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1563
1564 return rc;
1565}
1566
1567void cc_unmap_hash_request(struct device *dev, void *ctx,
1568 struct scatterlist *src, bool do_revert)
1569{
1570 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1571 u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1572
 /* In case a pool was set, a table was
  * allocated and should be released
  */
1576 if (areq_ctx->mlli_params.curr_pool) {
1577 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1578 &areq_ctx->mlli_params.mlli_dma_addr,
1579 areq_ctx->mlli_params.mlli_virt_addr);
1580 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1581 areq_ctx->mlli_params.mlli_virt_addr,
1582 areq_ctx->mlli_params.mlli_dma_addr);
1583 }
1584
1585 if (src && areq_ctx->in_nents) {
1586 dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1587 sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1588 dma_unmap_sg(dev, src,
1589 areq_ctx->in_nents, DMA_TO_DEVICE);
1590 }
1591
1592 if (*prev_len) {
1593 dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1594 sg_virt(areq_ctx->buff_sg),
1595 &sg_dma_address(areq_ctx->buff_sg),
1596 sg_dma_len(areq_ctx->buff_sg));
1597 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1598 if (!do_revert) {
1599 /* clean the previous data length for update
1600 * operation
1601 */
1602 *prev_len = 0;
1603 } else {
1604 areq_ctx->buff_index ^= 1;
1605 }
1606 }
1607}
1608
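/*
 * cc_buffer_mgr_init() - Allocate the buffer manager handle and its DMA pool
 * of MLLI tables. Intended to be paired with cc_buffer_mgr_fini() at driver
 * teardown; this pairing is a sketch of the intended usage rather than a
 * statement of the full probe flow.
 */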
1609int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1610{
1611 struct buff_mgr_handle *buff_mgr_handle;
1612 struct device *dev = drvdata_to_dev(drvdata);
1613
1614 buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1615 if (!buff_mgr_handle)
1616 return -ENOMEM;
1617
1618 drvdata->buff_mgr_handle = buff_mgr_handle;
1619
1620 buff_mgr_handle->mlli_buffs_pool =
1621 dma_pool_create("dx_single_mlli_tables", dev,
1622 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1623 LLI_ENTRY_BYTE_SIZE,
1624 MLLI_TABLE_MIN_ALIGNMENT, 0);
1625
1626 if (!buff_mgr_handle->mlli_buffs_pool)
1627 goto error;
1628
1629 return 0;
1630
1631error:
1632 cc_buffer_mgr_fini(drvdata);
1633 return -ENOMEM;
1634}
1635
1636int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1637{
1638 struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1639
1640 if (buff_mgr_handle) {
1641 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1642 kfree(drvdata->buff_mgr_handle);
1643 drvdata->buff_mgr_handle = NULL;
1644 }
1645 return 0;
1646}