/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for
 * use with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"

#define PPC4XX_SEC_VERSION_STR		"0.5"

/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_control ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma: assert the sg, pdr and pe resets, then release them */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* release the pe, sg and pdr resets */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* enable the pe, leaving sg and pdr out of reset */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	if (dev->is_revb) {
		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
		       dev->ce_base + CRYPTO4XX_INT_EN);
	} else {
		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
	}
}

int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		kfree(ctx->sa_in);
		ctx->sa_in = NULL;
		return -ENOMEM;
	}

	ctx->sa_len = size;

	return 0;
}
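
/*
 * Note (illustrative only): the SA "size" argument is counted in 32-bit
 * words, so kcalloc(size, 4, ...) above reserves size * 4 bytes for each
 * of the inbound and outbound SA copies.  A caller using one of the
 * length constants from crypto4xx_sa.h would do something like:
 *
 *	if (crypto4xx_alloc_sa(ctx, SA_AES128_LEN))	// length in words
 *		return -ENOMEM;
 *
 * SA_AES128_LEN is just an example name here; the real callers in
 * crypto4xx_alg.c pick whichever length matches their SA layout.
 */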

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	kfree(ctx->sa_in);
	ctx->sa_in = NULL;
	kfree(ctx->sa_out);
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}

/**
 * alloc memory for the packet descriptor ring, plus the shadow SA and
 * state record pools that back each PD entry
 * pdr_tail and pdr_head start out at zero
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				&dev->shadow_sa_pool_pa,
				GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
				sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				&dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;
	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		struct ce_pd *pd = &dev->pdr[i];
		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];

		pd->sa = dev->shadow_sa_pool_pa +
			sizeof(union shadow_sa_buf) * i;

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;

		/* alloc state record */
		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
			sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);

	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				  dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}

static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}
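
/*
 * Worked example of the full-ring test above (illustrative numbers):
 * with PPC4XX_NUM_PD = 256, pdr_head = 255 and pdr_tail = 0, we get
 * tmp = (255 + 1) % 256 = 0 == pdr_tail, so the ring reports
 * ERING_WAS_FULL.  One slot is always left unused so that a full ring
 * can be told apart from an empty one (head == tail means empty).
 */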

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
	u32 tail;
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	pd_uinfo->state = PD_ENTRY_FREE;

	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	tail = dev->pdr_tail;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return tail;
}

/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}

/*
 * When this function is called, preemption or interrupts must be
 * disabled.
 */
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}
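
/*
 * Worked example of the wrap-around test above (illustrative numbers,
 * taking a 256-entry ring): with gdr_head = 250, gdr_tail = 10 and
 * n = 10, tmp = (250 + 10) % 256 = 4.  Since head > tail, the request
 * would only fail if the new head tmp landed inside the occupied span
 * [10, 250); tmp = 4 lies outside it, so the allocation succeeds and
 * gdr_head advances (wrapping) to 4.
 */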

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return &dev->gdr[idx];
}

/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);
		return -ENOMEM;
	}

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		dev->sdr[i].ptr = dev->scatter_buffer_pa +
				  PPC4XX_SD_BUFFER_SIZE * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va)
		dma_free_coherent(dev->core_dev->device,
				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

/*
 * When this function is called, preemption or interrupts must be
 * disabled.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case has already been taken care of */
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return &dev->sdr[idx];
}

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	unsigned int first_sd = pd_uinfo->first_sd;
	unsigned int last_sd;
	unsigned int overflow = 0;
	unsigned int to_copy;
	unsigned int dst_start = 0;

	/*
	 * Because the scatter buffers are all neatly organized in one
	 * big contiguous ring buffer, scatterwalk_map_and_copy() can
	 * be instructed to copy a range of buffers in one go.
	 */

	last_sd = (first_sd + pd_uinfo->num_sd);
	if (last_sd > PPC4XX_LAST_SD) {
		last_sd = PPC4XX_LAST_SD;
		overflow = last_sd % PPC4XX_NUM_SD;
	}

	while (nbytes) {
		void *buf = dev->scatter_buffer_va +
			first_sd * PPC4XX_SD_BUFFER_SIZE;

		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
				      (1 + last_sd - first_sd));
		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
		nbytes -= to_copy;

		if (overflow) {
			first_sd = 0;
			last_sd = overflow;
			dst_start += to_copy;
			overflow = 0;
		}
	}
}
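
/*
 * Illustration of the copy above: when the span first_sd..last_sd does
 * not wrap, all the scatter buffers involved sit contiguously inside
 * scatter_buffer_va, so the payload is moved with a single
 * scatterwalk_map_and_copy() call (the min() against nbytes caps the
 * length at the actual packet size).  When the span wraps past
 * PPC4XX_LAST_SD, a second loop pass restarts the copy at buffer
 * index 0 with dst_start advanced past the bytes already copied.
 */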

static void crypto4xx_copy_digest_to_dst(void *dst,
					 struct pd_uinfo *pd_uinfo,
					 struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy(dst, pd_uinfo->sr_va->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo,
				  struct ce_pd *pd)
{
	struct skcipher_request *req;
	struct scatterlist *dst;
	dma_addr_t addr;

	req = skcipher_request_cast(pd_uinfo->async_req);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  req->cryptlen, req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);

		crypto4xx_memcpy_from_le32((u32 *)req->iv,
			pd_uinfo->sr_va->save_iv,
			crypto_skcipher_ivsize(skcipher));
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		skcipher_request_complete(req, -EINPROGRESS);
	skcipher_request_complete(req, 0);
}
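
/*
 * Note on the completion sequence above: a request accepted while the
 * engine was almost full (PD_ENTRY_BUSY) was backlogged by the caller,
 * so it is first completed with -EINPROGRESS to signal that it has left
 * the backlog and started executing, and then completed a second time
 * with the real status.  crypto4xx_ahash_done() and
 * crypto4xx_aead_done() below follow the same pattern.
 */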

static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
				 struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
				     crypto_tfm_ctx(ahash_req->base.tfm));
	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		ahash_request_complete(ahash_req, -EINPROGRESS);
	ahash_request_complete(ahash_req, 0);
}

static void crypto4xx_aead_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo,
				struct ce_pd *pd)
{
	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
		struct aead_request, base);
	struct scatterlist *dst = pd_uinfo->dest_va;
	size_t cp_len = crypto_aead_authsize(
		crypto_aead_reqtfm(aead_req));
	u32 icv[AES_BLOCK_SIZE];
	int err = 0;

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  pd->pd_ctl_len.bf.pkt_len,
					  dst);
	} else {
		dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
			       DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
		/* append icv at the end */
		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
					   sizeof(icv));

		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
					 cp_len, 1);
	} else {
		/* check icv at the end */
		scatterwalk_map_and_copy(icv, aead_req->src,
			aead_req->assoclen + aead_req->cryptlen -
			cp_len, cp_len, 0);

		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));

		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
			err = -EBADMSG;
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd->pd_ctl.bf.status & 0xff) {
		if (!__ratelimit(&dev->aead_ratelimit)) {
			if (pd->pd_ctl.bf.status & 2)
				pr_err("pad fail error\n");
			if (pd->pd_ctl.bf.status & 4)
				pr_err("seqnum fail\n");
			if (pd->pd_ctl.bf.status & 8)
				pr_err("error notify\n");
			pr_err("aead return err status = 0x%02x\n",
			       pd->pd_ctl.bf.status & 0xff);
			pr_err("pd pad_ctl = 0x%08x\n",
			       pd->pd_ctl.bf.pd_pad_ctl);
		}
		err = -EINVAL;
	}

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		aead_request_complete(aead_req, -EINPROGRESS);

	aead_request_complete(aead_req, err);
}

static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd = &dev->pdr[idx];
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];

	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		crypto4xx_cipher_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto4xx_aead_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto4xx_ahash_done(dev, pd_uinfo);
		break;
	}
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}

int crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       const unsigned int datalen,
		       const __le32 *iv, const u32 iv_len,
		       const struct dynamic_sa_ctl *req_sa,
		       const unsigned int sa_len,
		       const unsigned int assoclen,
		       struct scatterlist *_dst)
{
	struct crypto4xx_device *dev = ctx->dev;
	struct dynamic_sa_ctl *sa;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo;
	unsigned int nbytes = datalen;
	size_t offset_to_sr_ptr;
	u32 gd_idx = 0;
	int tmp;
	bool is_busy;

	/* figure how many gd are needed */
	tmp = sg_nents_for_len(src, assoclen + datalen);
	if (tmp < 0) {
		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
		return tmp;
	}
	if (tmp == 1)
		tmp = 0;
	num_gd = tmp;

	if (assoclen) {
		nbytes += assoclen;
		dst = scatterwalk_ffwd(_dst, dst, assoclen);
	}

	/* figure how many sd are needed */
	if (sg_is_last(dst)) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}
	/*
	 * The following section of code needs to be protected: the gather
	 * and scatter ring allocations must be consecutive.  If we run out
	 * of any kind of descriptor, the descriptors already taken must be
	 * returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	/*
	 * Let the caller know to slow down, once more than 13/16ths = 81%
	 * of the available data contexts are being used simultaneously.
	 *
	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
	 * 31 more contexts before new requests have to be rejected.
	 */
	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 13) / 16);
	} else {
		/*
		 * To fix contention issues between ipsec (no backlog) and
		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
		 * data contexts.
		 */
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 15) / 16);

		if (is_busy) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EBUSY;
		}
	}

	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd = &dev->pdr[pd_entry];
	pd->sa_len = sa_len;

	pd_uinfo = &dev->pdr_uinfo[pd_entry];
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len)
		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);

	sa = pd_uinfo->sa_va;
	memcpy(sa, req_sa, sa_len * 4);

	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;

	if (num_gd) {
		dma_addr_t gd_dma;
		struct scatterlist *sg;

		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		/* walk the sg, and setup gather array */

		sg = src;
		while (nbytes) {
			size_t len;

			len = min(sg->length, nbytes);
			gd->ptr = dma_map_page(dev->core_dev->device,
				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
			gd->ctl_len.len = len;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (len >= nbytes)
				break;

			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			sg = sg_next(sg);
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, min(nbytes, src->length),
				DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (sg_is_last(dst)) {
		/*
		 * We know the application gave us dst as one whole piece
		 * of contiguous memory, so there is no need to use the
		 * scatter ring.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		pd->dest = (u32)dma_map_page(dev->core_dev->device,
					     sg_page(dst), dst->offset,
					     min(datalen, dst->length),
					     DMA_TO_DEVICE);
	} else {
		dma_addr_t sd_dma;
		struct ce_sd *sd = NULL;

		u32 sd_idx = fst_sd;
		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr was already set up by crypto4xx_build_sdr() */
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			} else {
				/*
				 * An SD entry can hold PPC4XX_SD_BUFFER_SIZE
				 * bytes, which is more than nbytes, so we
				 * are done.
				 */
				nbytes = 0;
			}
		}
	}

	pd->pd_ctl.w = PD_CTL_HOST_READY |
		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
			PD_CTL_HASH_FINAL : 0);
	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);

	wmb();
	/* write any value to push engine to read a pd */
	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return is_busy ? -EBUSY : -EINPROGRESS;
}
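
/*
 * A minimal sketch of how a request path drives crypto4xx_build_pd()
 * (the real callers live in crypto4xx_alg.c; this simplified caller is
 * illustrative only):
 *
 *	static int example_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(
 *				crypto_skcipher_reqtfm(req));
 *
 *		return crypto4xx_build_pd(&req->base, ctx, req->src,
 *				req->dst, req->cryptlen, (__le32 *)req->iv,
 *				AES_IV_SIZE, ctx->sa_out, ctx->sa_len,
 *				0, NULL);
 *	}
 *
 * The request completes asynchronously, so the usual return value is
 * -EINPROGRESS (or -EBUSY for a backlogged request).
 */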

/**
 * Algorithm Registration Functions
 */
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
			       struct crypto4xx_ctx *ctx)
{
	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}

static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(sk);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher.cipher =
			crypto_alloc_skcipher(alg->base.cra_name, 0,
					      CRYPTO_ALG_NEED_FALLBACK |
					      CRYPTO_ALG_ASYNC);
		if (IS_ERR(ctx->sw_cipher.cipher))
			return PTR_ERR(ctx->sw_cipher.cipher);

		crypto_skcipher_set_reqsize(sk,
			sizeof(struct skcipher_request) + 32 +
			crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
	}

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
	crypto4xx_ctx_init(amcc_alg, ctx);
	return 0;
}

static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
	crypto4xx_free_sa(ctx);
}

static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	crypto4xx_common_exit(ctx);
	if (ctx->sw_cipher.cipher)
		crypto_free_skcipher(ctx->sw_cipher.cipher);
}

static int crypto4xx_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto4xx_alg *amcc_alg;

	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
						CRYPTO_ALG_NEED_FALLBACK |
						CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->sw_cipher.aead))
		return PTR_ERR(ctx->sw_cipher.aead);

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
	crypto4xx_ctx_init(amcc_alg, ctx);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
				crypto_aead_reqsize(ctx->sw_cipher.aead),
				sizeof(struct crypto4xx_aead_reqctx)));
	return 0;
}

static void crypto4xx_aead_exit(struct crypto_aead *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);

	crypto4xx_common_exit(ctx);
	crypto_free_aead(ctx->sw_cipher.aead);
}

static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
				  struct crypto4xx_alg_common *crypto_alg,
				  int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AEAD:
			rc = crypto_register_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_RNG:
			rc = crypto_register_rng(&alg->alg.u.rng);
			break;

		default:
			rc = crypto_register_skcipher(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_RNG:
			crypto_unregister_rng(&alg->alg.u.rng);
			break;

		default:
			crypto_unregister_skcipher(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}

static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail = core_dev->dev->pdr_tail;
	u32 head = core_dev->dev->pdr_head;

	do {
		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
		pd = &core_dev->dev->pdr[tail];
		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
		     ((READ_ONCE(pd->pd_ctl.w) &
		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
		       PD_CTL_PE_DONE)) {
			crypto4xx_pd_done(core_dev->dev, tail);
			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
		} else {
			/* if tail not done, break */
			break;
		}
	} while (head != tail);
}
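
/*
 * The tasklet above retires descriptors strictly in ring order: a PD is
 * only completed once the engine has set PD_CTL_PE_DONE with
 * PD_CTL_HOST_READY cleared again, i.e.
 *
 *	(pd_ctl & (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) == PD_CTL_PE_DONE
 *
 * and the walk stops at the first unfinished tail entry, which keeps
 * completions FIFO even if later descriptors finish earlier.
 */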

/**
 * Top half of the ISR.
 */
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
						      u32 clr_val)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
}

static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
					   PPC4XX_TMO_ERR_INT);
}

static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
				 u8 *data, unsigned int max)
{
	unsigned int i, curr = 0;
	u32 val[2];

	do {
		/* trigger PRN generation */
		writel(PPC4XX_PRNG_CTRL_AUTO_EN,
		       dev->ce_base + CRYPTO4XX_PRNG_CTRL);

		for (i = 0; i < 1024; i++) {
			/* usually 19 iterations are enough */
			if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
			     CRYPTO4XX_PRNG_STAT_BUSY))
				continue;

			val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
			val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
			break;
		}
		if (i == 1024)
			return -ETIMEDOUT;

		if ((max - curr) >= 8) {
			memcpy(data, &val, 8);
			data += 8;
			curr += 8;
		} else {
			/* copy only remaining bytes */
			memcpy(data, &val, max - curr);
			break;
		}
	} while (curr < max);

	return curr;
}

static int crypto4xx_prng_generate(struct crypto_rng *tfm,
				   const u8 *src, unsigned int slen,
				   u8 *dstn, unsigned int dlen)
{
	struct rng_alg *alg = crypto_rng_alg(tfm);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_device *dev;
	int ret;

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
	dev = amcc_alg->dev;

	mutex_lock(&dev->core_dev->rng_lock);
	ret = ppc4xx_prng_data_read(dev, dstn, dlen);
	mutex_unlock(&dev->core_dev->rng_lock);
	return ret;
}

static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
			       unsigned int slen)
{
	return 0;
}

/**
 * Supported Crypto Algorithms
 */
static struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cbc,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cfb(aes)",
			.cra_driver_name = "cfb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cfb,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ctr,
		.encrypt = crypto4xx_encrypt_ctr,
		.decrypt = crypto4xx_decrypt_ctr,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.setkey = crypto4xx_setkey_rfc3686,
		.encrypt = crypto4xx_rfc3686_encrypt,
		.decrypt = crypto4xx_rfc3686_decrypt,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = crypto4xx_setkey_aes_ecb,
		.encrypt = crypto4xx_encrypt_noiv,
		.decrypt = crypto4xx_decrypt_noiv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ofb(aes)",
			.cra_driver_name = "ofb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ofb,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },

	/* AEAD */
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_ccm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_ccm,
		.decrypt = crypto4xx_decrypt_aes_ccm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "ccm(aes)",
			.cra_driver_name = "ccm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_gcm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_gcm,
		.decrypt = crypto4xx_decrypt_aes_gcm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "gcm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
		.base = {
			.cra_name = "stdrng",
			.cra_driver_name = "crypto4xx_rng",
			.cra_priority = 300,
			.cra_ctxsize = 0,
			.cra_module = THIS_MODULE,
		},
		.generate = crypto4xx_prng_generate,
		.seed = crypto4xx_prng_seed,
		.seedsize = 0,
	} },
};
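
/*
 * Once registered, these implementations are found through the regular
 * CryptoAPI lookup, e.g. (a minimal usage sketch, error handling
 * elided):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	// resolves to "cbc-aes-ppc4xx" when its priority wins
 *	...
 *	crypto_free_skcipher(tfm);
 */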

/**
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;
	u32 pvr;
	bool is_revb = true;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
		is_revb = false;
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	rc = -ENOMEM;
	if (!core_dev->dev)
		goto err_alloc_dev;

	/*
	 * Older versions of the 460EX/GT have a hardware bug and hence do
	 * not support H/W based security interrupt coalescing.
	 */
	pvr = mfspr(SPRN_PVR);
	if (is_revb && ((pvr >> 4) == 0x130218A)) {
		u32 min = PVR_MIN(pvr);

		if (min < 4) {
			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
			is_revb = false;
		}
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->dev->is_revb = is_revb;
	core_dev->device = dev;
	mutex_init(&core_dev->rng_lock);
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, is_revb ?
			 crypto4xx_ce_interrupt_handler_revb :
			 crypto4xx_ce_interrupt_handler, 0,
			 KBUILD_MODNAME, dev);
	if (rc)
		goto err_request_irq;

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	ppc4xx_trng_probe(core_dev);
	return 0;

err_start_dev:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	iounmap(core_dev->dev->ce_base);
err_iomap:
	tasklet_kill(&core_dev->tasklet);
err_build_sdr:
	crypto4xx_destroy_sdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_pdr:
	crypto4xx_destroy_pdr(core_dev->dev);
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}

static int crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	ppc4xx_trng_remove(core_dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	mutex_destroy(&core_dev->rng_lock);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto",},
	{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = crypto4xx_match,
	},
	.probe = crypto4xx_probe,
	.remove = crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");