// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 */

#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sys_soc.h>

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "ctrl.h"

bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
bool caam_imx;
EXPORT_SYMBOL(caam_imx);

#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif

/*
 * i.MX targets tend to have clock control subsystems that can
 * enable/disable clocking to our device.
 */
static inline struct clk *caam_drv_identify_clk(struct device *dev,
						char *clk_name)
{
	return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
}
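
/*
 * Note: on non-i.MX parts this helper returns NULL, and the clk API
 * treats a NULL clock as a no-op in clk_prepare_enable() and
 * clk_disable_unprepare(), so callers need no platform-specific
 * branching around the enable/disable paths.
 */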

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
	u32 *jump_cmd, op_flags;

	init_job_desc(desc, 0);

	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
		   (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

	/* INIT RNG in non-test mode */
	append_operation(desc, op_flags);

	if (!handle && do_sk) {
		/*
		 * For SH0, Secure Keys must be generated as well
		 */

		/* wait for done */
		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
		set_jump_tgt_here(desc, jump_cmd);

		/*
		 * load 1 to clear written reg:
		 * resets the done interrupt and returns the RNG to idle.
		 */
		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

		/* Initialize State Handle */
		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
				 OP_ALG_AAI_RNG4_SK);
	}

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
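
/*
 * For reference, the longest descriptor built above (SH0 with do_sk set)
 * should come to seven 32-bit words, which is why instantiate_rng()
 * below allocates CAAM_CMD_SZ * 7:
 *
 *	HEADER                          (1 word)
 *	OPERATION: RNG, AS_INIT         (1 word)
 *	JUMP: wait for done             (1 word)
 *	LOAD IMM 1 -> CLRW              (2 words: command + immediate)
 *	OPERATION: RNG, RNG4_SK         (1 word)
 *	JUMP: HALT                      (1 word)
 */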

/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
	init_job_desc(desc, 0);

	/* Uninstantiate State Handle 0 */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *                        the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *         - -ENODEV if the DECO couldn't be acquired
 *         - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
				       u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, deco_state, flags;
	int i;

	if (ctrlpriv->virt_en == 1) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
	       --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is at least 4 words, then the
	 * FOUR bit in JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);

		if (ctrlpriv->era < 10)
			deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
				     DESC_DBG_DECO_STAT_SHIFT;
		else
			deco_state = (rd_reg32(&deco->dbg_exec) &
				      DESC_DER_DECO_STAT_MASK) >>
				     DESC_DER_DECO_STAT_SHIFT;

		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if (deco_state == DECO_STAT_HOST_ERR)
			break;

		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
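
/*
 * Calling convention used by both RNG helpers below: a HALT with
 * condition code is the normal completion status for these self-halting
 * descriptors, so callers treat it as success, e.g.:
 *
 *	u32 status;
 *	int ret = run_descriptor_deco0(ctrldev, desc, &status);
 *
 *	if (ret || (status && status != JRSTA_SSRC_JUMP_HALT_CC))
 *		(treat as failure)
 */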

/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *                   which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *                      by an external entity, 0 otherwise.
 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *           Caution: this can be done only once; if the keys need to be
 *           regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor,
 *           e.g. an RNG hardware error due to not "good enough"
 *           entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
			   int gen_sk)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	u32 *desc, status = 0, rdsta_val;
	int ret = 0, sh_idx;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		if ((1 << sh_idx) & state_handle_mask)
			continue;

		/* Create the descriptor for instantiating RNG State Handle */
		build_instantiation_desc(desc, sh_idx, gen_sk);

		/* Try to run it through DECO0 */
		ret = run_descriptor_deco0(ctrldev, desc, &status);

		/*
		 * If ret is not 0, or descriptor status is not 0, then
		 * something went wrong. No need to try the next state
		 * handle (if available), bail out here.
		 * Also, if for some reason the State Handle didn't get
		 * instantiated although the descriptor has finished
		 * without any error (HW optimizations for later
		 * CAAM eras), then try again.
		 */
		if (ret)
			break;

		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
		    !(rdsta_val & (1 << sh_idx))) {
			ret = -EAGAIN;
			break;
		}

		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
		/* Clear the contents before recreating the descriptor */
		memset(desc, 0x00, CAAM_CMD_SZ * 7);
	}

	kfree(desc);

	return ret;
}

/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 *                     which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor
 */
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
	u32 *desc, status;
	int sh_idx, ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, then it means the state
		 * handle was initialized by us, and thus it needs to be
		 * deinitialized as well
		 */
		if ((1 << sh_idx) & state_handle_mask) {
			/*
			 * Create the descriptor for deinstantiating this
			 * state handle
			 */
			build_deinstantiation_desc(desc, sh_idx);

			/* Try to run it through DECO0 */
			ret = run_descriptor_deco0(ctrldev, desc, &status);

			if (ret ||
			    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
				dev_err(ctrldev,
					"Failed to deinstantiate RNG4 SH%d\n",
					sh_idx);
				break;
			}
			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
		}
	}

	kfree(desc);

	return ret;
}

static int caam_remove(struct platform_device *pdev)
{
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_ctrl __iomem *ctrl;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;

	/* Remove platform devices under the crypto node */
	of_platform_depopulate(ctrldev);

#ifdef CONFIG_CAAM_QI
	if (ctrlpriv->qidev)
		caam_qi_shutdown(ctrlpriv->qidev);
#endif

	/*
	 * De-initialize RNG state handles initialized by this driver.
	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
	 */
	if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

	/* Unmap controller region */
	iounmap(ctrl);

	/* shut clocks off before finalizing shutdown */
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	if (ctrlpriv->caam_mem)
		clk_disable_unprepare(ctrlpriv->caam_mem);
	clk_disable_unprepare(ctrlpriv->caam_aclk);
	if (ctrlpriv->caam_emi_slow)
		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
	return 0;
}
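
/*
 * Note: caam_probe() also calls this function on its error path (via its
 * caam_remove label), so everything torn down here must tolerate being
 * called with only partially initialized driver state.
 */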

/*
 * kick_trng - sets the various parameters for enabling the initialization
 *             of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;
	u32 val;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/* put RNG4 into program mode */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly). Thus, instead of wasting
	 * time trying to set the values controlling the sample
	 * frequency, the function simply returns.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
	      >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val)
		goto start_rng;

	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
	/* disable maximum frequency count */
	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
	/* read the control register */
	val = rd_reg32(&r4tst->rtmctl);
start_rng:
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
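
/*
 * Tuning sketch: on the first pass through caam_probe()'s retry loop,
 * ent_delay is RTSDCTL_ENT_DLY_MIN, so each entropy sample spans that
 * many system clocks and rtfrqmin is programmed to a quarter of that
 * (ent_delay >> 2). Each retry after an -EAGAIN raises ent_delay by
 * 400 clocks before kick_trng() runs again, widening the sampling
 * window until the TRNG initializes or RTSDCTL_ENT_DLY_MAX is reached.
 */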

static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
{
	static const struct {
		u16 ip_id;
		u8 maj_rev;
		u8 era;
	} id[] = {
		{0x0A10, 1, 1},
		{0x0A10, 2, 2},
		{0x0A12, 1, 3},
		{0x0A14, 1, 3},
		{0x0A14, 2, 4},
		{0x0A16, 1, 4},
		{0x0A10, 3, 4},
		{0x0A11, 1, 4},
		{0x0A18, 1, 4},
		{0x0A11, 2, 5},
		{0x0A12, 2, 5},
		{0x0A13, 1, 5},
		{0x0A1C, 1, 5}
	};
	u32 ccbvid, id_ms;
	u8 maj_rev, era;
	u16 ip_id;
	int i;

	ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
	if (era)	/* This is '0' prior to CAAM ERA-6 */
		return era;

	id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;

	for (i = 0; i < ARRAY_SIZE(id); i++)
		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
			return id[i].era;

	return -ENOTSUPP;
}
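
/*
 * Example of the fallback path: an SEC reporting IP_ID 0x0A14 and major
 * revision 2 in CAAM_ID_MS maps to Era 4 via the table above. Era 6 and
 * later parts report the era directly in CCBVID and never reach the
 * table lookup.
 */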

/**
 * caam_get_era() - Return the ERA of the SEC on SoC, based
 * on the "fsl,sec-era" optional property in the DTS. This property is
 * updated by u-boot.
 * In case this property is not passed, an attempt to retrieve the CAAM
 * era via register reads will be made.
 */
static int caam_get_era(struct caam_ctrl __iomem *ctrl)
{
	struct device_node *caam_node;
	int ret;
	u32 prop;

	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
	of_node_put(caam_node);

	if (!ret)
		return prop;
	else
		return caam_get_era_from_hw(ctrl);
}
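
/*
 * Device tree sketch (node name and values illustrative only) of the
 * property path that takes precedence over the register probe:
 *
 *	crypto@300000 {
 *		compatible = "fsl,sec-v4.0";
 *		fsl,sec-era = <4>;
 *	};
 */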

static const struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec-v4.0",
	},
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
	u64 caam_id;
	static const struct soc_device_attribute imx_soc[] = {
		{.family = "Freescale i.MX"},
		{},
	};
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_drv_private *ctrlpriv;
	struct clk *clk;
#ifdef CONFIG_DEBUG_FS
	struct caam_perfmon *perfmon;
#endif
	u32 scfgr, comp_params;
	u8 rng_vid;
	int pg_size;
	int BLOCK_OFFSET = 0;

	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	nprop = pdev->dev.of_node;

	caam_imx = (bool)soc_device_match(imx_soc);

	/* Enable clocking */
	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM ipg clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_ipg = clk;

	if (!of_machine_is_compatible("fsl,imx7d") &&
	    !of_machine_is_compatible("fsl,imx7s")) {
		clk = caam_drv_identify_clk(&pdev->dev, "mem");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err(&pdev->dev,
				"can't identify CAAM mem clk: %d\n", ret);
			return ret;
		}
		ctrlpriv->caam_mem = clk;
	}

	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM aclk clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_aclk = clk;

	if (!of_machine_is_compatible("fsl,imx6ul") &&
	    !of_machine_is_compatible("fsl,imx7d") &&
	    !of_machine_is_compatible("fsl,imx7s")) {
		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err(&pdev->dev,
				"can't identify CAAM emi_slow clk: %d\n", ret);
			return ret;
		}
		ctrlpriv->caam_emi_slow = clk;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
		return ret;
	}

	if (ctrlpriv->caam_mem) {
		ret = clk_prepare_enable(ctrlpriv->caam_mem);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
				ret);
			goto disable_caam_ipg;
		}
	}

	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
		goto disable_caam_mem;
	}

	if (ctrlpriv->caam_emi_slow) {
		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
				ret);
			goto disable_caam_aclk;
		}
	}
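
	/*
	 * To summarize the machine checks above: i.MX7D/7S expose no "mem"
	 * clock and i.MX6UL/7D/7S expose no "emi_slow" clock, so those
	 * handles stay NULL there, as they do on all non-i.MX parts.
	 */
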
	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (ctrl == NULL) {
		dev_err(dev, "caam: of_iomap() failed\n");
		ret = -ENOMEM;
		goto disable_caam_emi_slow;
	}

	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
				  (CSTA_PLEND | CSTA_ALT_PLEND));

	/* Find the page size from the CTPR_MS register */
	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;

	/*
	 * Select the BLOCK_OFFSET based on the page size supported by
	 * the platform
	 */
	if (pg_size == 0)
		BLOCK_OFFSET = PG_SIZE_4K;
	else
		BLOCK_OFFSET = PG_SIZE_64K;

	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
			   ((__force uint8_t *)ctrl +
			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
			   );
	ctrlpriv->deco = (struct caam_deco __iomem __force *)
			 ((__force uint8_t *)ctrl +
			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
			 );
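
	/*
	 * Register-map arithmetic used above and for the job rings below:
	 * each CAAM block appears to occupy one page, so block N lives at
	 * ctrl + N * BLOCK_OFFSET, e.g. the DECO block at
	 * ctrl + DECO_BLOCK_NUMBER * PG_SIZE_4K on 4 KiB-page parts.
	 */
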
	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register.
	 * In case of SoCs with Management Complex, MC f/w performs
	 * the configuration.
	 */
	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
	ctrlpriv->mc_en = !!np;
	of_node_put(np);

	if (!ctrlpriv->mc_en)
		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
			      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
			      (sizeof(dma_addr_t) == sizeof(u64) ?
			       MCFGR_LONG_PTR : 0));

	/*
	 * Read the Compile Time parameters and SCFGR to determine
	 * if virtualization is enabled for this platform
	 */
	scfgr = rd_reg32(&ctrl->scfgr);

	ctrlpriv->virt_en = 0;
	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
		 */
		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
		     (scfgr & SCFGR_VIRT_EN)))
			ctrlpriv->virt_en = 1;
	} else {
		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
		if (comp_params & CTPR_MS_VIRT_EN_POR)
			ctrlpriv->virt_en = 1;
	}

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
			      JRSTART_JR1_START | JRSTART_JR2_START |
			      JRSTART_JR3_START);

	if (sizeof(dma_addr_t) == sizeof(u64)) {
		if (caam_dpaa2)
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
		else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
		else
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
	} else {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
		goto iounmap_ctrl;
	}

	ctrlpriv->era = caam_get_era(ctrl);

	ret = of_platform_populate(nprop, caam_match, NULL, dev);
	if (ret) {
		dev_err(dev, "JR platform devices creation error\n");
		goto iounmap_ctrl;
	}

#ifdef CONFIG_DEBUG_FS
	/*
	 * FIXME: needs better naming distinction, as some amalgamation of
	 * "caam" and nprop->full_name. The OF name isn't distinctive,
	 * but does separate instances
	 */
	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif

	ring = 0;
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
					     ((__force uint8_t *)ctrl +
					      (ring + JR_BLOCK_NUMBER) *
					      BLOCK_OFFSET
					     );
			ctrlpriv->total_jobrs++;
			ring++;
		}

	/* Check to see if (DPAA 1.x) QI present. If so, enable */
	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
			       ((__force uint8_t *)ctrl +
				BLOCK_OFFSET * QI_BLOCK_NUMBER
			       );
		/* This is all that's required to physically enable QI */
		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

		/* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
		ret = caam_qi_init(pdev);
		if (ret)
			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
	}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		ret = -ENOMEM;
		goto caam_remove;
	}

	if (ctrlpriv->era < 10)
		rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
			  CHA_VER_VID_SHIFT;

	/*
	 * If SEC has RNG version >= 4 and RNG state handle has not been
	 * already instantiated, do RNG instantiation
	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
	 */
	if (!ctrlpriv->mc_en && rng_vid >= 4) {
		ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta);
		/*
		 * If the secure keys (TDKEK, JDKEK, TDSK) were already
		 * generated, signal this to the function that is instantiating
		 * the state handles. An error would occur if RNG4 attempts
		 * to regenerate these keys before the next POR.
		 */
		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
		do {
			int inst_handles =
				rd_reg32(&ctrl->r4tst[0].rdsta) &
				RDSTA_IFMASK;
			/*
			 * If either SH was instantiated by somebody else
			 * (e.g. u-boot) then it is assumed that the entropy
			 * parameters are properly set and thus the function
			 * setting these (kick_trng(...)) is skipped.
			 * Also, if a handle was instantiated, do not change
			 * the TRNG parameters.
			 */
			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
				dev_info(dev,
					 "Entropy delay = %u\n",
					 ent_delay);
				kick_trng(pdev, ent_delay);
				ent_delay += 400;
			}
			/*
			 * if instantiate_rng(...) fails, the loop will rerun
			 * and the kick_trng(...) function will modify the
			 * upper and lower limits of the entropy sampling
			 * interval, leading to a successful initialization of
			 * the RNG.
			 */
			ret = instantiate_rng(dev, inst_handles,
					      gen_sk);
			if (ret == -EAGAIN)
				/*
				 * if here, the loop will rerun,
				 * so don't hog the CPU
				 */
				cpu_relax();
		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
		if (ret) {
			dev_err(dev, "failed to instantiate RNG\n");
			goto caam_remove;
		}
		/*
		 * Set handles initialized by this module as the complement
		 * of the already initialized ones
		 */
		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;

		/* Enable RDB bit so that RNG works faster */
		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
	}
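
	/*
	 * Bookkeeping example for the complement above: RDSTA_IFMASK covers
	 * both RNG4 state handles, so if the bootloader had already brought
	 * up SH0 (rng4_sh_init = 0b01), this driver instantiates only SH1
	 * and records 0b10, which is exactly the set that caam_remove()
	 * must later deinstantiate.
	 */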

	/* NOTE: RTIC detection ought to go here, around Si time */

	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
		 ctrlpriv->era);
	dev_info(dev, "job rings = %d, qi = %d\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->req_dequeued,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_req,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_req,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_prot_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_valid_bytes,
			    &caam_fops_u64_ro);

	/* Controller level - global status values */
	debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultaddr,
			    &caam_fops_u32_ro);
	debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultdetail,
			    &caam_fops_u32_ro);
	debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->status,
			    &caam_fops_u32_ro);

	/* Internal covering keys (useful in non-secure mode only) */
	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
						S_IRUSR | S_IRGRP | S_IROTH,
						ctrlpriv->ctl,
						&ctrlpriv->ctl_kek_wrap);

	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
						 S_IRUSR | S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tkek_wrap);

	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
						 S_IRUSR | S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tdsk_wrap);
#endif
	return 0;

caam_remove:
	caam_remove(pdev);
	return ret;

iounmap_ctrl:
	iounmap(ctrl);
disable_caam_emi_slow:
	if (ctrlpriv->caam_emi_slow)
		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
	clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
	if (ctrlpriv->caam_mem)
		clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	return ret;
}

static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.of_match_table = caam_match,
	},
	.probe = caam_probe,
	.remove = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");