// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 */

#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sys_soc.h>

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "ctrl.h"

bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);

#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
        u32 *jump_cmd, op_flags;

        init_job_desc(desc, 0);

        op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                   (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

        /* INIT RNG in non-test mode */
        append_operation(desc, op_flags);

        if (!handle && do_sk) {
                /*
                 * For SH0, Secure Keys must be generated as well
                 */

                /* wait for done */
                jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
                set_jump_tgt_here(desc, jump_cmd);

                /*
                 * load 1 to clear written reg:
                 * resets the done interrupt and returns the RNG to idle.
                 */
                append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

                /* Initialize State Handle */
                append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                                 OP_ALG_AAI_RNG4_SK);
        }

        append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
        init_job_desc(desc, 0);

        /* Uninstantiate State Handle 0 */
        append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                         (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

        append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *                        the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @desc - pointer to the descriptor to be executed
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *         - -ENODEV if the DECO couldn't be acquired
 *         - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                                       u32 *status)
{
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
        struct caam_deco __iomem *deco = ctrlpriv->deco;
        unsigned int timeout = 100000;
        u32 deco_dbg_reg, deco_state, flags;
        int i;

        if (ctrlpriv->virt_en == 1 ||
            /*
             * Apparently on i.MX8MQ it doesn't matter if virt_en == 1
             * and the following steps should be performed regardless
             */
            of_machine_is_compatible("fsl,imx8mq")) {
                clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

                while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
                       --timeout)
                        cpu_relax();

                timeout = 100000;
        }

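        /*
         * Request direct software access to DECO 0 and wait until the
         * request is granted (DEN0 set) or the timeout expires.
         */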
        clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

        while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
               --timeout)
                cpu_relax();

        if (!timeout) {
                dev_err(ctrldev, "failed to acquire DECO 0\n");
                clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
                return -ENODEV;
        }

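        /* Copy the descriptor, word by word, into DECO's descriptor buffer */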
        for (i = 0; i < desc_len(desc); i++)
                wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

        flags = DECO_JQCR_WHL;
        /*
         * If the descriptor length is longer than 4 words, then the
         * FOUR bit in JRCTRL register must be set.
         */
        if (desc_len(desc) >= 4)
                flags |= DECO_JQCR_FOUR;

        /* Instruct the DECO to execute it */
        clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

        timeout = 10000000;
        do {
                deco_dbg_reg = rd_reg32(&deco->desc_dbg);

                if (ctrlpriv->era < 10)
                        deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
                                     DESC_DBG_DECO_STAT_SHIFT;
                else
                        deco_state = (rd_reg32(&deco->dbg_exec) &
                                      DESC_DER_DECO_STAT_MASK) >>
                                     DESC_DER_DECO_STAT_SHIFT;

                /*
                 * If an error occurred in the descriptor, then
                 * the DECO status field will be set to 0x0D
                 */
                if (deco_state == DECO_STAT_HOST_ERR)
                        break;

                cpu_relax();
        } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

        *status = rd_reg32(&deco->op_status_hi) &
                  DECO_OP_STATUS_HI_ERR_MASK;

        if (ctrlpriv->virt_en == 1)
                clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

        /* Mark the DECO as free */
        clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

        if (!timeout)
                return -EAGAIN;

        return 0;
}

/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 *                     which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor
 */
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
        u32 *desc, status;
        int sh_idx, ret = 0;

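        /*
         * Three command words are enough here: the job header, the RNG
         * OPERATION command and the final halting JUMP (see
         * build_deinstantiation_desc() above).
         */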
        desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
                /*
                 * If the corresponding bit is set, then it means the state
                 * handle was initialized by us, and thus it needs to be
                 * deinitialized as well
                 */
                if ((1 << sh_idx) & state_handle_mask) {
                        /*
                         * Create the descriptor for deinstantiating this state
                         * handle
                         */
                        build_deinstantiation_desc(desc, sh_idx);

                        /* Try to run it through DECO0 */
                        ret = run_descriptor_deco0(ctrldev, desc, &status);

                        if (ret ||
                            (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
                                dev_err(ctrldev,
                                        "Failed to deinstantiate RNG4 SH%d\n",
                                        sh_idx);
                                break;
                        }
                        dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
                }
        }

        kfree(desc);

        return ret;
}

static void devm_deinstantiate_rng(void *data)
{
        struct device *ctrldev = data;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);

        /*
         * De-initialize RNG state handles initialized by this driver.
         * In case of SoCs with Management Complex, RNG is managed by MC f/w.
         */
        if (ctrlpriv->rng4_sh_init)
                deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
}

/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *                   which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *                      by an external entity, 0 otherwise.
 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *           Caution: this can be done only once; if the keys need to be
 *           regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor,
 *           e.g. an RNG hardware error due to not "good enough" entropy
 *           being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                           int gen_sk)
{
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl;
        u32 *desc, status = 0, rdsta_val;
        int ret = 0, sh_idx;

        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
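        /*
         * Seven command words cover the longest instantiation descriptor
         * built above: job header, two OPERATIONs, two JUMPs and a
         * two-word immediate LOAD (see build_instantiation_desc()).
         */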
        desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
                /*
                 * If the corresponding bit is set, this state handle
                 * was initialized by somebody else, so it's left alone.
                 */
                if ((1 << sh_idx) & state_handle_mask)
                        continue;

                /* Create the descriptor for instantiating RNG State Handle */
                build_instantiation_desc(desc, sh_idx, gen_sk);

                /* Try to run it through DECO0 */
                ret = run_descriptor_deco0(ctrldev, desc, &status);

                /*
                 * If ret is not 0, or descriptor status is not 0, then
                 * something went wrong. No need to try the next state
                 * handle (if available), bail out here.
                 * Also, if for some reason, the State Handle didn't get
                 * instantiated although the descriptor has finished
                 * without any error (HW optimizations for later
                 * CAAM eras), then try again.
                 */
                if (ret)
                        break;

                rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
                if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
                    !(rdsta_val & (1 << sh_idx))) {
                        ret = -EAGAIN;
                        break;
                }

                dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
                /* Clear the contents before recreating the descriptor */
                memset(desc, 0x00, CAAM_CMD_SZ * 7);
        }

        kfree(desc);

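        /*
         * On success, register a devres action so the state handles
         * instantiated here are deinstantiated automatically when the
         * controller device goes away.
         */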
        if (!ret)
                ret = devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng,
                                               ctrldev);

        return ret;
}

/*
 * kick_trng - sets the various parameters for enabling the initialization
 *             of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
        struct device *ctrldev = &pdev->dev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl;
        struct rng4tst __iomem *r4tst;
        u32 val;

        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
        r4tst = &ctrl->r4tst[0];

        /* put RNG4 into program mode */
        clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

        /*
         * Performance-wise, it does not make sense to
         * set the delay to a value that is lower
         * than the last one that worked (i.e. the state handles
         * were instantiated properly). Thus, instead of wasting
         * time trying to set the values controlling the sample
         * frequency, the function simply returns.
         */
        val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
              >> RTSDCTL_ENT_DLY_SHIFT;
        if (ent_delay <= val)
                goto start_rng;

        val = rd_reg32(&r4tst->rtsdctl);
        val = (val & ~RTSDCTL_ENT_DLY_MASK) |
              (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
        wr_reg32(&r4tst->rtsdctl, val);
        /* min. freq. count, equal to 1/4 of the entropy sample length */
        wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
        /* disable maximum frequency count */
        wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
        /* read the control register */
        val = rd_reg32(&r4tst->rtmctl);
start_rng:
        /*
         * select raw sampling in both entropy shifter
         * and statistical checker; put RNG4 into run mode
         */
        clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}

static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
{
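        /*
         * CAAM eras prior to 6 do not report the era in the CCB version
         * register, so map the SEC IP ID and major revision to an era.
         */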
        static const struct {
                u16 ip_id;
                u8 maj_rev;
                u8 era;
        } id[] = {
                {0x0A10, 1, 1},
                {0x0A10, 2, 2},
                {0x0A12, 1, 3},
                {0x0A14, 1, 3},
                {0x0A14, 2, 4},
                {0x0A16, 1, 4},
                {0x0A10, 3, 4},
                {0x0A11, 1, 4},
                {0x0A18, 1, 4},
                {0x0A11, 2, 5},
                {0x0A12, 2, 5},
                {0x0A13, 1, 5},
                {0x0A1C, 1, 5}
        };
        u32 ccbvid, id_ms;
        u8 maj_rev, era;
        u16 ip_id;
        int i;

        ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
        era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
        if (era)        /* This is '0' prior to CAAM ERA-6 */
                return era;

        id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
        ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
        maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;

        for (i = 0; i < ARRAY_SIZE(id); i++)
                if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
                        return id[i].era;

        return -ENOTSUPP;
}

/**
 * caam_get_era() - Return the ERA of the SEC on SoC, based on the optional
 * "fsl,sec-era" property in the DTS. This property is updated by u-boot.
 * In case this property is not passed, an attempt to retrieve the CAAM era
 * via register reads will be made.
 **/
static int caam_get_era(struct caam_ctrl __iomem *ctrl)
{
        struct device_node *caam_node;
        int ret;
        u32 prop;

        caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
        of_node_put(caam_node);

        if (!ret)
                return prop;
        else
                return caam_get_era_from_hw(ctrl);
}

/*
 * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
 * have an issue wherein AXI bus transactions may not occur in the correct
 * order. This isn't a problem running single descriptors, but can be if
 * running multiple concurrent descriptors. Reworking the driver to throttle
 * to single requests is impractical, thus the workaround is to limit the AXI
 * pipeline to a depth of 1 (from its default of 4) to preclude this situation
 * from occurring.
 */
static void handle_imx6_err005766(u32 *mcr)
{
        if (of_machine_is_compatible("fsl,imx6q") ||
            of_machine_is_compatible("fsl,imx6dl") ||
            of_machine_is_compatible("fsl,imx6qp"))
                clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
                              1 << MCFGR_AXIPIPE_SHIFT);
}

static const struct of_device_id caam_match[] = {
        {
                .compatible = "fsl,sec-v4.0",
        },
        {
                .compatible = "fsl,sec4.0",
        },
        {},
};
MODULE_DEVICE_TABLE(of, caam_match);

struct caam_imx_data {
        const struct clk_bulk_data *clks;
        int num_clks;
};

static const struct clk_bulk_data caam_imx6_clks[] = {
        { .id = "ipg" },
        { .id = "mem" },
        { .id = "aclk" },
        { .id = "emi_slow" },
};

static const struct caam_imx_data caam_imx6_data = {
        .clks = caam_imx6_clks,
        .num_clks = ARRAY_SIZE(caam_imx6_clks),
};

static const struct clk_bulk_data caam_imx7_clks[] = {
        { .id = "ipg" },
        { .id = "aclk" },
};

static const struct caam_imx_data caam_imx7_data = {
        .clks = caam_imx7_clks,
        .num_clks = ARRAY_SIZE(caam_imx7_clks),
};

static const struct clk_bulk_data caam_imx6ul_clks[] = {
        { .id = "ipg" },
        { .id = "mem" },
        { .id = "aclk" },
};

static const struct caam_imx_data caam_imx6ul_data = {
        .clks = caam_imx6ul_clks,
        .num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};

static const struct soc_device_attribute caam_imx_soc_table[] = {
        { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
        { .soc_id = "i.MX6*",  .data = &caam_imx6_data },
        { .soc_id = "i.MX7*",  .data = &caam_imx7_data },
        { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
        { .family = "Freescale i.MX" },
        { /* sentinel */ }
};

static void disable_clocks(void *data)
{
        struct caam_drv_private *ctrlpriv = data;

        clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
}

static int init_clocks(struct device *dev, const struct caam_imx_data *data)
{
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
        int ret;

        ctrlpriv->num_clks = data->num_clks;
        ctrlpriv->clks = devm_kmemdup(dev, data->clks,
                                      data->num_clks * sizeof(data->clks[0]),
                                      GFP_KERNEL);
        if (!ctrlpriv->clks)
                return -ENOMEM;

        ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
        if (ret) {
                dev_err(dev,
                        "Failed to request all necessary clocks\n");
                return ret;
        }

        ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
        if (ret) {
                dev_err(dev,
                        "Failed to prepare/enable all necessary clocks\n");
                return ret;
        }

        return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
}

#ifdef CONFIG_DEBUG_FS
static void caam_remove_debugfs(void *root)
{
        debugfs_remove_recursive(root);
}
#endif

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
        int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
        u64 caam_id;
        const struct soc_device_attribute *imx_soc_match;
        struct device *dev;
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
        struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
        struct caam_perfmon *perfmon;
        struct dentry *dfs_root;
#endif
        u32 scfgr, comp_params;
        u8 rng_vid;
        int pg_size;
        int BLOCK_OFFSET = 0;

        ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;

        dev = &pdev->dev;
        dev_set_drvdata(dev, ctrlpriv);
        nprop = pdev->dev.of_node;

        imx_soc_match = soc_device_match(caam_imx_soc_table);
        caam_imx = (bool)imx_soc_match;

        if (imx_soc_match) {
                if (!imx_soc_match->data) {
                        dev_err(dev, "No clock data provided for i.MX SoC\n");
                        return -EINVAL;
                }

                ret = init_clocks(dev, imx_soc_match->data);
                if (ret)
                        return ret;
        }

        /* Get configuration properties from device tree */
        /* First, get register page */
        ctrl = devm_of_iomap(dev, nprop, 0, NULL);
        ret = PTR_ERR_OR_ZERO(ctrl);
        if (ret) {
                dev_err(dev, "caam: of_iomap() failed\n");
                return ret;
        }

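        /*
         * Probe the controller's own registers for its endianness, pointer
         * size and integration options (DPAA2, QI) before configuring
         * anything else.
         */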
        caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
                                  (CSTA_PLEND | CSTA_ALT_PLEND));
        comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
        if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
                caam_ptr_sz = sizeof(u64);
        else
                caam_ptr_sz = sizeof(u32);
        caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
        ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);

#ifdef CONFIG_CAAM_QI
        /* If (DPAA 1.x) QI present, check whether dependencies are available */
        if (ctrlpriv->qi_present && !caam_dpaa2) {
                ret = qman_is_probed();
                if (!ret) {
                        return -EPROBE_DEFER;
                } else if (ret < 0) {
                        dev_err(dev, "failing probe due to qman probe error\n");
                        return -ENODEV;
                }

                ret = qman_portals_probed();
                if (!ret) {
                        return -EPROBE_DEFER;
                } else if (ret < 0) {
                        dev_err(dev, "failing probe due to qman portals probe error\n");
                        return -ENODEV;
                }
        }
#endif

        /*
         * Allocate the BLOCK_OFFSET based on the supported page size on
         * the platform
         */
        pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
        if (pg_size == 0)
                BLOCK_OFFSET = PG_SIZE_4K;
        else
                BLOCK_OFFSET = PG_SIZE_64K;

        ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
        ctrlpriv->assure = (struct caam_assurance __iomem __force *)
                           ((__force uint8_t *)ctrl +
                            BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
                           );
        ctrlpriv->deco = (struct caam_deco __iomem __force *)
                         ((__force uint8_t *)ctrl +
                          BLOCK_OFFSET * DECO_BLOCK_NUMBER
                         );

        /* Get the IRQ of the controller (for security violations only) */
        ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

        /*
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * long pointers in master configuration register.
         * In case of SoCs with Management Complex, MC f/w performs
         * the configuration.
         */
        np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
        ctrlpriv->mc_en = !!np;
        of_node_put(np);

        if (!ctrlpriv->mc_en)
                clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
                              MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
                              MCFGR_WDENABLE | MCFGR_LARGE_BURST |
                              (sizeof(dma_addr_t) == sizeof(u64) ?
                               MCFGR_LONG_PTR : 0));

        handle_imx6_err005766(&ctrl->mcr);

        /*
         * Read the Compile Time parameters and SCFGR to determine
         * if virtualization is enabled for this platform
         */
        scfgr = rd_reg32(&ctrl->scfgr);

        ctrlpriv->virt_en = 0;
        if (comp_params & CTPR_MS_VIRT_EN_INCL) {
                /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
                 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
                 */
                if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
                    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
                     (scfgr & SCFGR_VIRT_EN)))
                        ctrlpriv->virt_en = 1;
        } else {
                /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
                if (comp_params & CTPR_MS_VIRT_EN_POR)
                        ctrlpriv->virt_en = 1;
        }

        if (ctrlpriv->virt_en == 1)
                clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
                              JRSTART_JR1_START | JRSTART_JR2_START |
                              JRSTART_JR3_START);

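        /* Set the DMA mask according to what the controller and SoC support */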
        ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
        if (ret) {
                dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
                return ret;
        }

        ctrlpriv->era = caam_get_era(ctrl);
        ctrlpriv->domain = iommu_get_domain_for_dev(dev);

#ifdef CONFIG_DEBUG_FS
        /*
         * FIXME: needs better naming distinction, as some amalgamation of
         * "caam" and nprop->full_name. The OF name isn't distinctive,
         * but does separate instances
         */
        perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

        dfs_root = debugfs_create_dir(dev_name(dev), NULL);
        ret = devm_add_action_or_reset(dev, caam_remove_debugfs, dfs_root);
        if (ret)
                return ret;

        ctrlpriv->ctl = debugfs_create_dir("ctl", dfs_root);
#endif

        /* Check to see if (DPAA 1.x) QI present. If so, enable */
        if (ctrlpriv->qi_present && !caam_dpaa2) {
                ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
                               ((__force uint8_t *)ctrl +
                                BLOCK_OFFSET * QI_BLOCK_NUMBER
                               );
                /* This is all that's required to physically enable QI */
                wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

                /* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
                ret = caam_qi_init(pdev);
                if (ret)
                        dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
        }

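        /* Walk the DT children and record a register pointer per job ring */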
        ring = 0;
        for_each_available_child_of_node(nprop, np)
                if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
                        ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
                                             ((__force uint8_t *)ctrl +
                                              (ring + JR_BLOCK_NUMBER) *
                                              BLOCK_OFFSET
                                             );
                        ctrlpriv->total_jobrs++;
                        ring++;
                }

        /* If no QI and no rings specified, quit and go home */
        if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
                dev_err(dev, "no queues configured, terminating\n");
                return -ENOMEM;
        }

        if (ctrlpriv->era < 10)
                rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
                           CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
        else
                rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
                          CHA_VER_VID_SHIFT;

        /*
         * If SEC has RNG version >= 4 and RNG state handle has not been
         * already instantiated, do RNG instantiation
         * In case of SoCs with Management Complex, RNG is managed by MC f/w.
         */
        if (!ctrlpriv->mc_en && rng_vid >= 4) {
                ctrlpriv->rng4_sh_init =
                        rd_reg32(&ctrl->r4tst[0].rdsta);
                /*
                 * If the secure keys (TDKEK, JDKEK, TDSK) were already
                 * generated, signal this to the function that is instantiating
                 * the state handles. An error would occur if RNG4 attempts
                 * to regenerate these keys before the next POR.
                 */
                gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
                ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
                do {
                        int inst_handles =
                                rd_reg32(&ctrl->r4tst[0].rdsta) &
                                RDSTA_IFMASK;
                        /*
                         * If either SH were instantiated by somebody else
                         * (e.g. u-boot) then it is assumed that the entropy
                         * parameters are properly set and thus the function
                         * setting these (kick_trng(...)) is skipped.
                         * Also, if a handle was instantiated, do not change
                         * the TRNG parameters.
                         */
                        if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
                                dev_info(dev,
                                         "Entropy delay = %u\n",
                                         ent_delay);
                                kick_trng(pdev, ent_delay);
                                ent_delay += 400;
                        }
                        /*
                         * if instantiate_rng(...) fails, the loop will rerun
                         * and the kick_trng(...) function will modify the
                         * upper and lower limits of the entropy sampling
                         * interval, leading to a successful initialization of
                         * the RNG.
                         */
                        ret = instantiate_rng(dev, inst_handles,
                                              gen_sk);
                        if (ret == -EAGAIN)
                                /*
                                 * if here, the loop will rerun,
                                 * so don't hog the CPU
                                 */
                                cpu_relax();
                } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
                if (ret) {
                        dev_err(dev, "failed to instantiate RNG\n");
                        return ret;
                }
                /*
                 * Set handles initialized by this module as the complement
                 * of the already initialized ones
                 */
                ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;

                /* Enable RDB bit so that RNG works faster */
                clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
        }

        /* NOTE: RTIC detection ought to go here, around Si time */

        caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
                  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
                 ctrlpriv->era);
        dev_info(dev, "job rings = %d, qi = %d\n",
                 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

#ifdef CONFIG_DEBUG_FS
        debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->req_dequeued,
                            &caam_fops_u64_ro);
        debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ob_enc_req,
                            &caam_fops_u64_ro);
        debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ib_dec_req,
                            &caam_fops_u64_ro);
        debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ob_enc_bytes,
                            &caam_fops_u64_ro);
        debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ob_prot_bytes,
                            &caam_fops_u64_ro);
        debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ib_dec_bytes,
                            &caam_fops_u64_ro);
        debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ib_valid_bytes,
                            &caam_fops_u64_ro);

        /* Controller level - global status values */
        debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->faultaddr,
                            &caam_fops_u32_ro);
        debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->faultdetail,
                            &caam_fops_u32_ro);
        debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->status,
                            &caam_fops_u32_ro);

        /* Internal covering keys (useful in non-secure mode only) */
        ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
                            &ctrlpriv->ctl_kek_wrap);

        ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
        ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
                            &ctrlpriv->ctl_tkek_wrap);

        ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
        ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
                            &ctrlpriv->ctl_tdsk_wrap);
#endif

        ret = devm_of_platform_populate(dev);
        if (ret)
                dev_err(dev, "JR platform devices creation error\n");

        return ret;
}

static struct platform_driver caam_driver = {
        .driver = {
                .name = "caam",
                .of_match_table = caam_match,
        },
        .probe = caam_probe,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");