]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/crypto/caam/ctrl.c
perf test: Fix vmlinux failure on s390x part 2
[mirror_ubuntu-bionic-kernel.git] / drivers / crypto / caam / ctrl.c
1 /* * CAAM control-plane driver backend
2 * Controller-level driver, kernel property detection, initialization
3 *
4 * Copyright 2008-2012 Freescale Semiconductor, Inc.
5 */
6
7 #include <linux/device.h>
8 #include <linux/of_address.h>
9 #include <linux/of_irq.h>
10
11 #include "compat.h"
12 #include "regs.h"
13 #include "intern.h"
14 #include "jr.h"
15 #include "desc_constr.h"
16 #include "ctrl.h"
17
/* Runtime-detected endianness of the CAAM register interface (set in probe). */
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
/* True when the SEC is a DPAA 2.x instance (RNG/MCFGR managed by MC firmware). */
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);

#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif
26
/*
 * i.MX targets tend to have clock control subsystems that can
 * enable/disable clocking to our device.
 */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
/*
 * caam_drv_identify_clk - look up a named CAAM clock for this device
 * @dev: CAAM controller device
 * @clk_name: clock consumer name ("ipg", "mem", "aclk", "emi_slow")
 *
 * Uses the managed getter so the clock reference is released automatically
 * when @dev is unbound. May return an ERR_PTR on lookup failure.
 */
static inline struct clk *caam_drv_identify_clk(struct device *dev,
						char *clk_name)
{
	return devm_clk_get(dev, clk_name);
}
#else
/*
 * Non-i.MX platforms have no gateable CAAM clocks; NULL is a valid
 * "no clock" handle for the clk_prepare_enable()/clk_disable_unprepare()
 * calls made later, which treat NULL as a no-op.
 */
static inline struct clk *caam_drv_identify_clk(struct device *dev,
						char *clk_name)
{
	return NULL;
}
#endif
44
/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 *
 * @desc: buffer the job descriptor is built into (CAAM_CMD_SZ words)
 * @handle: RNG4 state handle index to instantiate
 * @do_sk: when non-zero and @handle is 0, also generate the secure keys
 *
 * The command order below is hardware-significant; do not reorder.
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
	u32 *jump_cmd, op_flags;

	init_job_desc(desc, 0);

	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

	/* INIT RNG in non-test mode */
	append_operation(desc, op_flags);

	if (!handle && do_sk) {
		/*
		 * For SH0, Secure Keys must be generated as well
		 */

		/* wait for done */
		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
		set_jump_tgt_here(desc, jump_cmd);

		/*
		 * load 1 to clear written reg:
		 * resets the done interrupt and returns the RNG to idle.
		 */
		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

		/* Initialize State Handle */
		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
				 OP_ALG_AAI_RNG4_SK);
	}

	/* Halt the descriptor once everything above has completed */
	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
83
/*
 * Descriptor for deinstantiation of State Handle 0 of the RNG block.
 *
 * @desc: buffer the job descriptor is built into
 * @handle: RNG4 state handle index to deinstantiate
 */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
	init_job_desc(desc, 0);

	/* Uninstantiate State Handle 0 */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
95
/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 * the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 *
 * NOTE: on the -ENODEV path *status is NOT written; callers must not
 * read it unless this function returned 0 or -EAGAIN.
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
				       u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, flags;
	int i;

	/* In virtualized mode, request DECO access for JR0 first */
	if (ctrlpriv->virt_en == 1) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	/* Request direct (software) access to DECO0 */
	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
	       --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	/* Copy the descriptor into the DECO's descriptor buffer */
	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is longer than 4 words, then the
	 * FOUR bit in JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
		    DESC_DBG_DECO_STAT_HOST_ERR)
			break;
		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	/* Release DECO access for JR0 if it was requested above */
	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
180
/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *		     which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *			by an external entry, 0 otherwise.
 * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *	      Caution: this can be done only once; if the keys need to be
 *	      regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor
 *	      f.i. there was a RNG hardware error due to not "good enough"
 *	      entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
			   int gen_sk)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	u32 *desc, status = 0, rdsta_val;
	int ret = 0, sh_idx;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	/* 7 commands is the worst case built by build_instantiation_desc() */
	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		if ((1 << sh_idx) & state_handle_mask)
			continue;

		/* Create the descriptor for instantiating RNG State Handle */
		build_instantiation_desc(desc, sh_idx, gen_sk);

		/* Try to run it through DECO0 */
		ret = run_descriptor_deco0(ctrldev, desc, &status);

		/*
		 * If ret is not 0, or descriptor status is not 0, then
		 * something went wrong. No need to try the next state
		 * handle (if available), bail out here.
		 * Also, if for some reason, the State Handle didn't get
		 * instantiated although the descriptor has finished
		 * without any error (HW optimizations for later
		 * CAAM eras), then try again.
		 */
		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
		    !(rdsta_val & (1 << sh_idx)))
			ret = -EAGAIN;
		if (ret)
			break;
		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
		/* Clear the contents before recreating the descriptor */
		memset(desc, 0x00, CAAM_CMD_SZ * 7);
	}

	kfree(desc);

	return ret;
}
251
252 /*
253 * deinstantiate_rng - builds and executes a descriptor on DECO0,
254 * which deinitializes the RNG block.
255 * @ctrldev - pointer to device
256 * @state_handle_mask - bitmask containing the instantiation status
257 * for the RNG4 state handles which exist in
258 * the RNG4 block: 1 if it's been instantiated
259 *
260 * Return: - 0 if no error occurred
261 * - -ENOMEM if there isn't enough memory to allocate the descriptor
262 * - -ENODEV if DECO0 couldn't be acquired
263 * - -EAGAIN if an error occurred when executing the descriptor
264 */
265 static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
266 {
267 u32 *desc, status;
268 int sh_idx, ret = 0;
269
270 desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
271 if (!desc)
272 return -ENOMEM;
273
274 for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
275 /*
276 * If the corresponding bit is set, then it means the state
277 * handle was initialized by us, and thus it needs to be
278 * deinitialized as well
279 */
280 if ((1 << sh_idx) & state_handle_mask) {
281 /*
282 * Create the descriptor for deinstantating this state
283 * handle
284 */
285 build_deinstantiation_desc(desc, sh_idx);
286
287 /* Try to run it through DECO0 */
288 ret = run_descriptor_deco0(ctrldev, desc, &status);
289
290 if (ret ||
291 (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
292 dev_err(ctrldev,
293 "Failed to deinstantiate RNG4 SH%d\n",
294 sh_idx);
295 break;
296 }
297 dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
298 }
299 }
300
301 kfree(desc);
302
303 return ret;
304 }
305
/*
 * caam_remove - tear down the CAAM controller
 * @pdev: the controller platform device
 *
 * Reverses caam_probe(): removes child devices, shuts down the QI backend,
 * deinstantiates any RNG state handles this driver initialized, tears down
 * debugfs, unmaps the register region and finally gates the clocks.
 * Also used as the error-cleanup path from caam_probe().
 */
static int caam_remove(struct platform_device *pdev)
{
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_ctrl __iomem *ctrl;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;

	/* Remove platform devices under the crypto node */
	of_platform_depopulate(ctrldev);

#ifdef CONFIG_CAAM_QI
	if (ctrlpriv->qidev)
		caam_qi_shutdown(ctrlpriv->qidev);
#endif

	/*
	 * De-initialize RNG state handles initialized by this driver.
	 * In case of DPAA 2.x, RNG is managed by MC firmware.
	 */
	if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

	/* Unmap controller region */
	iounmap(ctrl);

	/* shut clocks off before finalizing shutdown */
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	clk_disable_unprepare(ctrlpriv->caam_mem);
	clk_disable_unprepare(ctrlpriv->caam_aclk);
	if (ctrlpriv->caam_emi_slow)
		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
	return 0;
}
347
/*
 * kick_trng - sets the various parameters for enabling the initialization
 *	       of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;
	u32 val;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/* put RNG4 into program mode */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly). Thus, instead of wasting
	 * time trying to set the values controlling the sample
	 * frequency, the function simply returns.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
	      >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val)
		goto start_rng;

	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
	/* disable maximum frequency count */
	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
	/* read the control register */
	val = rd_reg32(&r4tst->rtmctl);
start_rng:
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
398
399 /**
400 * caam_get_era() - Return the ERA of the SEC on SoC, based
401 * on "sec-era" propery in the DTS. This property is updated by u-boot.
402 **/
403 int caam_get_era(void)
404 {
405 struct device_node *caam_node;
406 int ret;
407 u32 prop;
408
409 caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
410 ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
411 of_node_put(caam_node);
412
413 return ret ? -ENOTSUPP : prop;
414 }
415 EXPORT_SYMBOL(caam_get_era);
416
/* Device-tree match table: both the "sec-v4.0" and legacy "sec4.0" spellings */
static const struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec-v4.0",
	},
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);
427
/*
 * Probe routine for CAAM top (controller) level.
 *
 * Bring-up order matters and must not be reordered:
 * clocks -> register map -> endianness/page-size detection -> MCFGR/DMA
 * configuration -> child (job-ring) device population -> QI enable ->
 * RNG4 instantiation. Error paths unwind in reverse via the labels at
 * the end; once child devices exist, cleanup goes through caam_remove().
 */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
	u64 caam_id;
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_drv_private *ctrlpriv;
	struct clk *clk;
#ifdef CONFIG_DEBUG_FS
	struct caam_perfmon *perfmon;
#endif
	u32 scfgr, comp_params;
	u32 cha_vid_ls;
	int pg_size;
	int BLOCK_OFFSET = 0;

	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	nprop = pdev->dev.of_node;

	/* Enable clocking */
	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM ipg clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_ipg = clk;

	clk = caam_drv_identify_clk(&pdev->dev, "mem");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM mem clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_mem = clk;

	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM aclk clk: %d\n", ret);
		return ret;
	}
	ctrlpriv->caam_aclk = clk;

	/* i.MX6UL has no emi_slow clock; ctrlpriv->caam_emi_slow stays NULL */
	if (!of_machine_is_compatible("fsl,imx6ul")) {
		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err(&pdev->dev,
				"can't identify CAAM emi_slow clk: %d\n", ret);
			return ret;
		}
		ctrlpriv->caam_emi_slow = clk;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_mem);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
			ret);
		goto disable_caam_ipg;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
		goto disable_caam_mem;
	}

	if (ctrlpriv->caam_emi_slow) {
		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
				ret);
			goto disable_caam_aclk;
		}
	}

	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (ctrl == NULL) {
		dev_err(dev, "caam: of_iomap() failed\n");
		ret = -ENOMEM;
		goto disable_caam_emi_slow;
	}

	/* Detect register endianness from the perfmon status register */
	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
				  (CSTA_PLEND | CSTA_ALT_PLEND));

	/* Finding the page size for using the CTPR_MS register */
	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;

	/* Allocating the BLOCK_OFFSET based on the supported page size on
	 * the platform
	 */
	if (pg_size == 0)
		BLOCK_OFFSET = PG_SIZE_4K;
	else
		BLOCK_OFFSET = PG_SIZE_64K;

	/* Sub-block pointers are fixed offsets (in pages) from the ctrl base */
	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
			   ((__force uint8_t *)ctrl +
			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
			   );
	ctrlpriv->deco = (struct caam_deco __iomem __force *)
			 ((__force uint8_t *)ctrl +
			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
			 );

	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register.
	 * In case of DPAA 2.x, Management Complex firmware performs
	 * the configuration.
	 */
	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
	if (!caam_dpaa2)
		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
			      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
			      (sizeof(dma_addr_t) == sizeof(u64) ?
			       MCFGR_LONG_PTR : 0));

	/*
	 * Read the Compile Time parameters and SCFGR to determine
	 * if Virtualization is enabled for this platform
	 */
	scfgr = rd_reg32(&ctrl->scfgr);

	ctrlpriv->virt_en = 0;
	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
		 */
		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
		     (scfgr & SCFGR_VIRT_EN)))
			ctrlpriv->virt_en = 1;
	} else {
		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
		if (comp_params & CTPR_MS_VIRT_EN_POR)
			ctrlpriv->virt_en = 1;
	}

	/* In virtualized mode, start all job rings so DECO access works */
	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
			      JRSTART_JR1_START | JRSTART_JR2_START |
			      JRSTART_JR3_START);

	/* DMA mask width depends on the SEC flavor (49/40/36/32 bits) */
	if (sizeof(dma_addr_t) == sizeof(u64)) {
		if (caam_dpaa2)
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
		else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
		else
			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
	} else {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
		goto iounmap_ctrl;
	}

	ret = of_platform_populate(nprop, caam_match, NULL, dev);
	if (ret) {
		dev_err(dev, "JR platform devices creation error\n");
		goto iounmap_ctrl;
	}

#ifdef CONFIG_DEBUG_FS
	/*
	 * FIXME: needs better naming distinction, as some amalgamation of
	 * "caam" and nprop->full_name. The OF name isn't distinctive,
	 * but does separate instances
	 */
	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif

	/* Map each job-ring child node to its register sub-block */
	ring = 0;
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
					     ((__force uint8_t *)ctrl +
					      (ring + JR_BLOCK_NUMBER) *
					      BLOCK_OFFSET
					     );
			ctrlpriv->total_jobrs++;
			ring++;
		}

	/* Check to see if (DPAA 1.x) QI present. If so, enable */
	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
			       ((__force uint8_t *)ctrl +
				BLOCK_OFFSET * QI_BLOCK_NUMBER
			       );
		/* This is all that's required to physically enable QI */
		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

		/* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
		ret = caam_qi_init(pdev);
		if (ret)
			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
	}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		ret = -ENOMEM;
		goto caam_remove;
	}

	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);

	/*
	 * If SEC has RNG version >= 4 and RNG state handle has not been
	 * already instantiated, do RNG instantiation
	 * In case of DPAA 2.x, RNG is managed by MC firmware.
	 */
	if (!caam_dpaa2 &&
	    (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
		ctrlpriv->rng4_sh_init =
			rd_reg32(&ctrl->r4tst[0].rdsta);
		/*
		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
		 * generated, signal this to the function that is instantiating
		 * the state handles. An error would occur if RNG4 attempts
		 * to regenerate these keys before the next POR.
		 */
		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
		do {
			int inst_handles =
				rd_reg32(&ctrl->r4tst[0].rdsta) &
								RDSTA_IFMASK;
			/*
			 * If either SH were instantiated by somebody else
			 * (e.g. u-boot) then it is assumed that the entropy
			 * parameters are properly set and thus the function
			 * setting these (kick_trng(...)) is skipped.
			 * Also, if a handle was instantiated, do not change
			 * the TRNG parameters.
			 */
			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
				dev_info(dev,
					 "Entropy delay = %u\n",
					 ent_delay);
				kick_trng(pdev, ent_delay);
				ent_delay += 400;
			}
			/*
			 * if instantiate_rng(...) fails, the loop will rerun
			 * and the kick_trng(...) function will modify the
			 * upper and lower limits of the entropy sampling
			 * interval, leading to a successful initialization of
			 * the RNG.
			 */
			ret = instantiate_rng(dev, inst_handles,
					      gen_sk);
			if (ret == -EAGAIN)
				/*
				 * if here, the loop will rerun,
				 * so don't hog the CPU
				 */
				cpu_relax();
		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
		if (ret) {
			dev_err(dev, "failed to instantiate RNG");
			goto caam_remove;
		}
		/*
		 * Set handles init'ed by this module as the complement of the
		 * already initialized ones
		 */
		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;

		/* Enable RDB bit so that RNG works faster */
		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
	}

	/* NOTE: RTIC detection ought to go here, around Si time */

	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
		 caam_get_era());
	dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present,
		 caam_dpaa2 ? "yes" : "no");

#ifdef CONFIG_DEBUG_FS
	/* Performance-monitor counters (read-only) */
	debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->req_dequeued,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_req,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_req,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_enc_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ob_prot_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_dec_bytes,
			    &caam_fops_u64_ro);
	debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->ib_valid_bytes,
			    &caam_fops_u64_ro);

	/* Controller level - global status values */
	debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultaddr,
			    &caam_fops_u32_ro);
	debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->faultdetail,
			    &caam_fops_u32_ro);
	debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
			    ctrlpriv->ctl, &perfmon->status,
			    &caam_fops_u32_ro);

	/* Internal covering keys (useful in non-secure mode only) */
	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
						S_IRUSR |
						S_IRGRP | S_IROTH,
						ctrlpriv->ctl,
						&ctrlpriv->ctl_kek_wrap);

	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tkek_wrap);

	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tdsk_wrap);
#endif
	return 0;

caam_remove:
	/* Children exist by now: full teardown handles everything below too */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
	caam_remove(pdev);
	return ret;

iounmap_ctrl:
	iounmap(ctrl);
disable_caam_emi_slow:
	if (ctrlpriv->caam_emi_slow)
		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
	clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
	clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	return ret;
}
830
/* Platform driver glue: binds on the caam_match compatibles above */
static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.of_match_table = caam_match,
	},
	.probe       = caam_probe,
	.remove      = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");