2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/crypto/caam/ctrl.c
@@ -1,41 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* * CAAM control-plane driver backend
  * Controller-level driver, kernel property detection, initialization
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
  */
 
 #include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
 
 #include "compat.h"
+#include "debugfs.h"
 #include "regs.h"
 #include "intern.h"
 #include "jr.h"
 #include "desc_constr.h"
 #include "ctrl.h"
 
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
 bool caam_dpaa2;
 EXPORT_SYMBOL(caam_dpaa2);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
 
 #ifdef CONFIG_CAAM_QI
 #include "qi.h"
 #endif
-
-/*
- * i.MX targets tend to have clock control subsystems that can
- * enable/disable clocking to our device.
- */
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
-						char *clk_name)
-{
-	return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
-}
 
 /*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -48,7 +38,8 @@
 	init_job_desc(desc, 0);
 
 	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
-			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
+			OP_ALG_PR_ON;
 
 	/* INIT RNG in non-test mode */
 	append_operation(desc, op_flags);
@@ -64,7 +55,7 @@
 
 	/*
 	 * load 1 to clear written reg:
-	 * resets the done interrrupt and returns the RNG to idle.
+	 * resets the done interrupt and returns the RNG to idle.
 	 */
 	append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
 
@@ -105,11 +96,19 @@
 	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
 	struct caam_deco __iomem *deco = ctrlpriv->deco;
 	unsigned int timeout = 100000;
-	u32 deco_dbg_reg, flags;
+	u32 deco_dbg_reg, deco_state, flags;
 	int i;
 
 
-	if (ctrlpriv->virt_en == 1) {
+	if (ctrlpriv->virt_en == 1 ||
+	    /*
+	     * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
+	     * and the following steps should be performed regardless
+	     */
+	    of_machine_is_compatible("fsl,imx8mq") ||
+	    of_machine_is_compatible("fsl,imx8mm") ||
+	    of_machine_is_compatible("fsl,imx8mn") ||
+	    of_machine_is_compatible("fsl,imx8mp")) {
 		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
 
 		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -148,13 +147,22 @@
 	timeout = 10000000;
 	do {
 		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+
+		if (ctrlpriv->era < 10)
+			deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
+				     DESC_DBG_DECO_STAT_SHIFT;
+		else
+			deco_state = (rd_reg32(&deco->dbg_exec) &
+				      DESC_DER_DECO_STAT_MASK) >>
+				     DESC_DER_DECO_STAT_SHIFT;
+
 		/*
-		 * If an error occured in the descriptor, then
+		 * If an error occurred in the descriptor, then
 		 * the DECO status field will be set to 0x0D
 		 */
-		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
-		    DESC_DBG_DECO_STAT_HOST_ERR)
+		if (deco_state == DECO_STAT_HOST_ERR)
 			break;
+
 		cpu_relax();
 	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
 
@@ -171,81 +179,6 @@
 		return -EAGAIN;
 
 	return 0;
-}
-
-/*
- * instantiate_rng - builds and executes a descriptor on DECO0,
- *		     which initializes the RNG block.
- * @ctrldev - pointer to device
- * @state_handle_mask - bitmask containing the instantiation status
- *			for the RNG4 state handles which exist in
- *			the RNG4 block: 1 if it's been instantiated
- *			by an external entry, 0 otherwise.
- * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
- *	      Caution: this can be done only once; if the keys need to be
- *	      regenerated, a POR is required
- *
- * Return: - 0 if no error occurred
- *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
- *	   - -ENODEV if DECO0 couldn't be acquired
- *	   - -EAGAIN if an error occurred when executing the descriptor
- *	     f.i. there was a RNG hardware error due to not "good enough"
- *	     entropy being aquired.
- */
-static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
-			   int gen_sk)
-{
-	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-	struct caam_ctrl __iomem *ctrl;
-	u32 *desc, status = 0, rdsta_val;
-	int ret = 0, sh_idx;
-
-	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
-	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
-	if (!desc)
-		return -ENOMEM;
-
-	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
-		/*
-		 * If the corresponding bit is set, this state handle
-		 * was initialized by somebody else, so it's left alone.
-		 */
-		if ((1 << sh_idx) & state_handle_mask)
-			continue;
-
-		/* Create the descriptor for instantiating RNG State Handle */
-		build_instantiation_desc(desc, sh_idx, gen_sk);
-
-		/* Try to run it through DECO0 */
-		ret = run_descriptor_deco0(ctrldev, desc, &status);
-
-		/*
-		 * If ret is not 0, or descriptor status is not 0, then
-		 * something went wrong. No need to try the next state
-		 * handle (if available), bail out here.
-		 * Also, if for some reason, the State Handle didn't get
-		 * instantiated although the descriptor has finished
-		 * without any error (HW optimizations for later
-		 * CAAM eras), then try again.
-		 */
-		if (ret)
-			break;
-
-		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
-		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
-		    !(rdsta_val & (1 << sh_idx))) {
-			ret = -EAGAIN;
-			break;
-		}
-
-		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
-		/* Clear the contents before recreating the descriptor */
-		memset(desc, 0x00, CAAM_CMD_SZ * 7);
-	}
-
-	kfree(desc);
-
-	return ret;
 }
 
 /*
@@ -266,7 +199,7 @@
 	u32 *desc, status;
 	int sh_idx, ret = 0;
 
-	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
 	if (!desc)
 		return -ENOMEM;
 
@@ -302,47 +235,108 @@
 	return ret;
 }
 
-static int caam_remove(struct platform_device *pdev)
+static void devm_deinstantiate_rng(void *data)
 {
-	struct device *ctrldev;
-	struct caam_drv_private *ctrlpriv;
-	struct caam_ctrl __iomem *ctrl;
-
-	ctrldev = &pdev->dev;
-	ctrlpriv = dev_get_drvdata(ctrldev);
-	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
-
-	/* Remove platform devices under the crypto node */
-	of_platform_depopulate(ctrldev);
-
-#ifdef CONFIG_CAAM_QI
-	if (ctrlpriv->qidev)
-		caam_qi_shutdown(ctrlpriv->qidev);
-#endif
+	struct device *ctrldev = data;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
 
 	/*
 	 * De-initialize RNG state handles initialized by this driver.
 	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
 	 */
-	if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
+	if (ctrlpriv->rng4_sh_init)
 		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+}
 
-	/* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
-	debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ *		     which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ *			for the RNG4 state handles which exist in
+ *			the RNG4 block: 1 if it's been instantiated
+ *			by an external entry, 0 otherwise.
+ * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ *	      Caution: this can be done only once; if the keys need to be
+ *	      regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
+ *	   - -ENODEV if DECO0 couldn't be acquired
+ *	   - -EAGAIN if an error occurred when executing the descriptor
+ *	     f.i. there was a RNG hardware error due to not "good enough"
+ *	     entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+			   int gen_sk)
+{
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_ctrl __iomem *ctrl;
+	u32 *desc, status = 0, rdsta_val;
+	int ret = 0, sh_idx;
 
-	/* Unmap controller region */
-	iounmap(ctrl);
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
+	if (!desc)
+		return -ENOMEM;
 
-	/* shut clocks off before finalizing shutdown */
-	clk_disable_unprepare(ctrlpriv->caam_ipg);
-	if (ctrlpriv->caam_mem)
-		clk_disable_unprepare(ctrlpriv->caam_mem);
-	clk_disable_unprepare(ctrlpriv->caam_aclk);
-	if (ctrlpriv->caam_emi_slow)
-		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-	return 0;
+	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+		const u32 rdsta_if = RDSTA_IF0 << sh_idx;
+		const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
+		const u32 rdsta_mask = rdsta_if | rdsta_pr;
+		/*
+		 * If the corresponding bit is set, this state handle
+		 * was initialized by somebody else, so it's left alone.
+		 */
+		if (rdsta_if & state_handle_mask) {
+			if (rdsta_pr & state_handle_mask)
+				continue;
+
+			dev_info(ctrldev,
+				 "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
+				 sh_idx);
+
+			ret = deinstantiate_rng(ctrldev, rdsta_if);
+			if (ret)
+				break;
+		}
+
+		/* Create the descriptor for instantiating RNG State Handle */
+		build_instantiation_desc(desc, sh_idx, gen_sk);
+
+		/* Try to run it through DECO0 */
+		ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+		/*
+		 * If ret is not 0, or descriptor status is not 0, then
+		 * something went wrong. No need to try the next state
+		 * handle (if available), bail out here.
+		 * Also, if for some reason, the State Handle didn't get
+		 * instantiated although the descriptor has finished
+		 * without any error (HW optimizations for later
+		 * CAAM eras), then try again.
+		 */
+		if (ret)
+			break;
+
+		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
+		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+		    (rdsta_val & rdsta_mask) != rdsta_mask) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+		/* Clear the contents before recreating the descriptor */
+		memset(desc, 0x00, CAAM_CMD_SZ * 7);
+	}
+
+	kfree(desc);
+
+	if (ret)
+		return ret;
+
+	return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev);
 }
 
 /*
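Note: the hunk above converts RNG teardown from an explicit remove() step into a device-managed action. A minimal sketch of the devm_add_action_or_reset() pattern it relies on; my_teardown/my_probe_step are illustrative names, not part of this driver:

    #include <linux/device.h>

    static void my_teardown(void *data)
    {
            struct device *dev = data;

            /* undo whatever the probe path set up for this device */
            dev_info(dev, "tearing down\n");
    }

    static int my_probe_step(struct device *dev)
    {
            /* ... acquire a resource here ... */

            /*
             * Register the undo action. If registration itself fails, the
             * action runs immediately and the error is returned, so the
             * caller never has to unwind by hand; otherwise it runs
             * automatically, in reverse registration order, on detach.
             */
            return devm_add_action_or_reset(dev, my_teardown, dev);
    }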
@@ -362,8 +356,12 @@
 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
 	r4tst = &ctrl->r4tst[0];
 
-	/* put RNG4 into program mode */
-	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
+	/*
+	 * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
+	 * properly invalidate the entropy in the entropy register and
+	 * force re-generation.
+	 */
+	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);
 
 	/*
 	 * Performance-wise, it does not make sense to
@@ -393,7 +391,8 @@
 	 * select raw sampling in both entropy shifter
 	 * and statistical checker; ; put RNG4 into run mode
 	 */
-	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
+	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
+		      RTMCTL_SAMP_MODE_RAW_ES_SC);
 }
 
 static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
399398 static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
....@@ -444,7 +443,9 @@
444443 * by u-boot.
445444 * In case this property is not passed an attempt to retrieve the CAAM
446445 * era via register reads will be made.
447
- **/
446
+ *
447
+ * @ctrl: controller region
448
+ */
448449 static int caam_get_era(struct caam_ctrl __iomem *ctrl)
449450 {
450451 struct device_node *caam_node;
@@ -461,6 +462,24 @@
 		return caam_get_era_from_hw(ctrl);
 }
 
+/*
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
+ * have an issue wherein AXI bus transactions may not occur in the correct
+ * order. This isn't a problem running single descriptors, but can be if
+ * running multiple concurrent descriptors. Reworking the driver to throttle
+ * to single requests is impractical, thus the workaround is to limit the AXI
+ * pipeline to a depth of 1 (from its default of 4) to preclude this situation
+ * from occurring.
+ */
+static void handle_imx6_err005766(u32 __iomem *mcr)
+{
+	if (of_machine_is_compatible("fsl,imx6q") ||
+	    of_machine_is_compatible("fsl,imx6dl") ||
+	    of_machine_is_compatible("fsl,imx6qp"))
+		clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
+			      1 << MCFGR_AXIPIPE_SHIFT);
+}
+
 static const struct of_device_id caam_match[] = {
 	{
 		.compatible = "fsl,sec-v4.0",
@@ -472,27 +491,147 @@
 };
 MODULE_DEVICE_TABLE(of, caam_match);
 
+struct caam_imx_data {
+	const struct clk_bulk_data *clks;
+	int num_clks;
+};
+
+static const struct clk_bulk_data caam_imx6_clks[] = {
+	{ .id = "ipg" },
+	{ .id = "mem" },
+	{ .id = "aclk" },
+	{ .id = "emi_slow" },
+};
+
+static const struct caam_imx_data caam_imx6_data = {
+	.clks = caam_imx6_clks,
+	.num_clks = ARRAY_SIZE(caam_imx6_clks),
+};
+
+static const struct clk_bulk_data caam_imx7_clks[] = {
+	{ .id = "ipg" },
+	{ .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx7_data = {
+	.clks = caam_imx7_clks,
+	.num_clks = ARRAY_SIZE(caam_imx7_clks),
+};
+
+static const struct clk_bulk_data caam_imx6ul_clks[] = {
+	{ .id = "ipg" },
+	{ .id = "mem" },
+	{ .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx6ul_data = {
+	.clks = caam_imx6ul_clks,
+	.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
+};
+
+static const struct clk_bulk_data caam_vf610_clks[] = {
+	{ .id = "ipg" },
+};
+
+static const struct caam_imx_data caam_vf610_data = {
+	.clks = caam_vf610_clks,
+	.num_clks = ARRAY_SIZE(caam_vf610_clks),
+};
+
+static const struct soc_device_attribute caam_imx_soc_table[] = {
+	{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
+	{ .soc_id = "i.MX6*",  .data = &caam_imx6_data },
+	{ .soc_id = "i.MX7*",  .data = &caam_imx7_data },
+	{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
+	{ .soc_id = "VF*",     .data = &caam_vf610_data },
+	{ .family = "Freescale i.MX" },
+	{ /* sentinel */ }
+};
+
+static void disable_clocks(void *data)
+{
+	struct caam_drv_private *ctrlpriv = data;
+
+	clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
+}
+
+static int init_clocks(struct device *dev, const struct caam_imx_data *data)
+{
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+	int ret;
+
+	ctrlpriv->num_clks = data->num_clks;
+	ctrlpriv->clks = devm_kmemdup(dev, data->clks,
+				      data->num_clks * sizeof(data->clks[0]),
+				      GFP_KERNEL);
+	if (!ctrlpriv->clks)
+		return -ENOMEM;
+
+	ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
+	if (ret) {
+		dev_err(dev,
+			"Failed to request all necessary clocks\n");
+		return ret;
+	}
+
+	ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
+	if (ret) {
+		dev_err(dev,
+			"Failed to prepare/enable all necessary clocks\n");
+		return ret;
+	}
+
+	return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
+}
+
+static void caam_remove_debugfs(void *root)
+{
+	debugfs_remove_recursive(root);
+}
+
+#ifdef CONFIG_FSL_MC_BUS
+static bool check_version(struct fsl_mc_version *mc_version, u32 major,
+			  u32 minor, u32 revision)
+{
+	if (mc_version->major > major)
+		return true;
+
+	if (mc_version->major == major) {
+		if (mc_version->minor > minor)
+			return true;
+
+		if (mc_version->minor == minor &&
+		    mc_version->revision > revision)
+			return true;
+	}
+
+	return false;
+}
+#endif
+
+static bool needs_entropy_delay_adjustment(void)
+{
+	if (of_machine_is_compatible("fsl,imx6sx"))
+		return true;
+	return false;
+}
+
 /* Probe routine for CAAM top (controller) level */
 static int caam_probe(struct platform_device *pdev)
 {
 	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
 	u64 caam_id;
-	static const struct soc_device_attribute imx_soc[] = {
-		{.family = "Freescale i.MX"},
-		{},
-	};
+	const struct soc_device_attribute *imx_soc_match;
 	struct device *dev;
 	struct device_node *nprop, *np;
 	struct caam_ctrl __iomem *ctrl;
 	struct caam_drv_private *ctrlpriv;
-	struct clk *clk;
-#ifdef CONFIG_DEBUG_FS
-	struct caam_perfmon *perfmon;
-#endif
+	struct dentry *dfs_root;
 	u32 scfgr, comp_params;
-	u32 cha_vid_ls;
+	u8 rng_vid;
 	int pg_size;
 	int BLOCK_OFFSET = 0;
+	bool pr_support = false;
 
 	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
 	if (!ctrlpriv)
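Note: the per-SoC tables above feed the clk_bulk API, which replaces the four hand-rolled devm_clk_get()/clk_prepare_enable() blocks deleted from caam_probe() in the next hunk. A self-contained sketch of the same consumer pattern; the clock names here are illustrative:

    #include <linux/clk.h>
    #include <linux/device.h>

    static struct clk_bulk_data my_clks[] = {
            { .id = "ipg" },
            { .id = "aclk" },
    };

    static int my_enable_clocks(struct device *dev)
    {
            int ret;

            /* Look up every clock in one call; all-or-nothing. */
            ret = devm_clk_bulk_get(dev, ARRAY_SIZE(my_clks), my_clks);
            if (ret)
                    return ret;

            /* Prepare and enable as a group; unwinds internally on failure. */
            return clk_bulk_prepare_enable(ARRAY_SIZE(my_clks), my_clks);
    }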
@@ -502,101 +641,65 @@
 	dev_set_drvdata(dev, ctrlpriv);
 	nprop = pdev->dev.of_node;
 
-	caam_imx = (bool)soc_device_match(imx_soc);
+	imx_soc_match = soc_device_match(caam_imx_soc_table);
+	caam_imx = (bool)imx_soc_match;
 
-	/* Enable clocking */
-	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		dev_err(&pdev->dev,
-			"can't identify CAAM ipg clk: %d\n", ret);
-		return ret;
-	}
-	ctrlpriv->caam_ipg = clk;
+	if (imx_soc_match) {
+		if (!imx_soc_match->data) {
+			dev_err(dev, "No clock data provided for i.MX SoC");
+			return -EINVAL;
+		}
 
-	if (!of_machine_is_compatible("fsl,imx7d") &&
-	    !of_machine_is_compatible("fsl,imx7s")) {
-		clk = caam_drv_identify_clk(&pdev->dev, "mem");
-		if (IS_ERR(clk)) {
-			ret = PTR_ERR(clk);
-			dev_err(&pdev->dev,
-				"can't identify CAAM mem clk: %d\n", ret);
+		ret = init_clocks(dev, imx_soc_match->data);
+		if (ret)
 			return ret;
-		}
-		ctrlpriv->caam_mem = clk;
 	}
 
-	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		dev_err(&pdev->dev,
-			"can't identify CAAM aclk clk: %d\n", ret);
-		return ret;
-	}
-	ctrlpriv->caam_aclk = clk;
-
-	if (!of_machine_is_compatible("fsl,imx6ul") &&
-	    !of_machine_is_compatible("fsl,imx7d") &&
-	    !of_machine_is_compatible("fsl,imx7s")) {
-		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
-		if (IS_ERR(clk)) {
-			ret = PTR_ERR(clk);
-			dev_err(&pdev->dev,
-				"can't identify CAAM emi_slow clk: %d\n", ret);
-			return ret;
-		}
-		ctrlpriv->caam_emi_slow = clk;
-	}
-
-	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
-		return ret;
-	}
-
-	if (ctrlpriv->caam_mem) {
-		ret = clk_prepare_enable(ctrlpriv->caam_mem);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
-				ret);
-			goto disable_caam_ipg;
-		}
-	}
-
-	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
-		goto disable_caam_mem;
-	}
-
-	if (ctrlpriv->caam_emi_slow) {
-		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
-				ret);
-			goto disable_caam_aclk;
-		}
-	}
 
 	/* Get configuration properties from device tree */
 	/* First, get register page */
-	ctrl = of_iomap(nprop, 0);
-	if (ctrl == NULL) {
+	ctrl = devm_of_iomap(dev, nprop, 0, NULL);
+	ret = PTR_ERR_OR_ZERO(ctrl);
+	if (ret) {
 		dev_err(dev, "caam: of_iomap() failed\n");
-		ret = -ENOMEM;
-		goto disable_caam_emi_slow;
+		return ret;
 	}
 
 	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
 				  (CSTA_PLEND | CSTA_ALT_PLEND));
-
-	/* Finding the page size for using the CTPR_MS register */
 	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
-	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+	if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+		caam_ptr_sz = sizeof(u64);
+	else
+		caam_ptr_sz = sizeof(u32);
+	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+
+#ifdef CONFIG_CAAM_QI
+	/* If (DPAA 1.x) QI present, check whether dependencies are available */
+	if (ctrlpriv->qi_present && !caam_dpaa2) {
+		ret = qman_is_probed();
+		if (!ret) {
+			return -EPROBE_DEFER;
+		} else if (ret < 0) {
+			dev_err(dev, "failing probe due to qman probe error\n");
+			return -ENODEV;
+		}
+
+		ret = qman_portals_probed();
+		if (!ret) {
+			return -EPROBE_DEFER;
+		} else if (ret < 0) {
+			dev_err(dev, "failing probe due to qman portals probe error\n");
+			return -ENODEV;
+		}
+	}
+#endif
 
 	/* Allocating the BLOCK_OFFSET based on the supported page size on
 	 * the platform
 	 */
+	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
 	if (pg_size == 0)
 		BLOCK_OFFSET = PG_SIZE_4K;
 	else
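Note: the CONFIG_CAAM_QI block added above relies on the qman tri-state probe convention: 0 means the provider has not probed yet, negative means it probed and failed, positive means it is ready. A sketch of consuming such a dependency; my_dependency_probed() is a hypothetical stand-in for qman_is_probed()/qman_portals_probed():

    int my_dependency_probed(void);	/* hypothetical provider hook: 0, <0 or >0 */

    static int my_check_dependency(struct device *dev)
    {
            int ret = my_dependency_probed();

            if (!ret)			/* not probed yet */
                    return -EPROBE_DEFER;	/* driver core will retry later */

            if (ret < 0) {			/* probed and failed */
                    dev_err(dev, "dependency failed, giving up\n");
                    return -ENODEV;
            }

            return 0;			/* ready, continue probing */
    }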
@@ -614,6 +717,21 @@
 
 	/* Get the IRQ of the controller (for security violations only) */
 	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
+	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+	ctrlpriv->mc_en = !!np;
+	of_node_put(np);
+
+#ifdef CONFIG_FSL_MC_BUS
+	if (ctrlpriv->mc_en) {
+		struct fsl_mc_version *mc_version;
+
+		mc_version = fsl_mc_get_version();
+		if (mc_version)
+			pr_support = check_version(mc_version, 10, 20, 0);
+		else
+			return -EPROBE_DEFER;
+	}
+#endif
 
 	/*
 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
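Note: pr_support gates prediction-resistance handling on the Management Complex firmware version via check_version(mc_version, 10, 20, 0). As written, the comparison is strictly greater on each component, so the reference triple itself compares false. Worked cases as a sketch, with fields per struct fsl_mc_version from linux/fsl/mc.h:

    struct fsl_mc_version v = { .major = 10, .minor = 18, .revision = 5 };

    check_version(&v, 10, 20, 0);	/* false: 10.18.5 is older than 10.20.0 */

    v = (struct fsl_mc_version){ .major = 10, .minor = 20, .revision = 0 };
    check_version(&v, 10, 20, 0);	/* false: equal is not strictly newer */

    v = (struct fsl_mc_version){ .major = 10, .minor = 20, .revision = 1 };
    check_version(&v, 10, 20, 0);	/* true: 10.20.1 > 10.20.0 */

    v = (struct fsl_mc_version){ .major = 11, .minor = 0, .revision = 0 };
    check_version(&v, 10, 20, 0);	/* true: 11.0.0 > 10.20.0 */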
@@ -621,21 +739,16 @@
 	 * In case of SoCs with Management Complex, MC f/w performs
 	 * the configuration.
 	 */
-	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
-	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
-	ctrlpriv->mc_en = !!np;
-	of_node_put(np);
-
 	if (!ctrlpriv->mc_en)
-		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
 			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
-			      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
-			      (sizeof(dma_addr_t) == sizeof(u64) ?
-			       MCFGR_LONG_PTR : 0));
+			      MCFGR_WDENABLE | MCFGR_LARGE_BURST);
+
+	handle_imx6_err005766(&ctrl->mcr);
 
 	/*
-	 * Read the Compile Time paramters and SCFGR to determine
-	 * if Virtualization is enabled for this platform
+	 * Read the Compile Time parameters and SCFGR to determine
+	 * if virtualization is enabled for this platform
 	 */
 	scfgr = rd_reg32(&ctrl->scfgr);
 
@@ -659,56 +772,26 @@
 		      JRSTART_JR1_START | JRSTART_JR2_START |
 		      JRSTART_JR3_START);
 
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		if (caam_dpaa2)
-			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
-		else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
-			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
-		else
-			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
-	} else {
-		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-	}
+	ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
 	if (ret) {
 		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
-		goto iounmap_ctrl;
+		return ret;
 	}
 
 	ctrlpriv->era = caam_get_era(ctrl);
+	ctrlpriv->domain = iommu_get_domain_for_dev(dev);
 
-	ret = of_platform_populate(nprop, caam_match, NULL, dev);
-	if (ret) {
-		dev_err(dev, "JR platform devices creation error\n");
-		goto iounmap_ctrl;
+	dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		ret = devm_add_action_or_reset(dev, caam_remove_debugfs,
+					       dfs_root);
+		if (ret)
+			return ret;
 	}
 
-#ifdef CONFIG_DEBUG_FS
-	/*
-	 * FIXME: needs better naming distinction, as some amalgamation of
-	 * "caam" and nprop->full_name. The OF name isn't distinctive,
-	 * but does separate instances
-	 */
-	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
-
-	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
-	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
-#endif
-
-	ring = 0;
-	for_each_available_child_of_node(nprop, np)
-		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
-					     ((__force uint8_t *)ctrl +
-					     (ring + JR_BLOCK_NUMBER) *
-					      BLOCK_OFFSET
-					     );
-			ctrlpriv->total_jobrs++;
-			ring++;
-		}
+	caam_debugfs_init(ctrlpriv, dfs_root);
 
 	/* Check to see if (DPAA 1.x) QI present. If so, enable */
-	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
 	if (ctrlpriv->qi_present && !caam_dpaa2) {
 		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
 			       ((__force uint8_t *)ctrl +
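Note: caam_get_dma_mask(), defined elsewhere in the driver, centralizes the mask selection that the removed block open-coded. A hedged reconstruction of that removed decision for reference only; the real helper may differ, and my_caam_dma_mask is an illustrative name:

    #include <linux/dma-mapping.h>
    #include <linux/of.h>

    /* Sketch only: mirrors the removed open-coded logic, not the real helper. */
    static u64 my_caam_dma_mask(struct device *dev)
    {
            if (sizeof(dma_addr_t) != sizeof(u64))
                    return DMA_BIT_MASK(32);	/* 32-bit kernels */

            if (caam_dpaa2)
                    return DMA_BIT_MASK(49);	/* DPAA2 parts */

            if (of_device_is_compatible(dev->of_node, "fsl,sec-v5.0"))
                    return DMA_BIT_MASK(40);	/* SEC v5.0 */

            return DMA_BIT_MASK(36);		/* older SECs */
    }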
@@ -725,22 +808,38 @@
 #endif
 	}
 
+	ring = 0;
+	for_each_available_child_of_node(nprop, np)
+		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+					     ((__force uint8_t *)ctrl +
+					     (ring + JR_BLOCK_NUMBER) *
+					      BLOCK_OFFSET
+					     );
+			ctrlpriv->total_jobrs++;
+			ring++;
+		}
+
 	/* If no QI and no rings specified, quit and go home */
 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
 		dev_err(dev, "no queues configured, terminating\n");
-		ret = -ENOMEM;
-		goto caam_remove;
+		return -ENOMEM;
 	}
 
-	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+	if (ctrlpriv->era < 10)
+		rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+	else
+		rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+			   CHA_VER_VID_SHIFT;
 
 	/*
 	 * If SEC has RNG version >= 4 and RNG state handle has not been
 	 * already instantiated, do RNG instantiation
 	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
 	 */
-	if (!ctrlpriv->mc_en &&
-	    (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+	if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
 		ctrlpriv->rng4_sh_init =
 			rd_reg32(&ctrl->r4tst[0].rdsta);
 		/*
@@ -750,11 +849,11 @@
 		 * to regenerate these keys before the next POR.
 		 */
 		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
-		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+		ctrlpriv->rng4_sh_init &= RDSTA_MASK;
 		do {
 			int inst_handles =
 				rd_reg32(&ctrl->r4tst[0].rdsta) &
-					RDSTA_IFMASK;
+					RDSTA_MASK;
 			/*
 			 * If either SH were instantiated by somebody else
 			 * (e.g. u-boot) then it is assumed that the entropy
@@ -763,6 +862,8 @@
 			 * Also, if a handle was instantiated, do not change
 			 * the TRNG parameters.
 			 */
+			if (needs_entropy_delay_adjustment())
+				ent_delay = 12000;
 			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
 				dev_info(dev,
 					 "Entropy delay = %u\n",
@@ -772,13 +873,22 @@
 			}
 			/*
 			 * if instantiate_rng(...) fails, the loop will rerun
-			 * and the kick_trng(...) function will modfiy the
+			 * and the kick_trng(...) function will modify the
 			 * upper and lower limits of the entropy sampling
-			 * interval, leading to a sucessful initialization of
+			 * interval, leading to a successful initialization of
 			 * the RNG.
 			 */
 			ret = instantiate_rng(dev, inst_handles,
 					      gen_sk);
+			/*
+			 * Entropy delay is determined via TRNG characterization.
+			 * TRNG characterization is run across different voltages
+			 * and temperatures.
+			 * If worst case value for ent_dly is identified,
+			 * the loop can be skipped for that platform.
+			 */
+			if (needs_entropy_delay_adjustment())
+				break;
 			if (ret == -EAGAIN)
 				/*
 				 * if here, the loop will rerun,
@@ -788,13 +898,13 @@
 		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
 		if (ret) {
 			dev_err(dev, "failed to instantiate RNG");
-			goto caam_remove;
+			return ret;
 		}
 		/*
-		 * Set handles init'ed by this module as the complement of the
-		 * already initialized ones
+		 * Set handles initialized by this module as the complement of
+		 * the already initialized ones
 		 */
-		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
 
 		/* Enable RDB bit so that RNG works faster */
 		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
@@ -811,83 +921,10 @@
 	dev_info(dev, "job rings = %d, qi = %d\n",
 		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
 
-#ifdef CONFIG_DEBUG_FS
-	debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->req_dequeued,
-			    &caam_fops_u64_ro);
-	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->ob_enc_req,
-			    &caam_fops_u64_ro);
-	debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->ib_dec_req,
-			    &caam_fops_u64_ro);
-	debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->ob_enc_bytes,
-			    &caam_fops_u64_ro);
-	debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->ob_prot_bytes,
-			    &caam_fops_u64_ro);
-	debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->ib_dec_bytes,
-			    &caam_fops_u64_ro);
-	debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->ib_valid_bytes,
-			    &caam_fops_u64_ro);
+	ret = devm_of_platform_populate(dev);
+	if (ret)
+		dev_err(dev, "JR platform devices creation error\n");
 
-	/* Controller level - global status values */
-	debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->faultaddr,
-			    &caam_fops_u32_ro);
-	debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->faultdetail,
-			    &caam_fops_u32_ro);
-	debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
-			    ctrlpriv->ctl, &perfmon->status,
-			    &caam_fops_u32_ro);
-
-	/* Internal covering keys (useful in non-secure mode only) */
-	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
-	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
-	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
-						S_IRUSR |
-						S_IRGRP | S_IROTH,
-						ctrlpriv->ctl,
-						&ctrlpriv->ctl_kek_wrap);
-
-	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
-	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
-	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
-						 S_IRUSR |
-						 S_IRGRP | S_IROTH,
-						 ctrlpriv->ctl,
-						 &ctrlpriv->ctl_tkek_wrap);
-
-	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
-	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
-	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
-						 S_IRUSR |
-						 S_IRGRP | S_IROTH,
-						 ctrlpriv->ctl,
-						 &ctrlpriv->ctl_tdsk_wrap);
-#endif
-	return 0;
-
-caam_remove:
-	caam_remove(pdev);
-	return ret;
-
-iounmap_ctrl:
-	iounmap(ctrl);
-disable_caam_emi_slow:
-	if (ctrlpriv->caam_emi_slow)
-		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-disable_caam_aclk:
-	clk_disable_unprepare(ctrlpriv->caam_aclk);
-disable_caam_mem:
-	if (ctrlpriv->caam_mem)
-		clk_disable_unprepare(ctrlpriv->caam_mem);
-disable_caam_ipg:
-	clk_disable_unprepare(ctrlpriv->caam_ipg);
 	return ret;
 }
 
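Note: the remove() path and the goto error ladder can disappear because every resource in the new probe() is device-managed; child platform devices now come from devm_of_platform_populate(), which depopulates them automatically on detach. A minimal sketch of that shape; the my_driver naming is illustrative:

    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            int ret;

            /* Create child platform devices from DT; undone automatically. */
            ret = devm_of_platform_populate(&pdev->dev);
            if (ret)
                    dev_err(&pdev->dev, "child device creation failed\n");

            return ret;
    }

    static struct platform_driver my_driver = {
            .driver = { .name = "my-driver" },
            .probe  = my_probe,
            /* no .remove needed: devm actions run in reverse order on unbind */
    };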
@@ -897,7 +934,6 @@
 	.driver = {
 		.of_match_table = caam_match,
 	},
 	.probe = caam_probe,
-	.remove = caam_remove,
 };
 
 module_platform_driver(caam_driver);