2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/caam/ctrl.c
@@ -1,41 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* * CAAM control-plane driver backend
  * Controller-level driver, kernel property detection, initialization
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
  */

 #include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>

 #include "compat.h"
+#include "debugfs.h"
 #include "regs.h"
 #include "intern.h"
 #include "jr.h"
 #include "desc_constr.h"
 #include "ctrl.h"

-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
 bool caam_dpaa2;
 EXPORT_SYMBOL(caam_dpaa2);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);

 #ifdef CONFIG_CAAM_QI
 #include "qi.h"
 #endif
-
-/*
- * i.MX targets tend to have clock control subsystems that can
- * enable/disable clocking to our device.
- */
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
- char *clk_name)
-{
- return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
-}

 /*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -48,7 +38,8 @@
 init_job_desc(desc, 0);

 op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
+ OP_ALG_PR_ON;

 /* INIT RNG in non-test mode */
 append_operation(desc, op_flags);
@@ -64,7 +55,7 @@

 /*
  * load 1 to clear written reg:
- * resets the done interrrupt and returns the RNG to idle.
+ * resets the done interrupt and returns the RNG to idle.
  */
 append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

@@ -105,11 +96,19 @@
 struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
 struct caam_deco __iomem *deco = ctrlpriv->deco;
 unsigned int timeout = 100000;
- u32 deco_dbg_reg, flags;
+ u32 deco_dbg_reg, deco_state, flags;
 int i;


- if (ctrlpriv->virt_en == 1) {
+ if (ctrlpriv->virt_en == 1 ||
+ /*
+ * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
+ * and the following steps should be performed regardless
+ */
+ of_machine_is_compatible("fsl,imx8mq") ||
+ of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp")) {
 clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

 while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -148,13 +147,22 @@
 timeout = 10000000;
 do {
 deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+
+ if (ctrlpriv->era < 10)
+ deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
+ DESC_DBG_DECO_STAT_SHIFT;
+ else
+ deco_state = (rd_reg32(&deco->dbg_exec) &
+ DESC_DER_DECO_STAT_MASK) >>
+ DESC_DER_DECO_STAT_SHIFT;
+
 /*
- * If an error occured in the descriptor, then
+ * If an error occurred in the descriptor, then
 * the DECO status field will be set to 0x0D
 */
- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
- DESC_DBG_DECO_STAT_HOST_ERR)
+ if (deco_state == DECO_STAT_HOST_ERR)
 break;
+
 cpu_relax();
 } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

@@ -171,81 +179,6 @@
 return -EAGAIN;

 return 0;
-}
-
-/*
- * instantiate_rng - builds and executes a descriptor on DECO0,
- * which initializes the RNG block.
- * @ctrldev - pointer to device
- * @state_handle_mask - bitmask containing the instantiation status
- * for the RNG4 state handles which exist in
- * the RNG4 block: 1 if it's been instantiated
- * by an external entry, 0 otherwise.
- * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
- * Caution: this can be done only once; if the keys need to be
- * regenerated, a POR is required
- *
- * Return: - 0 if no error occurred
- * - -ENOMEM if there isn't enough memory to allocate the descriptor
- * - -ENODEV if DECO0 couldn't be acquired
- * - -EAGAIN if an error occurred when executing the descriptor
- * f.i. there was a RNG hardware error due to not "good enough"
- * entropy being aquired.
- */
-static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
- int gen_sk)
-{
- struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
- struct caam_ctrl __iomem *ctrl;
- u32 *desc, status = 0, rdsta_val;
- int ret = 0, sh_idx;
-
- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
- /*
- * If the corresponding bit is set, this state handle
- * was initialized by somebody else, so it's left alone.
- */
- if ((1 << sh_idx) & state_handle_mask)
- continue;
-
- /* Create the descriptor for instantiating RNG State Handle */
- build_instantiation_desc(desc, sh_idx, gen_sk);
-
- /* Try to run it through DECO0 */
- ret = run_descriptor_deco0(ctrldev, desc, &status);
-
- /*
- * If ret is not 0, or descriptor status is not 0, then
- * something went wrong. No need to try the next state
- * handle (if available), bail out here.
- * Also, if for some reason, the State Handle didn't get
- * instantiated although the descriptor has finished
- * without any error (HW optimizations for later
- * CAAM eras), then try again.
- */
- if (ret)
- break;
-
- rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
- if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
- !(rdsta_val & (1 << sh_idx))) {
- ret = -EAGAIN;
- break;
- }
-
- dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
- /* Clear the contents before recreating the descriptor */
- memset(desc, 0x00, CAAM_CMD_SZ * 7);
- }
-
- kfree(desc);
-
- return ret;
 }

 /*
@@ -266,7 +199,7 @@
 u32 *desc, status;
 int sh_idx, ret = 0;

- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
 if (!desc)
 return -ENOMEM;

@@ -302,47 +235,110 @@
 return ret;
 }

-static int caam_remove(struct platform_device *pdev)
+static void devm_deinstantiate_rng(void *data)
 {
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_ctrl __iomem *ctrl;
-
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
-
- /* Remove platform devices under the crypto node */
- of_platform_depopulate(ctrldev);
-
-#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qidev)
- caam_qi_shutdown(ctrlpriv->qidev);
-#endif
+ struct device *ctrldev = data;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);

 /*
 * De-initialize RNG state handles initialized by this driver.
 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
 */
- if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
+ if (ctrlpriv->rng4_sh_init)
 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+}

- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ * which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ * by an external entry, 0 otherwise.
+ * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ * Caution: this can be done only once; if the keys need to be
+ * regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ * f.i. there was a RNG hardware error due to not "good enough"
+ * entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ int gen_sk)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_ctrl __iomem *ctrl;
+ u32 *desc, status = 0, rdsta_val;
+ int ret = 0, sh_idx;

- /* Unmap controller region */
- iounmap(ctrl);
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
+ if (!desc)
+ return -ENOMEM;

- /* shut clocks off before finalizing shutdown */
- clk_disable_unprepare(ctrlpriv->caam_ipg);
- if (ctrlpriv->caam_mem)
- clk_disable_unprepare(ctrlpriv->caam_mem);
- clk_disable_unprepare(ctrlpriv->caam_aclk);
- if (ctrlpriv->caam_emi_slow)
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
- return 0;
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ const u32 rdsta_if = RDSTA_IF0 << sh_idx;
+ const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
+ const u32 rdsta_mask = rdsta_if | rdsta_pr;
+
+ /* Clear the contents before using the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+
+ /*
+ * If the corresponding bit is set, this state handle
+ * was initialized by somebody else, so it's left alone.
+ */
+ if (rdsta_if & state_handle_mask) {
+ if (rdsta_pr & state_handle_mask)
+ continue;
+
+ dev_info(ctrldev,
+ "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
+ sh_idx);
+
+ ret = deinstantiate_rng(ctrldev, rdsta_if);
+ if (ret)
+ break;
+ }
+
+ /* Create the descriptor for instantiating RNG State Handle */
+ build_instantiation_desc(desc, sh_idx, gen_sk);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ /*
+ * If ret is not 0, or descriptor status is not 0, then
+ * something went wrong. No need to try the next state
+ * handle (if available), bail out here.
+ * Also, if for some reason, the State Handle didn't get
+ * instantiated although the descriptor has finished
+ * without any error (HW optimizations for later
+ * CAAM eras), then try again.
+ */
+ if (ret)
+ break;
+
+ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
+ if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+ (rdsta_val & rdsta_mask) != rdsta_mask) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+ }
+
+ kfree(desc);
+
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev);
 }

 /*
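
The hunk above replaces the manual unwinding that caam_remove() used to do with a device-managed cleanup action: instantiate_rng() now registers devm_deinstantiate_rng() with the device, so the RNG state handles are torn down automatically on unbind or on a later probe failure. A minimal sketch of the devm_add_action_or_reset() idiom, with illustrative names (my_teardown and my_probe_step are not part of this driver):

#include <linux/device.h>

/* Runs automatically on driver unbind, or immediately if registration fails. */
static void my_teardown(void *data)
{
        struct device *dev = data;

        dev_info(dev, "device-managed cleanup ran\n");
}

static int my_probe_step(struct device *dev)
{
        /* ... acquire a resource here ... */

        /*
         * If registering the action fails, devm_add_action_or_reset()
         * invokes my_teardown() right away and returns the error, so
         * the caller only has to propagate the return code.
         */
        return devm_add_action_or_reset(dev, my_teardown, dev);
}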
@@ -362,8 +358,12 @@
 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
 r4tst = &ctrl->r4tst[0];

- /* put RNG4 into program mode */
- clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
+ /*
+ * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
+ * properly invalidate the entropy in the entropy register and
+ * force re-generation.
+ */
+ clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);

 /*
 * Performance-wise, it does not make sense to
@@ -393,7 +393,8 @@
 * select raw sampling in both entropy shifter
 * and statistical checker; ; put RNG4 into run mode
 */
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
+ RTMCTL_SAMP_MODE_RAW_ES_SC);
 }

 static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
@@ -444,7 +445,9 @@
 * by u-boot.
 * In case this property is not passed an attempt to retrieve the CAAM
 * era via register reads will be made.
- **/
+ *
+ * @ctrl: controller region
+ */
 static int caam_get_era(struct caam_ctrl __iomem *ctrl)
 {
 struct device_node *caam_node;
@@ -461,6 +464,24 @@
 return caam_get_era_from_hw(ctrl);
 }

+/*
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
+ * have an issue wherein AXI bus transactions may not occur in the correct
+ * order. This isn't a problem running single descriptors, but can be if
+ * running multiple concurrent descriptors. Reworking the driver to throttle
+ * to single requests is impractical, thus the workaround is to limit the AXI
+ * pipeline to a depth of 1 (from it's default of 4) to preclude this situation
+ * from occurring.
+ */
+static void handle_imx6_err005766(u32 __iomem *mcr)
+{
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl") ||
+ of_machine_is_compatible("fsl,imx6qp"))
+ clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
+ 1 << MCFGR_AXIPIPE_SHIFT);
+}
+
 static const struct of_device_id caam_match[] = {
 {
 .compatible = "fsl,sec-v4.0",
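
The handle_imx6_err005766() helper added above uses clrsetbits_32(), the driver's read-modify-write register accessor from regs.h. Expanded by hand, the call is roughly equivalent to the following sketch (rd_reg32()/wr_reg32() are the endianness-aware CAAM accessors):

/* clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK, 1 << MCFGR_AXIPIPE_SHIFT) ~= */
u32 v = rd_reg32(mcr);          /* read the current MCFGR value */

v &= ~MCFGR_AXIPIPE_MASK;       /* clear the AXI pipeline depth field */
v |= 1 << MCFGR_AXIPIPE_SHIFT;  /* request a pipeline depth of 1 */
wr_reg32(mcr, v);               /* write the new configuration back */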
@@ -472,27 +493,147 @@
 };
 MODULE_DEVICE_TABLE(of, caam_match);

+struct caam_imx_data {
+ const struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static const struct clk_bulk_data caam_imx6_clks[] = {
+ { .id = "ipg" },
+ { .id = "mem" },
+ { .id = "aclk" },
+ { .id = "emi_slow" },
+};
+
+static const struct caam_imx_data caam_imx6_data = {
+ .clks = caam_imx6_clks,
+ .num_clks = ARRAY_SIZE(caam_imx6_clks),
+};
+
+static const struct clk_bulk_data caam_imx7_clks[] = {
+ { .id = "ipg" },
+ { .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx7_data = {
+ .clks = caam_imx7_clks,
+ .num_clks = ARRAY_SIZE(caam_imx7_clks),
+};
+
+static const struct clk_bulk_data caam_imx6ul_clks[] = {
+ { .id = "ipg" },
+ { .id = "mem" },
+ { .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx6ul_data = {
+ .clks = caam_imx6ul_clks,
+ .num_clks = ARRAY_SIZE(caam_imx6ul_clks),
+};
+
+static const struct clk_bulk_data caam_vf610_clks[] = {
+ { .id = "ipg" },
+};
+
+static const struct caam_imx_data caam_vf610_data = {
+ .clks = caam_vf610_clks,
+ .num_clks = ARRAY_SIZE(caam_vf610_clks),
+};
+
+static const struct soc_device_attribute caam_imx_soc_table[] = {
+ { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
+ { .soc_id = "i.MX6*", .data = &caam_imx6_data },
+ { .soc_id = "i.MX7*", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8M*", .data = &caam_imx7_data },
+ { .soc_id = "VF*", .data = &caam_vf610_data },
+ { .family = "Freescale i.MX" },
+ { /* sentinel */ }
+};
+
+static void disable_clocks(void *data)
+{
+ struct caam_drv_private *ctrlpriv = data;
+
+ clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
+}
+
+static int init_clocks(struct device *dev, const struct caam_imx_data *data)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ int ret;
+
+ ctrlpriv->num_clks = data->num_clks;
+ ctrlpriv->clks = devm_kmemdup(dev, data->clks,
+ data->num_clks * sizeof(data->clks[0]),
+ GFP_KERNEL);
+ if (!ctrlpriv->clks)
+ return -ENOMEM;
+
+ ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
+ if (ret) {
+ dev_err(dev,
+ "Failed to request all necessary clocks\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
+ if (ret) {
+ dev_err(dev,
+ "Failed to prepare/enable all necessary clocks\n");
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
+}
+
+static void caam_remove_debugfs(void *root)
+{
+ debugfs_remove_recursive(root);
+}
+
+#ifdef CONFIG_FSL_MC_BUS
+static bool check_version(struct fsl_mc_version *mc_version, u32 major,
+ u32 minor, u32 revision)
+{
+ if (mc_version->major > major)
+ return true;
+
+ if (mc_version->major == major) {
+ if (mc_version->minor > minor)
+ return true;
+
+ if (mc_version->minor == minor &&
+ mc_version->revision > revision)
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+static bool needs_entropy_delay_adjustment(void)
+{
+ if (of_machine_is_compatible("fsl,imx6sx"))
+ return true;
+ return false;
+}
+
 /* Probe routine for CAAM top (controller) level */
 static int caam_probe(struct platform_device *pdev)
 {
 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
 u64 caam_id;
- static const struct soc_device_attribute imx_soc[] = {
- {.family = "Freescale i.MX"},
- {},
- };
+ const struct soc_device_attribute *imx_soc_match;
 struct device *dev;
 struct device_node *nprop, *np;
 struct caam_ctrl __iomem *ctrl;
 struct caam_drv_private *ctrlpriv;
- struct clk *clk;
-#ifdef CONFIG_DEBUG_FS
- struct caam_perfmon *perfmon;
-#endif
+ struct dentry *dfs_root;
 u32 scfgr, comp_params;
- u32 cha_vid_ls;
+ u8 rng_vid;
 int pg_size;
 int BLOCK_OFFSET = 0;
+ bool pr_support = false;

 ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
 if (!ctrlpriv)
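
The init_clocks()/disable_clocks() pair introduced above combines the clk_bulk API with the same devm action idiom, replacing the long per-clock get/prepare/error-unwind chains deleted from caam_probe() further down. A minimal sketch of that combination for a hypothetical consumer with two clocks (the clock names here are illustrative, not tied to this driver):

#include <linux/clk.h>
#include <linux/device.h>

static void my_disable_clocks(void *data)
{
        struct clk_bulk_data *clks = data;

        clk_bulk_disable_unprepare(2, clks);
}

static int my_enable_clocks(struct device *dev, struct clk_bulk_data *clks)
{
        int ret;

        clks[0].id = "ipg";     /* illustrative clock names */
        clks[1].id = "aclk";

        ret = devm_clk_bulk_get(dev, 2, clks);  /* one call requests them all */
        if (ret)
                return ret;

        ret = clk_bulk_prepare_enable(2, clks); /* one call enables them all */
        if (ret)
                return ret;

        /* Undo the prepare/enable automatically on unbind or probe failure. */
        return devm_add_action_or_reset(dev, my_disable_clocks, clks);
}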
@@ -502,101 +643,65 @@
 dev_set_drvdata(dev, ctrlpriv);
 nprop = pdev->dev.of_node;

- caam_imx = (bool)soc_device_match(imx_soc);
+ imx_soc_match = soc_device_match(caam_imx_soc_table);
+ caam_imx = (bool)imx_soc_match;

- /* Enable clocking */
- clk = caam_drv_identify_clk(&pdev->dev, "ipg");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM ipg clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_ipg = clk;
+ if (imx_soc_match) {
+ if (!imx_soc_match->data) {
+ dev_err(dev, "No clock data provided for i.MX SoC");
+ return -EINVAL;
+ }

- if (!of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
- clk = caam_drv_identify_clk(&pdev->dev, "mem");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM mem clk: %d\n", ret);
+ ret = init_clocks(dev, imx_soc_match->data);
+ if (ret)
 return ret;
- }
- ctrlpriv->caam_mem = clk;
 }

- clk = caam_drv_identify_clk(&pdev->dev, "aclk");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM aclk clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_aclk = clk;
-
- if (!of_machine_is_compatible("fsl,imx6ul") &&
- !of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_emi_slow = clk;
- }
-
- ret = clk_prepare_enable(ctrlpriv->caam_ipg);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
- return ret;
- }
-
- if (ctrlpriv->caam_mem) {
- ret = clk_prepare_enable(ctrlpriv->caam_mem);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
- ret);
- goto disable_caam_ipg;
- }
- }
-
- ret = clk_prepare_enable(ctrlpriv->caam_aclk);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
- goto disable_caam_mem;
- }
-
- if (ctrlpriv->caam_emi_slow) {
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
- }
- }

 /* Get configuration properties from device tree */
 /* First, get register page */
- ctrl = of_iomap(nprop, 0);
- if (ctrl == NULL) {
+ ctrl = devm_of_iomap(dev, nprop, 0, NULL);
+ ret = PTR_ERR_OR_ZERO(ctrl);
+ if (ret) {
 dev_err(dev, "caam: of_iomap() failed\n");
- ret = -ENOMEM;
- goto disable_caam_emi_slow;
+ return ret;
 }

 caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
 (CSTA_PLEND | CSTA_ALT_PLEND));
-
- /* Finding the page size for using the CTPR_MS register */
 comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
- pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+ if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+ caam_ptr_sz = sizeof(u64);
+ else
+ caam_ptr_sz = sizeof(u32);
+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+
+#ifdef CONFIG_CAAM_QI
+ /* If (DPAA 1.x) QI present, check whether dependencies are available */
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
+ ret = qman_is_probed();
+ if (!ret) {
+ return -EPROBE_DEFER;
+ } else if (ret < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+
+ ret = qman_portals_probed();
+ if (!ret) {
+ return -EPROBE_DEFER;
+ } else if (ret < 0) {
+ dev_err(dev, "failing probe due to qman portals probe error\n");
+ return -ENODEV;
+ }
+ }
+#endif

 /* Allocating the BLOCK_OFFSET based on the supported page size on
 * the platform
 */
+ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
 if (pg_size == 0)
 BLOCK_OFFSET = PG_SIZE_4K;
 else
@@ -614,6 +719,21 @@

 /* Get the IRQ of the controller (for security violations only) */
 ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
+ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+ ctrlpriv->mc_en = !!np;
+ of_node_put(np);
+
+#ifdef CONFIG_FSL_MC_BUS
+ if (ctrlpriv->mc_en) {
+ struct fsl_mc_version *mc_version;
+
+ mc_version = fsl_mc_get_version();
+ if (mc_version)
+ pr_support = check_version(mc_version, 10, 20, 0);
+ else
+ return -EPROBE_DEFER;
+ }
+#endif

 /*
 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
@@ -621,21 +741,16 @@
 * In case of SoCs with Management Complex, MC f/w performs
 * the configuration.
 */
- caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
- np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
- ctrlpriv->mc_en = !!np;
- of_node_put(np);
-
 if (!ctrlpriv->mc_en)
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
 MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST);
+
+ handle_imx6_err005766(&ctrl->mcr);

 /*
- * Read the Compile Time paramters and SCFGR to determine
- * if Virtualization is enabled for this platform
+ * Read the Compile Time parameters and SCFGR to determine
+ * if virtualization is enabled for this platform
 */
 scfgr = rd_reg32(&ctrl->scfgr);

@@ -659,56 +774,26 @@
 JRSTART_JR1_START | JRSTART_JR2_START |
 JRSTART_JR3_START);

- if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (caam_dpaa2)
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
- else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
- else
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
- } else {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- }
+ ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
 if (ret) {
 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
- goto iounmap_ctrl;
+ return ret;
 }

 ctrlpriv->era = caam_get_era(ctrl);
+ ctrlpriv->domain = iommu_get_domain_for_dev(dev);

- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- goto iounmap_ctrl;
+ dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ ret = devm_add_action_or_reset(dev, caam_remove_debugfs,
+ dfs_root);
+ if (ret)
+ return ret;
 }

-#ifdef CONFIG_DEBUG_FS
- /*
- * FIXME: needs better naming distinction, as some amalgamation of
- * "caam" and nprop->full_name. The OF name isn't distinctive,
- * but does separate instances
- */
- perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
-
- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
-#endif
-
- ring = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
- ((__force uint8_t *)ctrl +
- (ring + JR_BLOCK_NUMBER) *
- BLOCK_OFFSET
- );
- ctrlpriv->total_jobrs++;
- ring++;
- }
+ caam_debugfs_init(ctrlpriv, dfs_root);

 /* Check to see if (DPAA 1.x) QI present. If so, enable */
- ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
 if (ctrlpriv->qi_present && !caam_dpaa2) {
 ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
 ((__force uint8_t *)ctrl +
@@ -725,22 +810,38 @@
 #endif
 }

+ ring = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
 /* If no QI and no rings specified, quit and go home */
 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
 dev_err(dev, "no queues configured, terminating\n");
- ret = -ENOMEM;
- goto caam_remove;
+ return -ENOMEM;
 }

- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+ if (ctrlpriv->era < 10)
+ rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+ CHA_VER_VID_SHIFT;

 /*
 * If SEC has RNG version >= 4 and RNG state handle has not been
 * already instantiated, do RNG instantiation
 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
 */
- if (!ctrlpriv->mc_en &&
- (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
 ctrlpriv->rng4_sh_init =
 rd_reg32(&ctrl->r4tst[0].rdsta);
 /*
@@ -750,11 +851,11 @@
 * to regenerate these keys before the next POR.
 */
 gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
- ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ ctrlpriv->rng4_sh_init &= RDSTA_MASK;
 do {
 int inst_handles =
 rd_reg32(&ctrl->r4tst[0].rdsta) &
- RDSTA_IFMASK;
+ RDSTA_MASK;
 /*
 * If either SH were instantiated by somebody else
 * (e.g. u-boot) then it is assumed that the entropy
@@ -763,6 +864,8 @@
 * Also, if a handle was instantiated, do not change
 * the TRNG parameters.
 */
+ if (needs_entropy_delay_adjustment())
+ ent_delay = 12000;
 if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
 dev_info(dev,
 "Entropy delay = %u\n",
@@ -772,13 +875,22 @@
 }
 /*
 * if instantiate_rng(...) fails, the loop will rerun
- * and the kick_trng(...) function will modfiy the
+ * and the kick_trng(...) function will modify the
 * upper and lower limits of the entropy sampling
- * interval, leading to a sucessful initialization of
+ * interval, leading to a successful initialization of
 * the RNG.
 */
 ret = instantiate_rng(dev, inst_handles,
 gen_sk);
+ /*
+ * Entropy delay is determined via TRNG characterization.
+ * TRNG characterization is run across different voltages
+ * and temperatures.
+ * If worst case value for ent_dly is identified,
+ * the loop can be skipped for that platform.
+ */
+ if (needs_entropy_delay_adjustment())
+ break;
 if (ret == -EAGAIN)
 /*
 * if here, the loop will rerun,
@@ -788,13 +900,13 @@
 } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
 if (ret) {
 dev_err(dev, "failed to instantiate RNG");
- goto caam_remove;
+ return ret;
 }
 /*
- * Set handles init'ed by this module as the complement of the
- * already initialized ones
+ * Set handles initialized by this module as the complement of
+ * the already initialized ones
 */
- ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;

 /* Enable RDB bit so that RNG works faster */
 clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
@@ -811,83 +923,10 @@
 dev_info(dev, "job rings = %d, qi = %d\n",
 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

-#ifdef CONFIG_DEBUG_FS
- debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
- &caam_fops_u64_ro);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ dev_err(dev, "JR platform devices creation error\n");

- /* Controller level - global status values */
- debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr,
- &caam_fops_u32_ro);
- debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail,
- &caam_fops_u32_ro);
- debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status,
- &caam_fops_u32_ro);
-
- /* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
- ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_kek = debugfs_create_blob("kek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_kek_wrap);
-
- ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
- ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tkek_wrap);
-
- ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
- ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tdsk_wrap);
-#endif
- return 0;
-
-caam_remove:
- caam_remove(pdev);
- return ret;
-
-iounmap_ctrl:
- iounmap(ctrl);
-disable_caam_emi_slow:
- if (ctrlpriv->caam_emi_slow)
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-disable_caam_aclk:
- clk_disable_unprepare(ctrlpriv->caam_aclk);
-disable_caam_mem:
- if (ctrlpriv->caam_mem)
- clk_disable_unprepare(ctrlpriv->caam_mem);
-disable_caam_ipg:
- clk_disable_unprepare(ctrlpriv->caam_ipg);
 return ret;
 }

@@ -897,7 +936,6 @@
 .of_match_table = caam_match,
 },
 .probe = caam_probe,
- .remove = caam_remove,
 };

 module_platform_driver(caam_driver);
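
For context on the probe-path change: the driver now picks its per-SoC clock data by running soc_device_match() against caam_imx_soc_table, where glob patterns such as "i.MX6*" are matched against the SoC attributes registered at boot. A minimal sketch of that table-driven lookup, with made-up quirk data (my_quirks and the flag are illustrative only):

#include <linux/sys_soc.h>

struct my_quirks {
        bool limit_axi_pipeline;        /* made-up per-SoC flag */
};

static const struct my_quirks imx6_quirks = { .limit_axi_pipeline = true };

static const struct soc_device_attribute my_soc_table[] = {
        { .soc_id = "i.MX6*", .data = &imx6_quirks },   /* glob match */
        { .family = "Freescale i.MX" },                 /* match without data */
        { /* sentinel */ }
};

static bool my_needs_quirk(void)
{
        const struct soc_device_attribute *match;

        /* Returns the first table entry matching the running SoC, or NULL. */
        match = soc_device_match(my_soc_table);

        if (!match || !match->data)
                return false;

        return ((const struct my_quirks *)match->data)->limit_axi_pipeline;
}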