2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/caam/jr.c
@@ -1,8 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * CAAM/SEC 4.x transport/backend driver
  * JobR backend functionality
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
  */
 
 #include <linux/of_irq.h>
@@ -22,6 +24,52 @@
 } ____cacheline_aligned;
 
 static struct jr_driver_data driver_data;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void register_algs(struct caam_drv_private_jr *jrpriv,
+			  struct device *dev)
+{
+	mutex_lock(&algs_lock);
+
+	if (++active_devs != 1)
+		goto algs_unlock;
+
+	caam_algapi_init(dev);
+	caam_algapi_hash_init(dev);
+	caam_pkc_init(dev);
+	jrpriv->hwrng = !caam_rng_init(dev);
+	caam_qi_algapi_init(dev);
+
+algs_unlock:
+	mutex_unlock(&algs_lock);
+}
+
+static void unregister_algs(void)
+{
+	mutex_lock(&algs_lock);
+
+	if (--active_devs != 0)
+		goto algs_unlock;
+
+	caam_qi_algapi_exit();
+
+	caam_pkc_exit();
+	caam_algapi_hash_exit();
+	caam_algapi_exit();
+
+algs_unlock:
+	mutex_unlock(&algs_lock);
+}
+
+static void caam_jr_crypto_engine_exit(void *data)
+{
+	struct device *jrdev = data;
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+
+	/* Free the resources of crypto-engine */
+	crypto_engine_exit(jrpriv->engine);
+}
 
 static int caam_reset_hw_jr(struct device *dev)
 {
@@ -69,24 +117,11 @@
 static int caam_jr_shutdown(struct device *dev)
 {
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-	dma_addr_t inpbusaddr, outbusaddr;
 	int ret;
 
 	ret = caam_reset_hw_jr(dev);
 
 	tasklet_kill(&jrp->irqtask);
-
-	/* Release interrupt */
-	free_irq(jrp->irq, dev);
-
-	/* Free rings */
-	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
-	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
-	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-			  jrp->inpring, inpbusaddr);
-	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
-			  jrp->outring, outbusaddr);
-	kfree(jrp->entinfo);
 
 	return ret;
 }
@@ -100,6 +135,9 @@
 	jrdev = &pdev->dev;
 	jrpriv = dev_get_drvdata(jrdev);
 
+	if (jrpriv->hwrng)
+		caam_rng_exit(jrdev->parent);
+
 	/*
 	 * Return EBUSY if job ring already allocated.
 	 */
@@ -107,6 +145,9 @@
 		dev_err(jrdev, "Device is busy\n");
 		return -EBUSY;
 	}
+
+	/* Unregister JR-based RNG & crypto algorithms */
+	unregister_algs();
 
 	/* Remove the node from Physical JobR list maintained by driver */
 	spin_lock(&driver_data.jr_alloc_lock);
@@ -117,7 +158,6 @@
 	ret = caam_jr_shutdown(jrdev);
 	if (ret)
 		dev_err(jrdev, "Failed to shut down job ring\n");
-	irq_dispose_mapping(jrpriv->irq);
 
 	return ret;
 }
@@ -169,12 +209,12 @@
 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
 	u32 *userdesc, userstatus;
 	void *userarg;
+	u32 outring_used = 0;
 
-	while (rd_reg32(&jrp->rregs->outring_used)) {
+	while (outring_used ||
+	       (outring_used = rd_reg32(&jrp->rregs->outring_used))) {
 
 		head = READ_ONCE(jrp->head);
-
-		spin_lock(&jrp->outlock);
 
 		sw_idx = tail = jrp->tail;
 		hw_idx = jrp->out_ring_read_index;
@@ -182,7 +222,7 @@
 		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
 			sw_idx = (tail + i) & (JOBR_DEPTH - 1);
 
-			if (jrp->outring[hw_idx].desc ==
+			if (jr_outentry_desc(jrp->outring, hw_idx) ==
 			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
 				break; /* found */
 		}
@@ -191,18 +231,20 @@
 
 		/* Unmap just-run descriptor so we can post-process */
 		dma_unmap_single(dev,
-				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
+				 caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
+								  hw_idx)),
 				 jrp->entinfo[sw_idx].desc_size,
 				 DMA_TO_DEVICE);
 
 		/* mark completed, avoid matching on a recycled desc addr */
 		jrp->entinfo[sw_idx].desc_addr_dma = 0;
 
-		/* Stash callback params for use outside of lock */
+		/* Stash callback params */
 		usercall = jrp->entinfo[sw_idx].callbk;
 		userarg = jrp->entinfo[sw_idx].cbkarg;
 		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
-		userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
+		userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
+								hw_idx));
 
 		/*
 		 * Make sure all information from the job has been obtained
@@ -231,10 +273,9 @@
 			jrp->tail = tail;
 		}
 
-		spin_unlock(&jrp->outlock);
-
 		/* Finally, execute user's callback */
 		usercall(dev, userdesc, userstatus, userarg);
+		outring_used--;
 	}
 
 	/* reenable / unmask IRQs */
@@ -283,7 +324,7 @@
 
 /**
  * caam_jr_free() - Free the Job Ring
- * @rdev      - points to the dev that identifies the Job ring to
+ * @rdev:      points to the dev that identifies the Job ring to
  *             be released.
  **/
 void caam_jr_free(struct device *rdev)
@@ -295,11 +336,10 @@
 EXPORT_SYMBOL(caam_jr_free);
 
 /**
- * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
- * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
+ * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
  * descriptor.
- * @dev:  device of the job ring to be used. This device should have
- *        been assigned prior by caam_jr_register().
+ * @dev:  struct device of the job ring to be used
 * @desc: points to a job descriptor that execute our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
@@ -309,15 +349,15 @@
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
- *        @dev:    contains the job ring device that processed this
+ *        dev:     contains the job ring device that processed this
 *                  response.
- *        @desc:   descriptor that initiated the request, same as
+ *        desc:    descriptor that initiated the request, same as
 *                  "desc" being argued to caam_jr_enqueue().
- *        @status: untranslated status received from CAAM. See the
+ *        status:  untranslated status received from CAAM. See the
 *                  reference manual for a detailed description of
 *                  error meaning, or see the JRSTA definitions in the
 *                  register header file
- *        @areq:   optional pointer to an argument passed with the
+ *        areq:    optional pointer to an argument passed with the
 *                  original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
@@ -344,11 +384,11 @@
 	head = jrp->head;
 	tail = READ_ONCE(jrp->tail);
 
-	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+	if (!jrp->inpring_avail ||
 	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
 		spin_unlock_bh(&jrp->inplock);
 		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
-		return -EBUSY;
+		return -ENOSPC;
 	}
 
 	head_entry = &jrp->entinfo[head];
@@ -358,7 +398,7 @@
 	head_entry->cbkarg = areq;
 	head_entry->desc_addr_dma = desc_dma;
 
-	jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+	jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));
 
 	/*
 	 * Guarantee that the descriptor's DMA address has been written to
@@ -367,21 +407,25 @@
 	 */
 	smp_wmb();
 
-	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
-				    (JOBR_DEPTH - 1);
 	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
 
 	/*
 	 * Ensure that all job information has been written before
-	 * notifying CAAM that a new job was added to the input ring.
+	 * notifying CAAM that a new job was added to the input ring
+	 * using a memory barrier. The wr_reg32() uses api iowrite32()
+	 * to do the register write. iowrite32() issues a memory barrier
+	 * before the write operation.
 	 */
-	wmb();
 
 	wr_reg32(&jrp->rregs->inpring_jobadd, 1);
 
+	jrp->inpring_avail--;
+	if (!jrp->inpring_avail)
+		jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);
+
 	spin_unlock_bh(&jrp->inplock);
 
-	return 0;
+	return -EINPROGRESS;
 }
 EXPORT_SYMBOL(caam_jr_enqueue);
 
@@ -396,41 +440,31 @@
 
 	jrp = dev_get_drvdata(dev);
 
-	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
-	/* Connect job ring interrupt handler. */
-	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
-			    dev_name(dev), dev);
-	if (error) {
-		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
-			jrp->ridx, jrp->irq);
-		goto out_kill_deq;
-	}
-
 	error = caam_reset_hw_jr(dev);
 	if (error)
-		goto out_free_irq;
+		return error;
 
-	error = -ENOMEM;
-	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
-					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
+	jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
+					   JOBR_DEPTH, &inpbusaddr,
+					   GFP_KERNEL);
 	if (!jrp->inpring)
-		goto out_free_irq;
+		return -ENOMEM;
 
-	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
-					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+	jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
+					   JOBR_DEPTH, &outbusaddr,
+					   GFP_KERNEL);
 	if (!jrp->outring)
-		goto out_free_inpring;
+		return -ENOMEM;
 
-	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
+	jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
+				    GFP_KERNEL);
 	if (!jrp->entinfo)
-		goto out_free_outring;
+		return -ENOMEM;
 
 	for (i = 0; i < JOBR_DEPTH; i++)
 		jrp->entinfo[i].desc_addr_dma = !0;
 
 	/* Setup rings */
-	jrp->inp_ring_write_index = 0;
 	jrp->out_ring_read_index = 0;
 	jrp->head = 0;
 	jrp->tail = 0;
@@ -440,32 +474,33 @@
 	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
 	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
 
-	jrp->ringsize = JOBR_DEPTH;
+	jrp->inpring_avail = JOBR_DEPTH;
 
 	spin_lock_init(&jrp->inplock);
-	spin_lock_init(&jrp->outlock);
 
 	/* Select interrupt coalescing parameters */
 	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
 		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
 		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
 
-	return 0;
+	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
 
-out_free_outring:
-	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
-			  jrp->outring, outbusaddr);
-out_free_inpring:
-	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-			  jrp->inpring, inpbusaddr);
-	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
-out_free_irq:
-	free_irq(jrp->irq, dev);
-out_kill_deq:
-	tasklet_kill(&jrp->irqtask);
+	/* Connect job ring interrupt handler. */
+	error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+				 dev_name(dev), dev);
+	if (error) {
+		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+			jrp->ridx, jrp->irq);
+		tasklet_kill(&jrp->irqtask);
+	}
+
 	return error;
 }
 
+static void caam_jr_irq_dispose_mapping(void *data)
+{
+	irq_dispose_mapping((unsigned long)data);
+}
 
 /*
  * Probe routine for each detected JobR subsystem.
@@ -477,10 +512,11 @@
 	struct caam_job_ring __iomem *ctrl;
 	struct caam_drv_private_jr *jrpriv;
 	static int total_jobrs;
+	struct resource *r;
 	int error;
 
 	jrdev = &pdev->dev;
-	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
+	jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
 	if (!jrpriv)
 		return -ENOMEM;
 
@@ -492,45 +528,62 @@
 	nprop = pdev->dev.of_node;
 	/* Get configuration properties from device tree */
 	/* First, get register page */
-	ctrl = of_iomap(nprop, 0);
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(jrdev, "platform_get_resource() failed\n");
+		return -ENOMEM;
+	}
+
+	ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
 	if (!ctrl) {
-		dev_err(jrdev, "of_iomap() failed\n");
+		dev_err(jrdev, "devm_ioremap() failed\n");
 		return -ENOMEM;
 	}
 
 	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
 
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		if (caam_dpaa2)
-			error = dma_set_mask_and_coherent(jrdev,
-							  DMA_BIT_MASK(49));
-		else if (of_device_is_compatible(nprop,
-						 "fsl,sec-v5.0-job-ring"))
-			error = dma_set_mask_and_coherent(jrdev,
-							  DMA_BIT_MASK(40));
-		else
-			error = dma_set_mask_and_coherent(jrdev,
-							  DMA_BIT_MASK(36));
-	} else {
-		error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
-	}
+	error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
 	if (error) {
 		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
 			error);
-		iounmap(ctrl);
+		return error;
+	}
+
+	/* Initialize crypto engine */
+	jrpriv->engine = crypto_engine_alloc_init(jrdev, false);
+	if (!jrpriv->engine) {
+		dev_err(jrdev, "Could not init crypto-engine\n");
+		return -ENOMEM;
+	}
+
+	error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
+					 jrdev);
+	if (error)
+		return error;
+
+	/* Start crypto engine */
+	error = crypto_engine_start(jrpriv->engine);
+	if (error) {
+		dev_err(jrdev, "Could not start crypto-engine\n");
 		return error;
 	}
 
 	/* Identify the interrupt */
 	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
+	if (!jrpriv->irq) {
+		dev_err(jrdev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
+					 (void *)(unsigned long)jrpriv->irq);
+	if (error)
+		return error;
 
 	/* Now do the platform independent part */
 	error = caam_jr_init(jrdev); /* now turn on hardware */
-	if (error) {
-		irq_dispose_mapping(jrpriv->irq);
-		iounmap(ctrl);
+	if (error)
 		return error;
-	}
 
 	jrpriv->dev = jrdev;
 	spin_lock(&driver_data.jr_alloc_lock);
@@ -539,6 +592,8 @@
 
 	atomic_set(&jrpriv->tfm_count, 0);
 
+	register_algs(jrpriv, jrdev->parent);
+
 	return 0;
 }
 
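
Note (illustrative, not part of the patch above): after this change caam_jr_enqueue() reports success as -EINPROGRESS and a full input ring as -ENOSPC, instead of the old 0/-EBUSY convention documented in the removed kernel-doc text. The sketch below shows how a hypothetical caller might handle the new return values; my_result, my_done_cb and my_submit are made-up names, and the job descriptor "desc" is assumed to be already built in a DMA-able buffer.

/* Hypothetical caller sketch, assuming <linux/completion.h> and caam/jr.h */
struct my_result {
	struct completion done;
	u32 status;		/* untranslated CAAM status word */
};

static void my_done_cb(struct device *jrdev, u32 *desc, u32 status, void *arg)
{
	struct my_result *res = arg;

	res->status = status;
	complete(&res->done);	/* wake the submitter */
}

static int my_submit(struct device *jrdev, u32 *desc)
{
	struct my_result res;
	int ret;

	init_completion(&res.done);

	ret = caam_jr_enqueue(jrdev, desc, my_done_cb, &res);
	if (ret == -EINPROGRESS) {
		/* job accepted; wait for the dequeue tasklet's callback */
		wait_for_completion(&res.done);
		return res.status ? -EIO : 0;
	}

	/* -ENOSPC: input ring full; -EIO: descriptor could not be DMA-mapped */
	return ret;
}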