2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/dma/mxs-dma.c
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/list.h>
+#include <linux/dma/mxs-dma.h>
 
 #include <asm/irq.h>
 
@@ -77,6 +78,7 @@
 #define BM_CCW_COMMAND		(3 << 0)
 #define CCW_CHAIN		(1 << 2)
 #define CCW_IRQ			(1 << 3)
+#define CCW_WAIT4RDY		(1 << 5)
 #define CCW_DEC_SEM		(1 << 6)
 #define CCW_WAIT4END		(1 << 7)
 #define CCW_HALT_ON_TERM	(1 << 8)
@@ -139,7 +141,6 @@
 	void __iomem *base;
 	struct clk *clk;
 	struct dma_device dma_device;
-	struct device_dma_parameters dma_parms;
 	struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
 	struct platform_device *pdev;
 	unsigned int nr_channels;
@@ -166,29 +167,11 @@
 	}
 };
 
-static const struct platform_device_id mxs_dma_ids[] = {
-	{
-		.name = "imx23-dma-apbh",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
-	}, {
-		.name = "imx23-dma-apbx",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
-	}, {
-		.name = "imx28-dma-apbh",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
-	}, {
-		.name = "imx28-dma-apbx",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
-	}, {
-		/* end of list */
-	}
-};
-
 static const struct of_device_id mxs_dma_dt_ids[] = {
-	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
-	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
-	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
-	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], },
+	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], },
+	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], },
+	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
@@ -318,9 +301,9 @@
 	return dma_cookie_assign(tx);
 }
 
-static void mxs_dma_tasklet(unsigned long data)
+static void mxs_dma_tasklet(struct tasklet_struct *t)
 {
-	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
+	struct mxs_dma_chan *mxs_chan = from_tasklet(mxs_chan, t, tasklet);
 
 	dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
 }
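Note: this hunk follows the generic tasklet_setup()/from_tasklet() conversion pattern; the callback now receives the tasklet_struct pointer and recovers its container instead of an unsigned long cookie. A minimal sketch for reference (not part of the patch; "foo_chan"/"foo_tasklet" are invented names):

#include <linux/interrupt.h>

struct foo_chan {
	struct tasklet_struct tasklet;
	int pending;
};

static void foo_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the named tasklet member */
	struct foo_chan *c = from_tasklet(c, t, tasklet);

	c->pending = 0;
}

static void foo_chan_init(struct foo_chan *c)
{
	/* replaces tasklet_init(&c->tasklet, fn, (unsigned long)c) */
	tasklet_setup(&c->tasklet, foo_tasklet);
}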
@@ -416,9 +399,9 @@
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int ret;
 
-	mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
-					    CCW_BLOCK_SIZE,
-					    &mxs_chan->ccw_phys, GFP_KERNEL);
+	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+					   CCW_BLOCK_SIZE,
+					   &mxs_chan->ccw_phys, GFP_KERNEL);
 	if (!mxs_chan->ccw) {
 		ret = -ENOMEM;
 		goto err_alloc;
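Note: dma_zalloc_coherent() was removed from the kernel because dma_alloc_coherent() is guaranteed to return zeroed memory, so this is a drop-in rename. A tiny sketch (invented helper name; dev/size/phys are placeholders):

#include <linux/dma-mapping.h>

static void *foo_alloc_ccw(struct device *dev, size_t size, dma_addr_t *phys)
{
	/* memory returned by dma_alloc_coherent() is already zero-filled */
	return dma_alloc_coherent(dev, size, phys, GFP_KERNEL);
}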
@@ -477,16 +460,16 @@
  *            ......
  *            ->device_prep_slave_sg(0);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK);
  *            ......
  *    [3] If there are more than two DMA commands in the DMA chain, the code
  *        should be:
  *            ......
  *            ->device_prep_slave_sg(0);                                // First
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK); // Last
  *            ......
  */
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
@@ -500,13 +483,12 @@
 	struct scatterlist *sg;
 	u32 i, j;
 	u32 *pio;
-	bool append = flags & DMA_PREP_INTERRUPT;
-	int idx = append ? mxs_chan->desc_count : 0;
+	int idx = 0;
 
-	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
-		return NULL;
+	if (mxs_chan->status == DMA_IN_PROGRESS)
+		idx = mxs_chan->desc_count;
 
-	if (sg_len + (append ? idx : 0) > NUM_CCW) {
+	if (sg_len + idx > NUM_CCW) {
 		dev_err(mxs_dma->dma_device.dev,
 			"maximum number of sg exceeded: %d > %d\n",
 			sg_len, NUM_CCW);
@@ -520,7 +502,7 @@
 	 * If the sg is prepared with append flag set, the sg
 	 * will be appended to the last prepared sg.
 	 */
-	if (append) {
+	if (idx) {
 		BUG_ON(idx < 1);
 		ccw = &mxs_chan->ccw[idx - 1];
 		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
@@ -541,12 +523,14 @@
 		ccw->bits = 0;
 		ccw->bits |= CCW_IRQ;
 		ccw->bits |= CCW_DEC_SEM;
-		if (flags & DMA_CTRL_ACK)
+		if (flags & MXS_DMA_CTRL_WAIT4END)
 			ccw->bits |= CCW_WAIT4END;
 		ccw->bits |= CCW_HALT_ON_TERM;
 		ccw->bits |= CCW_TERM_FLUSH;
 		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
 		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+		if (flags & MXS_DMA_CTRL_WAIT4RDY)
+			ccw->bits |= CCW_WAIT4RDY;
 	} else {
 		for_each_sg(sgl, sg, sg_len, i) {
 			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
@@ -573,7 +557,7 @@
 			ccw->bits &= ~CCW_CHAIN;
 			ccw->bits |= CCW_IRQ;
 			ccw->bits |= CCW_DEC_SEM;
-			if (flags & DMA_CTRL_ACK)
+			if (flags & MXS_DMA_CTRL_WAIT4END)
 				ccw->bits |= CCW_WAIT4END;
 		}
 	}
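Note: the generic DMA_PREP_INTERRUPT/DMA_CTRL_ACK flags are replaced by the driver-private MXS_DMA_CTRL_WAIT4END and MXS_DMA_CTRL_WAIT4RDY flags from the new <linux/dma/mxs-dma.h> header, and appending to a running chain is now implied whenever the channel is already DMA_IN_PROGRESS. A hedged client-side sketch (invented function and variable names; assumes the channel was obtained with dma_request_chan()):

#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>

static int queue_last_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	/* WAIT4END on the final descriptor of the chain, the role that
	 * DMA_CTRL_ACK used to play for this driver */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}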
@@ -686,7 +670,7 @@
 	return mxs_chan->status;
 }
 
-static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 {
 	int ret;
 
@@ -716,7 +700,6 @@
 }
 
 struct mxs_dma_filter_param {
-	struct device_node *of_node;
 	unsigned int chan_id;
 };
 
@@ -726,9 +709,6 @@
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_irq;
-
-	if (mxs_dma->dma_device.dev->of_node != param->of_node)
-		return false;
 
 	if (chan->chan_id != param->chan_id)
 		return false;
@@ -752,20 +732,18 @@
 	if (dma_spec->args_count != 1)
 		return NULL;
 
-	param.of_node = ofdma->of_node;
 	param.chan_id = dma_spec->args[0];
 
 	if (param.chan_id >= mxs_dma->nr_channels)
 		return NULL;
 
-	return dma_request_channel(mask, mxs_dma_filter_fn, &param);
+	return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
+				     ofdma->of_node);
 }
 
-static int __init mxs_dma_probe(struct platform_device *pdev)
+static int mxs_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	const struct platform_device_id *id_entry;
-	const struct of_device_id *of_id;
 	const struct mxs_dma_type *dma_type;
 	struct mxs_dma_engine *mxs_dma;
 	struct resource *iores;
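Note: passing ofdma->of_node to __dma_request_channel() lets the dmaengine core do the controller matching that the filter function used to perform by comparing of_node pointers, which is why the of_node member of mxs_dma_filter_param and its check are dropped above. For reference, the relationship between the two request helpers (paraphrased from <linux/dmaengine.h>):

/* the four-argument form restricts the search to channels of the given
 * controller device_node; the legacy helper simply passes NULL */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np);

#define dma_request_channel(mask, x, y) \
	__dma_request_channel(&(mask), x, y, NULL)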
@@ -781,13 +759,7 @@
 		return ret;
 	}
 
-	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
-	if (of_id)
-		id_entry = of_id->data;
-	else
-		id_entry = platform_get_device_id(pdev);
-
-	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+	dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
 	mxs_dma->type = dma_type->type;
 	mxs_dma->dev_id = dma_type->id;
 
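Note: with the platform_device_id table gone and the of_device_id entries pointing straight at mxs_dma_types, of_device_get_match_data() replaces the of_match_device()/platform_get_device_id() dance. A hedged, generic sketch of the pattern (invented "foo" names; the real driver uses struct mxs_dma_type and skips the NULL check because it can only bind through the OF table):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_of_data {
	int id;
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_of_data *data;

	/* returns the .data of the matching of_device_id entry, NULL if none */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	return 0;
}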
@@ -813,8 +785,7 @@
 		mxs_chan->chan.device = &mxs_dma->dma_device;
 		dma_cookie_init(&mxs_chan->chan);
 
-		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
-			     (unsigned long) mxs_chan);
+		tasklet_setup(&mxs_chan->tasklet, mxs_dma_tasklet);
 
 
 		/* Add the channel to mxs_chan list */
@@ -830,7 +801,6 @@
 	mxs_dma->dma_device.dev = &pdev->dev;
 
 	/* mxs_dma gets 65535 bytes maximum sg size */
-	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
 	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
 
 	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
@@ -847,7 +817,7 @@
 	mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
 
-	ret = dma_async_device_register(&mxs_dma->dma_device);
+	ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
 	if (ret) {
 		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
 		return ret;
@@ -857,7 +827,6 @@
 	if (ret) {
 		dev_err(mxs_dma->dma_device.dev,
 			"failed to register controller\n");
-		dma_async_device_unregister(&mxs_dma->dma_device);
 	}
 
 	dev_info(mxs_dma->dma_device.dev, "initialized\n");
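Note: dmaenginem_async_device_register() is the device-managed (devres) variant, so the registration is undone automatically when the driver unbinds and the explicit dma_async_device_unregister() in the error path can go away. A minimal sketch of the pattern with invented "foo" names:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

struct foo_engine {
	struct dma_device dma_device;
};

/* of_dma translation hook, body omitted in this sketch */
static struct dma_chan *foo_xlate(struct of_phandle_args *dma_spec,
				  struct of_dma *ofdma);

static int foo_register(struct platform_device *pdev, struct foo_engine *foo)
{
	int ret;

	/* managed: devres unregisters the dmaengine on driver detach */
	ret = dmaenginem_async_device_register(&foo->dma_device);
	if (ret)
		return ret;

	/* no manual dma_async_device_unregister() needed on failure here */
	return of_dma_controller_register(pdev->dev.of_node, foo_xlate, foo);
}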
@@ -870,11 +839,7 @@
 		.name = "mxs-dma",
 		.of_match_table = mxs_dma_dt_ids,
 	},
-	.id_table = mxs_dma_ids,
+	.probe = mxs_dma_probe,
 };
 
-static int __init mxs_dma_module_init(void)
-{
-	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
-}
-subsys_initcall(mxs_dma_module_init);
+builtin_platform_driver(mxs_dma_driver);
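Note: the probe hook moves into the platform_driver struct and registration switches from platform_driver_probe()/subsys_initcall() to builtin_platform_driver(), which registers at device_initcall level; because probe may now run later (including on deferred probe), the __init markings on mxs_dma_probe() and mxs_dma_init() had to be dropped. Roughly what the macro expands to (sketch based on builtin_driver() in the driver core headers):

static int __init mxs_dma_driver_init(void)
{
	return platform_driver_register(&mxs_dma_driver);
}
device_initcall(mxs_dma_driver_init);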