commit 102a0743326a03cd1a1202ceda21e175b7d3575c (2024-02-20)
--- a/kernel/drivers/dma/imx-sdma.c
+++ b/kernel/drivers/dma/imx-sdma.c
@@ -335,7 +335,7 @@
  * @sdma: pointer to the SDMA engine for this channel
  * @channel: the channel number, matches dmaengine chan_id + 1
  * @direction: transfer type. Needed for setting SDMA script
- * @slave_config Slave configuration
+ * @slave_config: Slave configuration
  * @peripheral_type: Peripheral type. Needed for setting SDMA script
  * @event_id0: aka dma request line
  * @event_id1: for channels that use 2 events
@@ -354,8 +354,10 @@
  * @shp_addr: value for gReg[6]
  * @per_addr: value for gReg[2]
  * @status: status of dma channel
+ * @context_loaded: ensure context is only loaded once
  * @data: specific sdma interface structure
  * @bd_pool: dma_pool for bd
+ * @terminate_worker: used to call back into terminate work function
  */
 struct sdma_channel {
 	struct virt_dma_chan		vc;
@@ -418,11 +420,11 @@
 	int chnenbl0;
 	int num_events;
 	struct sdma_script_start_addrs	*script_addrs;
+	bool check_ratio;
 };

 struct sdma_engine {
 	struct device			*dev;
-	struct device_dma_parameters	dma_parms;
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
@@ -440,6 +442,8 @@
 	unsigned int			irq;
 	dma_addr_t			bd0_phys;
 	struct sdma_buffer_descriptor	*bd0;
+	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
+	bool				clk_ratio;
 };

 static int sdma_config_write(struct dma_chan *chan,
@@ -554,6 +558,13 @@
 	.script_addrs = &sdma_script_imx7d,
 };

+static struct sdma_driver_data sdma_imx8mq = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx7d,
+	.check_ratio = 1,
+};
+
 static const struct platform_device_id sdma_devtypes[] = {
 	{
 		.name = "imx25-sdma",
@@ -577,6 +588,9 @@
 		.name = "imx7d-sdma",
 		.driver_data = (unsigned long)&sdma_imx7d,
 	}, {
+		.name = "imx8mq-sdma",
+		.driver_data = (unsigned long)&sdma_imx8mq,
+	}, {
 		/* sentinel */
 	}
 };
@@ -590,6 +604,7 @@
 	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
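The three additions above belong together: the new sdma_imx8mq platform data reuses the i.MX7D script addresses and the i.MX35-style channel-enable layout with 48 events, and both the platform_device_id and OF match tables gain entries pointing at it. check_ratio opts the part into the AHB:core clock-ratio probe added later in this patch (being a bool, .check_ratio = true would be the more idiomatic spelling, though 1 is equivalent).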
@@ -662,8 +677,11 @@
 		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

 	/* Set bits of CONFIG register with dynamic context switching */
-	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
-		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+	reg = readl(sdma->regs + SDMA_H_CONFIG);
+	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
+		reg |= SDMA_H_CONFIG_CSM;
+		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
+	}

 	return ret;
 }
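The old test enabled dynamic context switching only when the entire CONFIG register read back as zero, so any bit already set (notably ACR, which this patch now programs at init time on 1:1-clocked parts) left the controller stuck in static context-switching mode. The read-modify-write sets CSM while preserving the other bits. The u32 reg declaration this relies on sits outside the hunk's context window; presumably the full patch adds it to sdma_run_channel0().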
@@ -677,9 +695,7 @@
 	int ret;
 	unsigned long flags;

-	buf_virt = dma_alloc_coherent(NULL,
-			size,
-			&buf_phys, GFP_KERNEL);
+	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
 	if (!buf_virt) {
 		return -ENOMEM;
 	}
@@ -698,7 +714,7 @@

 	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

-	dma_free_coherent(NULL, size, buf_virt, buf_phys);
+	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);

 	return ret;
 }
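Both RAM-script call sites previously passed a NULL device. The DMA-mapping core resolves dma_map_ops, the DMA mask, and coherent pools through the device, and modern kernels refuse a NULL pointer here, so the allocation and the matching free must name the SDMA platform device. A hypothetical helper sketching the corrected convention:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hypothetical helper: stage an image in a device-visible buffer. */
static int foo_stage_image(struct device *dev, const void *img, size_t size)
{
	dma_addr_t phys;
	void *virt;

	/* dev must be a real device carrying a DMA mask; NULL is rejected */
	virt = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	memcpy(virt, img, size);
	/* ... point the engine at 'phys' and start the transfer ... */

	dma_free_coherent(dev, size, virt, phys);
	return 0;
}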
@@ -744,12 +760,8 @@
 		return;
 	}
 	sdmac->desc = desc = to_sdma_desc(&vd->tx);
-	/*
-	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
-	 * the desc allocated will never be freed in vchan_dma_desc_free_list
-	 */
-	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
-		list_del(&vd->node);
+
+	list_del(&vd->node);

 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
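With termination now parking the in-flight descriptor via vchan_terminate_vdesc() (see the sdma_terminate_all() hunk below), cyclic descriptors are reclaimed through the channel's terminated list rather than left on desc_issued, so the node can be unlinked unconditionally and the cyclic special case, together with its comment, goes away.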
@@ -1050,19 +1062,26 @@

 	spin_lock_irqsave(&sdmac->vc.lock, flags);
 	vchan_get_all_descriptors(&sdmac->vc, &head);
-	sdmac->desc = NULL;
 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 	vchan_dma_desc_free_list(&sdmac->vc, &head);
 }

-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);

 	sdma_disable_channel(chan);

-	if (sdmac->desc)
+	if (sdmac->desc) {
+		vchan_terminate_vdesc(&sdmac->desc->vd);
+		sdmac->desc = NULL;
 		schedule_work(&sdmac->terminate_worker);
+	}
+
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

 	return 0;
 }
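Three fixes land in the rename: the channel is disabled and the descriptor handed off under the vc lock, the in-flight descriptor is parked on the virt-dma terminated list instead of being freed behind the completion path's back, and sdmac->desc is cleared here rather than in the worker (hence its removal in the hunk above). A minimal sketch of the virt-dma termination contract; the foo names are hypothetical, while the vchan_* helpers are the real drivers/dma/virt-dma.h API:

#include <linux/spinlock.h>
#include "virt-dma.h"	/* drivers/dma/virt-dma.h */

struct foo_chan {
	struct virt_dma_chan	vc;
	struct virt_dma_desc	*cur;	/* descriptor the hardware is running */
};

static int foo_terminate_all(struct dma_chan *chan)
{
	struct foo_chan *fc = container_of(to_virt_chan(chan),
					   struct foo_chan, vc);
	unsigned long flags;

	spin_lock_irqsave(&fc->vc.lock, flags);
	/* ... stop the hardware channel here ... */
	if (fc->cur) {
		/* park the in-flight descriptor on vc.desc_terminated;
		 * it is freed later, never under this spinlock */
		vchan_terminate_vdesc(fc->cur);
		fc->cur = NULL;
	}
	spin_unlock_irqrestore(&fc->vc.lock, flags);

	return 0;
}

static void foo_synchronize(struct dma_chan *chan)
{
	/* waits out the completion tasklet, then frees terminated descriptors */
	vchan_synchronize(to_virt_chan(chan));
}

In this driver the deferred cleanup is split between terminate_worker and sdma_channel_synchronize(), which the dmaengine core invokes through device_synchronize before channel resources are torn down.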
@@ -1119,7 +1138,6 @@
 static int sdma_config_channel(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	int ret;

 	sdma_disable_channel(chan);

@@ -1159,9 +1177,7 @@
 		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 	}

-	ret = sdma_load_context(sdmac);
-
-	return ret;
+	return 0;
 }

 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
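sdma_config_channel() now only records the transfer parameters: the eager sdma_load_context() call and the ret it fed are gone. Per the @context_loaded kernel-doc added in the first hunk, loading is presumably deferred to the point where the channel actually starts and guarded so the context is written once per configuration.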
@@ -1184,7 +1200,7 @@
 {
 	int ret = -EBUSY;

-	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
					GFP_NOWAIT);
 	if (!sdma->bd0) {
 		ret = -ENOMEM;
@@ -1207,8 +1223,8 @@
 	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 	int ret = 0;

-	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
-				       GFP_NOWAIT);
+	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
+				      &desc->bd_phys, GFP_NOWAIT);
 	if (!desc->bd) {
 		ret = -ENOMEM;
 		goto out;
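dma_zalloc_coherent() was removed from the kernel once dma_alloc_coherent() started returning zeroed memory unconditionally, so the plain call (now given the real SDMA device rather than NULL) behaves identically. The same guarantee is what lets a later hunk drop the explicit memset of the channel control blocks.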
@@ -1221,7 +1237,8 @@
 {
 	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);

-	dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+	dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
+			  desc->bd_phys);
 }

 static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1301,12 +1318,11 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;

-	sdma_disable_channel_async(chan);
+	sdma_terminate_all(chan);

 	sdma_channel_synchronize(chan);

-	if (sdmac->event_id0)
-		sdma_event_disable(sdmac, sdmac->event_id0);
+	sdma_event_disable(sdmac, sdmac->event_id0);
 	if (sdmac->event_id1)
 		sdma_event_disable(sdmac, sdmac->event_id1);

@@ -1347,10 +1363,12 @@
 	sdma_config_ownership(sdmac, false, true, false);

 	if (sdma_load_context(sdmac))
-		goto err_desc_out;
+		goto err_bd_out;

 	return desc;

+err_bd_out:
+	sdma_free_bd(desc);
 err_desc_out:
 	kfree(desc);
 err_out:
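Previously a sdma_load_context() failure jumped to err_desc_out, freeing the descriptor but leaking the buffer-descriptor ring allocated just before it. The new label preserves the usual kernel unwind order, undoing the most recent allocation first. A sketch of the resulting flow; the kzalloc() and sdma_alloc_bd() steps are inferred from the labels rather than visible in this hunk:

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		goto err_out;

	if (sdma_alloc_bd(desc))
		goto err_desc_out;	/* only desc itself to undo */

	if (sdma_load_context(sdmac))
		goto err_bd_out;	/* the BD ring is undone as well */

	return desc;

err_bd_out:
	sdma_free_bd(desc);
err_desc_out:
	kfree(desc);
err_out:
	return NULL;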
@@ -1605,11 +1623,9 @@
 	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));

 	/* Set ENBLn earlier to make sure dma request triggered after that */
-	if (sdmac->event_id0) {
-		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
-			return -EINVAL;
-		sdma_event_enable(sdmac, sdmac->event_id0);
-	}
+	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+		return -EINVAL;
+	sdma_event_enable(sdmac, sdmac->event_id0);

 	if (sdmac->event_id1) {
 		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
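DMA request line 0 is a valid event, so gating on a non-zero sdmac->event_id0 silently skipped both the range check and sdma_event_enable() for channels wired to event 0. The earlier hunk in the channel-resource free path drops the matching guard around sdma_event_disable() for the same reason; the range check against num_events now always runs.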
@@ -1625,7 +1641,7 @@
 			      struct dma_tx_state *txstate)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct sdma_desc *desc;
+	struct sdma_desc *desc = NULL;
 	u32 residue;
 	struct virt_dma_desc *vd;
 	enum dma_status ret;
@@ -1636,19 +1652,23 @@
 		return ret;

 	spin_lock_irqsave(&sdmac->vc.lock, flags);
+
 	vd = vchan_find_desc(&sdmac->vc, cookie);
-	if (vd) {
+	if (vd)
 		desc = to_sdma_desc(&vd->tx);
+	else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+		desc = sdmac->desc;
+
+	if (desc) {
 		if (sdmac->flags & IMX_DMA_SG_LOOP)
 			residue = (desc->num_bd - desc->buf_ptail) *
 				desc->period_len - desc->chn_real_count;
 		else
 			residue = desc->chn_count - desc->chn_real_count;
-	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
-		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
 	} else {
 		residue = 0;
 	}
+
 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
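The descriptor lookup and the residue computation are now decoupled: whichever way the descriptor is found, via vchan_find_desc() or as the currently executing sdmac->desc, a cyclic (IMX_DMA_SG_LOOP) transfer gets the period-aware residue (hence desc starting as NULL in the hunk above). Previously the in-flight branch always used the plain chn_count arithmetic, which is wrong for cyclic transfers. A worked instance of the cyclic formula with illustrative numbers:

#include <linux/types.h>

/* Worked instance of the cyclic residue formula (illustrative numbers) */
static u32 example_cyclic_residue(void)
{
	u32 num_bd = 4;			/* periods in the ring */
	u32 period_len = 1024;		/* bytes per period */
	u32 buf_ptail = 1;		/* periods already retired */
	u32 chn_real_count = 512;	/* bytes done in the current period */

	/* (4 - 1) * 1024 - 512 == 2560 bytes outstanding */
	return (num_bd - buf_ptail) * period_len - chn_real_count;
}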
@@ -1771,7 +1791,7 @@
 	u32 reg, val, shift, num_map, i;
 	int ret = 0;

-	if (IS_ERR(np) || IS_ERR(gpr_np))
+	if (IS_ERR(np) || !gpr_np)
 		goto out;

 	event_remap = of_find_property(np, propname, NULL);
@@ -1819,7 +1839,7 @@
 	}

 out:
-	if (!IS_ERR(gpr_np))
+	if (gpr_np)
 		of_node_put(gpr_np);

 	return ret;
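gpr_np presumably comes from an of_parse_phandle()-style lookup, which returns NULL on failure rather than an ERR_PTR-encoded error, so IS_ERR(gpr_np) could never fire and a failed lookup fell through into the remap logic with a NULL node. Plain NULL tests match the actual return convention, both here and for the of_node_put() on the way out.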
@@ -1849,10 +1869,14 @@
 	if (ret)
 		goto disable_clk_ipg;

+	if (sdma->drvdata->check_ratio &&
+	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
+		sdma->clk_ratio = 1;
+
 	/* Be sure SDMA has not started yet */
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);

-	sdma->channel_control = dma_alloc_coherent(NULL,
+	sdma->channel_control = dma_alloc_coherent(sdma->dev,
 			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
 			sizeof(struct sdma_context_data),
 			&ccb_phys, GFP_KERNEL);
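On parts flagged with check_ratio (i.MX8MQ above), the AHB to SDMA-core clock ratio is detected at init time by simply comparing the two clock rates. A minimal sketch of that check, assuming clk_ahb and clk_ipg are the clocks the driver has already acquired and enabled; the helper name is hypothetical:

#include <linux/clk.h>

/* true when AHB and the SDMA core run 1:1; false keeps the legacy 2:1
 * assumption encoded by a clear ACR bit */
static bool sdma_clk_ratio_is_1_1(struct clk *clk_ahb, struct clk *clk_ipg)
{
	return clk_get_rate(clk_ahb) == clk_get_rate(clk_ipg);
}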
@@ -1866,10 +1890,6 @@
 			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
 	sdma->context_phys = ccb_phys +
 			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
-
-	/* Zero-out the CCB structures array just allocated */
-	memset(sdma->channel_control, 0,
-	       MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));

 	/* disable all channels */
 	for (i = 0; i < sdma->drvdata->num_events; i++)
@@ -1889,8 +1909,10 @@
 	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);

 	/* Set bits of CONFIG register but with static context switching */
-	/* FIXME: Check whether to set ACR bit depending on clock ratios */
-	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
+	if (sdma->clk_ratio)
+		writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
+	else
+		writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);

 	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);

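This resolves the FIXME the old code carried: with the ratio detected earlier in init, ACR is set for 1:1-clocked parts such as i.MX8MQ and left clear for the default 2:1 configuration, matching the clk_ratio field's comment.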
@@ -1946,7 +1968,8 @@
 	 */
 	data.dma_request2 = 0;

-	return dma_request_channel(mask, sdma_filter_fn, &data);
+	return __dma_request_channel(&mask, sdma_filter_fn, &data,
+				     ofdma->of_node);
 }

 static int sdma_probe(struct platform_device *pdev)
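dma_request_channel() cannot carry the requester's OF node. __dma_request_channel() takes the capability mask by pointer plus a device_node, so the dmaengine core can match the returned channel against the node that triggered this of_dma translation.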
@@ -2029,7 +2052,7 @@

 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
-	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
 		saddr_arr[i] = -EINVAL;

 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
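Sizing the loop from the structure itself instead of the frozen V1 constant means the script-address slots of the newer table revisions are initialized to -EINVAL as well; previously they presumably stayed zero-filled from allocation, which is indistinguishable from a valid script address.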
@@ -2077,7 +2100,7 @@
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_config = sdma_config;
-	sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+	sdma->dma_device.device_terminate_all = sdma_terminate_all;
 	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
 	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
@@ -2085,7 +2108,7 @@
 	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
-	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	sdma->dma_device.copy_align = 2;
 	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);

 	platform_set_drvdata(pdev, sdma);
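The driver-private device_dma_parameters disappears (its field was removed from struct sdma_engine in an earlier hunk), presumably because the driver core now supplies dma_parms for platform devices, so dma_set_max_seg_size() keeps working without it. copy_align = 2 advertises 4-byte alignment for memcpy transfers; the DMAENGINE_ALIGN_4_BYTES enumerator would express the same value more readably.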
@@ -2191,7 +2214,7 @@
 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
 #endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
 #endif
 MODULE_LICENSE("GPL");
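i.MX8M reuses the i.MX7D SDMA firmware image, so the existing MODULE_FIRMWARE() line simply grows a CONFIG_SOC_IMX8M condition instead of declaring a new file.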