forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/drivers/dma/ti/omap-dma.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * OMAP DMAengine support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
+#include <linux/cpu_pm.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -26,12 +24,33 @@
 #define OMAP_SDMA_REQUESTS	127
 #define OMAP_SDMA_CHANNELS	32
 
+struct omap_dma_config {
+	int lch_end;
+	unsigned int rw_priority:1;
+	unsigned int needs_busy_check:1;
+	unsigned int may_lose_context:1;
+	unsigned int needs_lch_clear:1;
+};
+
+struct omap_dma_context {
+	u32 irqenable_l0;
+	u32 irqenable_l1;
+	u32 ocp_sysconfig;
+	u32 gcr;
+};
+
 struct omap_dmadev {
 	struct dma_device ddev;
 	spinlock_t lock;
 	void __iomem *base;
 	const struct omap_dma_reg *reg_map;
 	struct omap_system_dma_plat_info *plat;
+	const struct omap_dma_config *cfg;
+	struct notifier_block nb;
+	struct omap_dma_context context;
+	int lch_count;
+	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
+	struct mutex lch_lock;		/* for assigning logical channels */
 	bool legacy;
 	bool ll123_supported;
 	struct dma_pool *desc_pool;
@@ -94,6 +113,7 @@
 	bool using_ll;
 	enum dma_transfer_direction dir;
 	dma_addr_t dev_addr;
+	bool polled;
 
 	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
 	int16_t ei;		/* for double indexing */
@@ -104,7 +124,7 @@
 	uint32_t csdp;		/* CSDP value */
 
 	unsigned sglen;
-	struct omap_sg sg[0];
+	struct omap_sg sg[];
 };
 
 enum {
@@ -205,6 +225,7 @@
 	[CSDP_DATA_TYPE_32] = 4,
 };
 
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
 static struct of_dma_filter_info omap_dma_info = {
 	.filter_fn = omap_dma_filter_fn,
 };
@@ -375,6 +396,19 @@
 	omap_dma_chan_write(c, CSR, val);
 
 	return val;
+}
+
+static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
+{
+	struct omap_chan *c;
+	int i;
+
+	c = od->lch_map[lch];
+	if (!c)
+		return;
+
+	for (i = CSDP; i <= od->cfg->lch_end; i++)
+		omap_dma_chan_write(c, i, 0);
 }
 
 static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
@@ -634,6 +668,37 @@
 	return IRQ_HANDLED;
 }
 
+static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
+{
+	int channel;
+
+	mutex_lock(&od->lch_lock);
+	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
+	if (channel >= od->lch_count)
+		goto out_busy;
+	set_bit(channel, od->lch_bitmap);
+	mutex_unlock(&od->lch_lock);
+
+	omap_dma_clear_lch(od, channel);
+	*lch = channel;
+
+	return 0;
+
+out_busy:
+	mutex_unlock(&od->lch_lock);
+	*lch = -EINVAL;
+
+	return -EBUSY;
+}
+
+static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
+{
+	omap_dma_clear_lch(od, lch);
+	mutex_lock(&od->lch_lock);
+	clear_bit(lch, od->lch_bitmap);
+	mutex_unlock(&od->lch_lock);
+}
+
 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
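The non-legacy path in this hunk replaces omap_request_dma() with a small in-driver allocator: a mutex-protected bitmap of logical channels. A minimal sketch of the same pattern, assuming made-up names (my_dev, chan_alloc, chan_free) rather than the driver's own:

struct my_dev {
	struct mutex chan_lock;			/* serializes alloc/free */
	DECLARE_BITMAP(chan_bitmap, 32);	/* set bit = channel in use */
	int chan_count;
};

static int chan_alloc(struct my_dev *dev)
{
	int ch;

	mutex_lock(&dev->chan_lock);
	ch = find_first_zero_bit(dev->chan_bitmap, dev->chan_count);
	if (ch >= dev->chan_count) {
		mutex_unlock(&dev->chan_lock);
		return -EBUSY;			/* all channels taken */
	}
	set_bit(ch, dev->chan_bitmap);
	mutex_unlock(&dev->chan_lock);

	return ch;				/* caller owns this channel */
}

static void chan_free(struct my_dev *dev, int ch)
{
	mutex_lock(&dev->chan_lock);
	clear_bit(ch, dev->chan_bitmap);
	mutex_unlock(&dev->chan_lock);
}

A plain mutex is sufficient here because, as in the patch, channels are only allocated and freed from the alloc/free_chan_resources callbacks, which run in process context.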
@@ -645,8 +710,7 @@
 		ret = omap_request_dma(c->dma_sig, "DMA engine",
 				       omap_dma_callback, c, &c->dma_ch);
 	} else {
-		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
-				       &c->dma_ch);
+		ret = omap_dma_get_lch(od, &c->dma_ch);
 	}
 
 	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
@@ -703,7 +767,11 @@
 	c->channel_base = NULL;
 	od->lch_map[c->dma_ch] = NULL;
 	vchan_free_chan_resources(&c->vc);
-	omap_free_dma(c->dma_ch);
+
+	if (od->legacy)
+		omap_free_dma(c->dma_ch);
+	else
+		omap_dma_put_lch(od, c->dma_ch);
 
 	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
 		c->dma_sig);
@@ -815,31 +883,22 @@
 	dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct omap_chan *c = to_omap_dma_chan(chan);
-	struct virt_dma_desc *vd;
 	enum dma_status ret;
 	unsigned long flags;
+	struct omap_desc *d = NULL;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-
-	if (!c->paused && c->running) {
-		uint32_t ccr = omap_dma_chan_read(c, CCR);
-		/*
-		 * The channel is no longer active, set the return value
-		 * accordingly
-		 */
-		if (!(ccr & CCR_ENABLE))
-			ret = DMA_COMPLETE;
-	}
-
-	if (ret == DMA_COMPLETE || !txstate)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	spin_lock_irqsave(&c->vc.lock, flags);
-	vd = vchan_find_desc(&c->vc, cookie);
-	if (vd) {
-		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
-	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
-		struct omap_desc *d = c->desc;
+	if (c->desc && c->desc->vd.tx.cookie == cookie)
+		d = c->desc;
+
+	if (!txstate)
+		goto out;
+
+	if (d) {
 		dma_addr_t pos;
 
 		if (d->dir == DMA_MEM_TO_DEV)
@@ -851,10 +910,31 @@
 
 		txstate->residue = omap_dma_desc_size_pos(d, pos);
 	} else {
-		txstate->residue = 0;
+		struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);
+
+		if (vd)
+			txstate->residue = omap_dma_desc_size(
+				to_omap_dma_desc(&vd->tx));
+		else
+			txstate->residue = 0;
 	}
-	if (ret == DMA_IN_PROGRESS && c->paused)
+
+out:
+	if (ret == DMA_IN_PROGRESS && c->paused) {
 		ret = DMA_PAUSED;
+	} else if (d && d->polled && c->running) {
+		uint32_t ccr = omap_dma_chan_read(c, CCR);
+		/*
+		 * The channel is no longer active, set the return value
+		 * accordingly and mark it as completed
+		 */
+		if (!(ccr & CCR_ENABLE)) {
+			ret = DMA_COMPLETE;
+			omap_dma_start_desc(c);
+			vchan_cookie_complete(&d->vd);
+		}
+	}
+
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 
 	return ret;
@@ -1181,7 +1261,10 @@
 	d->ccr = c->ccr;
 	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+	if (tx_flags & DMA_PREP_INTERRUPT)
+		d->cicr |= CICR_FRAME_IE;
+	else
+		d->polled = true;
 
 	d->csdp = data_type;
 
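With this hunk, a memcpy descriptor prepared without DMA_PREP_INTERRUPT never enables the frame interrupt; completion is instead detected by omap_dma_tx_status() once CCR_ENABLE drops (see the tx_status changes earlier in this patch). A rough client-side sketch of driving such a polled transfer; do_polled_copy() and its busy-wait loop are illustrative, not part of this patch:

static int do_polled_copy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_status status;
	dma_cookie_t cookie;

	/* flags == 0: no DMA_PREP_INTERRUPT, so no completion callback */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -EIO;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	/* poll until the driver reports the descriptor as finished */
	do {
		status = dmaengine_tx_status(chan, cookie, NULL);
		if (status == DMA_IN_PROGRESS)
			cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return status == DMA_COMPLETE ? 0 : -EIO;
}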
@@ -1439,16 +1522,139 @@
 	}
 }
 
+/* Currently used by omap2 & 3 to block deeper SoC idle states */
+static bool omap_dma_busy(struct omap_dmadev *od)
+{
+	struct omap_chan *c;
+	int lch = -1;
+
+	while (1) {
+		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
+		if (lch >= od->lch_count)
+			break;
+		c = od->lch_map[lch];
+		if (!c)
+			continue;
+		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
+			return true;
+	}
+
+	return false;
+}
+
+/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
+static int omap_dma_busy_notifier(struct notifier_block *nb,
+				  unsigned long cmd, void *v)
+{
+	struct omap_dmadev *od;
+
+	od = container_of(nb, struct omap_dmadev, nb);
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		if (omap_dma_busy(od))
+			return NOTIFY_BAD;
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
+ * now. Context save seems to be only currently needed on omap3.
+ */
+static void omap_dma_context_save(struct omap_dmadev *od)
+{
+	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
+	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
+	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+	od->context.gcr = omap_dma_glbl_read(od, GCR);
+}
+
+static void omap_dma_context_restore(struct omap_dmadev *od)
+{
+	int i;
+
+	omap_dma_glbl_write(od, GCR, od->context.gcr);
+	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
+	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
+	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);
+
+	/* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
+	if (od->plat->errata & DMA_ROMCODE_BUG)
+		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);
+
+	/* Clear dma channels */
+	for (i = 0; i < od->lch_count; i++)
+		omap_dma_clear_lch(od, i);
+}
+
+/* Currently only used for omap3 */
+static int omap_dma_context_notifier(struct notifier_block *nb,
+				     unsigned long cmd, void *v)
+{
+	struct omap_dmadev *od;
+
+	od = container_of(nb, struct omap_dmadev, nb);
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		if (omap_dma_busy(od))
+			return NOTIFY_BAD;
+		omap_dma_context_save(od);
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		omap_dma_context_restore(od);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
+			      int max_fifo_depth, int tparams)
+{
+	u32 val;
+
+	/* Set only for omap2430 and later */
+	if (!od->cfg->rw_priority)
+		return;
+
+	if (max_fifo_depth == 0)
+		max_fifo_depth = 1;
+	if (arb_rate == 0)
+		arb_rate = 1;
+
+	val = 0xff & max_fifo_depth;
+	val |= (0x3 & tparams) << 12;
+	val |= (arb_rate & 0xff) << 16;
+
+	omap_dma_glbl_write(od, GCR, val);
+}
+
 #define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
+/*
+ * No flags currently set for default configuration as omap1 is still
+ * using platform data.
+ */
+static const struct omap_dma_config default_cfg;
+
 static int omap_dma_probe(struct platform_device *pdev)
 {
+	const struct omap_dma_config *conf;
 	struct omap_dmadev *od;
 	struct resource *res;
 	int rc, i, irq;
-	u32 lch_count;
+	u32 val;
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
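The two notifiers added above follow the usual CPU cluster PM pattern: veto the transition with NOTIFY_BAD while the DMA controller is busy, and save/restore the global registers around context-losing idle states. Condensed into a single illustrative callback; my_dev, my_busy(), my_save() and my_restore() are placeholders, not driver functions:

static int my_pm_notifier(struct notifier_block *nb, unsigned long cmd,
			  void *v)
{
	struct my_dev *dev = container_of(nb, struct my_dev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (my_busy(dev))
			return NOTIFY_BAD;	/* block the deeper idle state */
		my_save(dev);			/* context may be lost */
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		my_restore(dev);		/* restore after wakeup or abort */
		break;
	}

	return NOTIFY_OK;
}

Registration mirrors the probe/remove hunks later in this patch: set dev->nb.notifier_call and call cpu_pm_register_notifier(&dev->nb) at probe time, and cpu_pm_unregister_notifier(&dev->nb) at remove time.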
@@ -1459,9 +1665,21 @@
 	if (IS_ERR(od->base))
 		return PTR_ERR(od->base);
 
-	od->plat = omap_get_plat_info();
-	if (!od->plat)
-		return -EPROBE_DEFER;
+	conf = of_device_get_match_data(&pdev->dev);
+	if (conf) {
+		od->cfg = conf;
+		od->plat = dev_get_platdata(&pdev->dev);
+		if (!od->plat) {
+			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
+			return -ENODEV;
+		}
+	} else {
+		od->cfg = &default_cfg;
+
+		od->plat = omap_get_plat_info();
+		if (!od->plat)
+			return -EPROBE_DEFER;
+	}
 
 	od->reg_map = od->plat->reg_map;
 
@@ -1493,6 +1711,7 @@
 	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
+	mutex_init(&od->lch_lock);
 	spin_lock_init(&od->lock);
 	spin_lock_init(&od->irq_lock);
 
@@ -1508,18 +1727,30 @@
 
 	/* Number of available logical channels */
 	if (!pdev->dev.of_node) {
-		lch_count = od->plat->dma_attr->lch_count;
-		if (unlikely(!lch_count))
-			lch_count = OMAP_SDMA_CHANNELS;
+		od->lch_count = od->plat->dma_attr->lch_count;
+		if (unlikely(!od->lch_count))
+			od->lch_count = OMAP_SDMA_CHANNELS;
 	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
-					&lch_count)) {
+					&od->lch_count)) {
 		dev_info(&pdev->dev,
 			 "Missing dma-channels property, using %u.\n",
 			 OMAP_SDMA_CHANNELS);
-		lch_count = OMAP_SDMA_CHANNELS;
+		od->lch_count = OMAP_SDMA_CHANNELS;
 	}
 
-	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+	/* Mask of allowed logical channels */
+	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
+						       "dma-channel-mask",
+						       &val)) {
+		/* Tag channels not in mask as reserved */
+		val = ~val;
+		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
+	}
+	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
+		bitmap_set(od->lch_bitmap, 0, 2);
+
+	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
+				   sizeof(*od->lch_map),
 				   GFP_KERNEL);
 	if (!od->lch_map)
 		return -ENOMEM;
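For illustration (values assumed, not from the patch): a dma-channel-mask of 0xfffffffe has only bit 0 clear, so val = ~val yields 0x00000001 and bitmap_from_arr32() marks logical channel 0 as reserved; find_first_zero_bit() in omap_dma_get_lch() will then never hand that channel out.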
@@ -1591,6 +1822,16 @@
 		}
 	}
 
+	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
+
+	if (od->cfg->needs_busy_check) {
+		od->nb.notifier_call = omap_dma_busy_notifier;
+		cpu_pm_register_notifier(&od->nb);
+	} else if (od->cfg->may_lose_context) {
+		od->nb.notifier_call = omap_dma_context_notifier;
+		cpu_pm_register_notifier(&od->nb);
+	}
+
 	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
 		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
 
@@ -1601,6 +1842,9 @@
 {
 	struct omap_dmadev *od = platform_get_drvdata(pdev);
 	int irq;
+
+	if (od->cfg->may_lose_context)
+		cpu_pm_unregister_notifier(&od->nb);
 
 	if (pdev->dev.of_node)
 		of_dma_controller_free(pdev->dev.of_node);
@@ -1623,12 +1867,45 @@
 	return 0;
 }
 
+static const struct omap_dma_config omap2420_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.needs_busy_check = true,
+};
+
+static const struct omap_dma_config omap2430_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+};
+
+static const struct omap_dma_config omap3430_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.may_lose_context = true,
+};
+
+static const struct omap_dma_config omap3630_data = {
+	.lch_end = CCDN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.may_lose_context = true,
+};
+
+static const struct omap_dma_config omap4_data = {
+	.lch_end = CCDN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+};
+
 static const struct of_device_id omap_dma_match[] = {
-	{ .compatible = "ti,omap2420-sdma", },
-	{ .compatible = "ti,omap2430-sdma", },
-	{ .compatible = "ti,omap3430-sdma", },
-	{ .compatible = "ti,omap3630-sdma", },
-	{ .compatible = "ti,omap4430-sdma", },
+	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
+	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
+	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
+	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
+	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, omap_dma_match);
@@ -1638,11 +1915,11 @@
 	.remove	= omap_dma_remove,
 	.driver = {
 		.name = "omap-dma-engine",
-		.of_match_table = of_match_ptr(omap_dma_match),
+		.of_match_table = omap_dma_match,
 	},
 };
 
-bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
 {
 	if (chan->device->dev->driver == &omap_dma_driver.driver) {
 		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
@@ -1656,7 +1933,6 @@
 	}
 	return false;
 }
-EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
 
 static int omap_dma_init(void)
 {
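With omap_dma_filter_fn() now static and its EXPORT_SYMBOL_GPL removed, client drivers can no longer call the filter directly; channels are expected to come through the generic devicetree dmas/dma-names binding. A hedged sketch of such a consumer; my_client_probe() and the "rx" channel name are illustrative only:

static int my_client_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	/* "rx" must match an entry in the client node's dma-names */
	chan = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... configure and use the channel ... */

	dma_release_channel(chan);
	return 0;
}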