forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/drivers/dma/at_xdmac.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
  *
  * Copyright (C) 2014 Atmel Corporation
  *
  * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
 */

 #include <asm/barrier.h>
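
Note: the 12-line license boilerplate collapses into a machine-readable SPDX tag, per the kernel's license-rules documentation. "GPL-2.0-only" matches the removed wording, which grants version 2 with no "or later" clause. Convention: .c files carry the tag as their first line in C++ comment style, headers use block-comment style:

    // SPDX-License-Identifier: GPL-2.0-only        (first line of a .c file)
    /* SPDX-License-Identifier: GPL-2.0-only */     (first line of a header)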
@@ -224,7 +213,7 @@
 	struct clk		*clk;
 	u32			save_gim;
 	struct dma_pool		*at_xdmac_desc_pool;
-	struct at_xdmac_chan	chan[0];
+	struct at_xdmac_chan	chan[];
 };


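
Note: chan[0] is the old GNU zero-length-array idiom for a variable-length trailing array; chan[] is the standard C99 flexible array member. They behave the same here, but the standard form lets the compiler and FORTIFY-style bounds checking reason about the struct. A minimal userspace sketch of the allocation pattern (names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_chan { int id; };            /* stand-in for at_xdmac_chan */

    struct demo_ctrl {                       /* stand-in for at_xdmac */
            size_t nr_chans;
            struct demo_chan chan[];         /* C99 flexible array member */
    };

    int main(void)
    {
            size_t nr = 8;
            /* One allocation covers the header plus nr trailing elements;
             * the in-kernel equivalent would size this with struct_size(). */
            struct demo_ctrl *d = malloc(sizeof(*d) + nr * sizeof(d->chan[0]));

            if (!d)
                    return 1;
            d->nr_chans = nr;
            for (size_t i = 0; i < nr; i++)
                    d->chan[i].id = (int)i;
            printf("last channel id: %d\n", d->chan[d->nr_chans - 1].id);
            free(d);
            return 0;
    }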
@@ -309,6 +298,11 @@
 	return csize;
 };

+static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
+{
+	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
+}
+
 static inline u8 at_xdmac_get_dwidth(u32 cfg)
 {
 	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
@@ -388,7 +382,13 @@
 			 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

 	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
-	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
+	/*
+	 * Request Overflow Error is only for peripheral synchronized transfers
+	 */
+	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
+		reg |= AT_XDMAC_CIE_ROIE;
+
 	/*
 	 * There is no end of list when doing cyclic dma, we need to get
 	 * an interrupt after each periods.
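
Note: the new helper plus the conditional make the interrupt-enable mask depend on the transfer type. A Request Overflow can only occur when a peripheral request line drives the channel, so memory-to-memory transfers no longer arm (and can no longer be disturbed by) ROIE. Sketch of the resulting mask logic using the driver's flag names but illustrative bit values (the real definitions live in at_xdmac.c):

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit positions illustrative only. */
    #define AT_XDMAC_CC_TYPE_PER_TRAN (1u << 0)  /* peripheral-synchronized */
    #define AT_XDMAC_CIE_RBEIE        (1u << 1)  /* read bus error irq */
    #define AT_XDMAC_CIE_WBEIE        (1u << 2)  /* write bus error irq */
    #define AT_XDMAC_CIE_ROIE         (1u << 5)  /* request overflow irq */

    bool is_peripheral_xfer(uint32_t cfg)
    {
            return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
    }

    uint32_t build_cie_mask(uint32_t cfg)
    {
            uint32_t reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;

            /* A request overflow needs a peripheral request line, so only
             * peripheral-synchronized transfers enable ROIE. */
            if (is_peripheral_xfer(cfg))
                    reg |= AT_XDMAC_CIE_ROIE;
            return reg;
    }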
@@ -1390,7 +1390,7 @@
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_desc	*desc, *_desc, *iter;
 	struct list_head	*descs_list;
 	enum dma_status		ret;
 	int			residue, retry;
@@ -1505,11 +1505,13 @@
 	 * microblock.
 	 */
 	descs_list = &desc->descs_list;
-	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
-		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
-		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+			desc = iter;
 			break;
+		}
 	}
 	residue += cur_ubc << dwidth;

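
Note: the old loop reused desc as the list cursor. list_for_each_entry_safe() leaves its cursor pointing at the container of the list head when the walk finishes without a break, so a no-match pass left desc as a bogus descriptor that the code after the loop would still dereference. Iterating with a dedicated iter and publishing it into desc only on a match confines the cursor to the loop. The same pattern in a plain-C userspace sketch:

    #include <stddef.h>

    struct node {
            int key;
            struct node *next;
    };

    /* Search without leaking the loop cursor: if the walk ends with no
     * match, 'found' stays NULL instead of pointing at a bogus node. */
    struct node *find(struct node *head, int key)
    {
            struct node *found = NULL;

            for (struct node *iter = head; iter; iter = iter->next) {
                    if (iter->key == key) {
                            found = iter;   /* publish only on a hit */
                            break;
                    }
            }
            return found;
    }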
@@ -1541,9 +1543,6 @@
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc *desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&atchan->lock, flags);

 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1557,8 +1556,6 @@
 		if (!desc->active_xfer)
 			at_xdmac_start_xfer(atchan, desc);
 	}
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
 }

 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
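
Note: at_xdmac_advance_work() loses its internal irqsave locking; every caller in this patch now takes atchan->lock around the call (the tasklet and issue_pending hunks further down show the caller side verbatim). That lets a caller combine its own list inspection and the advance in one critical section instead of dropping the lock between the two. A common way to document such a "caller holds the lock" contract is lockdep_assert_held(); illustrative kernel-style sketch, with a hypothetical struct chan standing in for at_xdmac_chan:

    #include <linux/spinlock.h>

    struct chan {
            spinlock_t lock;
            /* ... queue state ... */
    };

    static void advance_work_locked(struct chan *ch)
    {
            lockdep_assert_held(&ch->lock);  /* documents + checks the contract */
            /* ... pick the next queued descriptor and start it ... */
    }

    static void issue_pending(struct chan *ch)
    {
            unsigned long flags;

            spin_lock_irqsave(&ch->lock, flags);
            advance_work_locked(ch);
            spin_unlock_irqrestore(&ch->lock, flags);
    }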
@@ -1566,16 +1563,62 @@
 	struct at_xdmac_desc		*desc;
 	struct dma_async_tx_descriptor	*txd;

-	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+	spin_lock_irq(&atchan->lock);
+	if (list_empty(&atchan->xfers_list)) {
+		spin_unlock_irq(&atchan->lock);
+		return;
+	}
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+				xfer_node);
+	spin_unlock_irq(&atchan->lock);
 	txd = &desc->tx_dma_desc;
-
 	if (txd->flags & DMA_PREP_INTERRUPT)
 		dmaengine_desc_get_callback_invoke(txd, NULL);
 }

-static void at_xdmac_tasklet(unsigned long data)
+static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 {
-	struct at_xdmac_chan	*atchan = (struct at_xdmac_chan *)data;
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	struct at_xdmac_desc	*bad_desc;
+
+	/*
+	 * The descriptor currently at the head of the active list is
+	 * broken. Since we don't have any way to report errors, we'll
+	 * just have to scream loudly and try to continue with other
+	 * descriptors queued (if any).
+	 */
+	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+	spin_lock_irq(&atchan->lock);
+
+	/* Channel must be disabled first as it's not done automatically */
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
+
+	bad_desc = list_first_entry(&atchan->xfers_list,
+				    struct at_xdmac_desc,
+				    xfer_node);
+
+	spin_unlock_irq(&atchan->lock);
+
+	/* Print bad descriptor's details if needed */
+	dev_dbg(chan2dev(&atchan->chan),
+		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
+		bad_desc->lld.mbr_ubc);
+
+	/* Then continue with usual descriptor management */
+}
+
+static void at_xdmac_tasklet(struct tasklet_struct *t)
+{
+	struct at_xdmac_chan	*atchan = from_tasklet(atchan, t, tasklet);
 	struct at_xdmac_desc	*desc;
 	u32			error_mask;

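
Note: three things happen in this hunk. (1) at_xdmac_handle_cyclic() now peeks xfers_list under the channel lock and bails out if the list is empty, since a concurrent terminate path can drain it between the interrupt firing and the tasklet running. (2) Error reporting moves into the new at_xdmac_handle_error(), which disables the channel and waits for the hardware to confirm before touching the failed descriptor; per the in-code comment, the controller does not stop the channel on error by itself. (3) The tasklet callback adopts the modern prototype and recovers its channel with from_tasklet(); the registration half of that change is in the last hunk, with a note there. A generic userspace rendering of the disable-and-drain pattern in (2), with a hypothetical register layout:

    #include <stdint.h>

    /* Hypothetical MMIO block standing in for the XDMAC globals. */
    struct dma_regs {
            volatile uint32_t gd;  /* global disable: write 1 to stop a channel */
            volatile uint32_t gs;  /* global status: 1 = channel still active */
    };

    /* Request the stop, then poll until the hardware confirms; the
     * descriptor list must not be touched while the channel may still
     * be walking it. The kernel loop body uses cpu_relax(). */
    void stop_channel_sync(struct dma_regs *regs, uint32_t chan_mask)
    {
            regs->gd = chan_mask;
            while (regs->gs & chan_mask)
                    ;
    }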
@@ -1592,38 +1635,34 @@
 	    || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor  *txd;

-		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
-			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
-			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
-			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+		if (atchan->irq_status & error_mask)
+			at_xdmac_handle_error(atchan);

-		spin_lock_bh(&atchan->lock);
+		spin_lock_irq(&atchan->lock);
 		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
 		if (!desc->active_xfer) {
 			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-			spin_unlock(&atchan->lock);
+			spin_unlock_irq(&atchan->lock);
 			return;
 		}

 		txd = &desc->tx_dma_desc;

 		at_xdmac_remove_xfer(atchan, desc);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irq(&atchan->lock);

-		if (!at_xdmac_chan_is_cyclic(atchan)) {
-			dma_cookie_complete(txd);
-			if (txd->flags & DMA_PREP_INTERRUPT)
-				dmaengine_desc_get_callback_invoke(txd, NULL);
-		}
+		dma_cookie_complete(txd);
+		if (txd->flags & DMA_PREP_INTERRUPT)
+			dmaengine_desc_get_callback_invoke(txd, NULL);

 		dma_run_dependencies(txd);

+		spin_lock_irq(&atchan->lock);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irq(&atchan->lock);
 	}
 }

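
Note: the open-coded dev_err() chain moves behind a single error_mask test into at_xdmac_handle_error() above. The completion block also drops its !at_xdmac_chan_is_cyclic() guard: this branch of the tasklet is only reached for non-cyclic channels (the cyclic case takes the at_xdmac_handle_cyclic() path earlier in the function, outside this hunk), so the inner check was dead. Finally, the _bh lock variants become _irq, which also excludes hard-irq context and matches the irqsave-based locking added elsewhere in the patch; a tasklet runs with interrupts enabled, so the unconditional spin_lock_irq()/spin_unlock_irq() pair is safe and cheap there. Rule-of-thumb sketch (illustrative struct, as before):

    #include <linux/spinlock.h>

    struct chan { spinlock_t lock; };

    void tasklet_side(struct chan *ch)
    {
            /* Context known: tasklets run with hard IRQs enabled, so an
             * unconditional disable/enable pair is correct. */
            spin_lock_irq(&ch->lock);
            /* ... */
            spin_unlock_irq(&ch->lock);
    }

    void any_context_side(struct chan *ch)
    {
            unsigned long flags;

            /* Context unknown: the caller may already have IRQs off, so
             * save and restore the previous state instead. */
            spin_lock_irqsave(&ch->lock, flags);
            /* ... */
            spin_unlock_irqrestore(&ch->lock, flags);
    }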
@@ -1684,11 +1723,13 @@
 static void at_xdmac_issue_pending(struct dma_chan *chan)
 {
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	unsigned long flags;

 	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

-	if (!at_xdmac_chan_is_cyclic(atchan))
-		at_xdmac_advance_work(atchan);
+	spin_lock_irqsave(&atchan->lock, flags);
+	at_xdmac_advance_work(atchan);
+	spin_unlock_irqrestore(&atchan->lock, flags);

 	return;
 }
@@ -1781,27 +1822,27 @@
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&atchan->lock, flags);

 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
 			"can't allocate channel resources (channel enabled)\n");
-		i = -EIO;
-		goto spin_unlock;
+		return -EIO;
 	}

 	if (!list_empty(&atchan->free_descs_list)) {
 		dev_err(chan2dev(chan),
 			"can't allocate channel resources (channel not free from a previous use)\n");
-		i = -EIO;
-		goto spin_unlock;
+		return -EIO;
 	}

 	for (i = 0; i < init_nr_desc_per_channel; i++) {
-		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
+		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
 		if (!desc) {
+			if (i == 0) {
+				dev_warn(chan2dev(chan),
+					 "can't allocate any descriptors\n");
+				return -EIO;
+			}
 			dev_warn(chan2dev(chan),
 				 "only %d descriptors have been allocated\n", i);
 			break;
@@ -1813,8 +1854,6 @@

 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

-spin_unlock:
-	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }

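
Note: with the channel-wide spinlock gone from this function, the descriptor allocation may sleep, so GFP_ATOMIC (which fails readily under memory pressure) becomes GFP_KERNEL; the goto spin_unlock error funnel collapses into plain early returns; and allocating nothing at all now fails loudly with -EIO instead of reporting zero descriptors as success. Abridged shape of the reworked function, condensed from the two hunks above:

    /* Error paths return directly since no lock is held. */
    static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
    {
            struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
            struct at_xdmac_desc *desc;
            int i;

            if (at_xdmac_chan_is_enabled(atchan))
                    return -EIO;                  /* nothing to unlock */

            for (i = 0; i < init_nr_desc_per_channel; i++) {
                    desc = at_xdmac_alloc_desc(chan, GFP_KERNEL); /* may sleep */
                    if (!desc) {
                            if (i == 0)
                                    return -EIO;  /* no descriptors at all */
                            break;                /* a partial set still works */
                    }
                    list_add_tail(&desc->desc_node, &atchan->free_descs_list);
            }
            return i;   /* the dmaengine core treats a negative value as failure */
    }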
@@ -1916,21 +1955,16 @@

 static int at_xdmac_probe(struct platform_device *pdev)
 {
-	struct resource	*res;
 	struct at_xdmac	*atxdmac;
 	int		irq, size, nr_channels, i, ret;
 	void __iomem	*base;
 	u32		reg;

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return irq;

-	base = devm_ioremap_resource(&pdev->dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);

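
Note: devm_platform_ioremap_resource(pdev, 0) wraps exactly the two-step sequence it replaces, and devm_ioremap_resource() already returns ERR_PTR(-EINVAL) for a missing resource, so the hand-rolled platform_get_resource()/NULL check bought nothing. What the helper does internally (paraphrasing drivers/base/platform.c; the function name here is hypothetical):

    void __iomem *ioremap_first_mem(struct platform_device *pdev)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            /* devm_ioremap_resource() rejects res == NULL with -EINVAL. */
            return devm_ioremap_resource(&pdev->dev, res);
    }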
@@ -2035,8 +2069,7 @@
 		spin_lock_init(&atchan->lock);
 		INIT_LIST_HEAD(&atchan->xfers_list);
 		INIT_LIST_HEAD(&atchan->free_descs_list);
-		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
-			     (unsigned long)atchan);
+		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);

 		/* Clear pending interrupts. */
 		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
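
Note: this is the registration half of the tasklet conversion. tasklet_init() smuggled the channel pointer through a type-erased unsigned long; tasklet_setup() drops the data argument entirely, so the callback must use the new struct tasklet_struct * prototype (see the at_xdmac_tasklet hunk above) and recover its object with from_tasklet(). The relevant kernel interfaces, paraphrased from include/linux/interrupt.h for reference:

    void tasklet_setup(struct tasklet_struct *t,
                       void (*callback)(struct tasklet_struct *));

    /* from_tasklet(var, t, member) is container_of() in disguise: it maps
     * the tasklet_struct back to the structure that embeds it, using the
     * type of 'var' and the name of the embedding member. */
    #define from_tasklet(var, callback_tasklet, tasklet_member) \
            container_of(callback_tasklet, typeof(*var), tasklet_member)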