hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/dma/at_xdmac.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
  *
  * Copyright (C) 2014 Atmel Corporation
  *
  * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <asm/barrier.h>
@@ -223,8 +212,9 @@
 	int			irq;
 	struct clk		*clk;
 	u32			save_gim;
+	u32			save_gs;
 	struct dma_pool		*at_xdmac_desc_pool;
-	struct at_xdmac_chan	chan[0];
+	struct at_xdmac_chan	chan[];
 };
 
 
@@ -309,6 +299,11 @@
 	return csize;
 };
 
+static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
+{
+	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
+}
+
 static inline u8 at_xdmac_get_dwidth(u32 cfg)
 {
 	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
@@ -388,7 +383,13 @@
 		  at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 
 	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
-	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
+	/*
+	 * Request Overflow Error is only for peripheral synchronized transfers
+	 */
+	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
+		reg |= AT_XDMAC_CIE_ROIE;
+
 	/*
 	 * There is no end of list when doing cyclic dma, we need to get
 	 * an interrupt after each periods.
@@ -677,7 +678,8 @@
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
-				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+				list_splice_tail_init(&first->descs_list,
+						      &atchan->free_descs_list);
 			goto spin_unlock;
 		}
 
@@ -765,7 +767,8 @@
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
-				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+				list_splice_tail_init(&first->descs_list,
+						      &atchan->free_descs_list);
 			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
@@ -967,6 +970,8 @@
 							NULL,
 							src_addr, dst_addr,
 							xt, xt->sgl);
+		if (!first)
+			return NULL;
 
 		/* Length of the block is (BLEN+1) microblocks. */
 		for (i = 0; i < xt->numf - 1; i++)
@@ -997,8 +1002,9 @@
 							       src_addr, dst_addr,
 							       xt, chunk);
 			if (!desc) {
-				list_splice_init(&first->descs_list,
-						 &atchan->free_descs_list);
+				if (first)
+					list_splice_tail_init(&first->descs_list,
+							      &atchan->free_descs_list);
 				return NULL;
 			}
 
@@ -1076,7 +1082,8 @@
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
-				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+				list_splice_tail_init(&first->descs_list,
+						      &atchan->free_descs_list);
 			return NULL;
 		}
 
@@ -1250,8 +1257,8 @@
 						   sg_dma_len(sg),
 						   value);
 		if (!desc && first)
-			list_splice_init(&first->descs_list,
-					 &atchan->free_descs_list);
+			list_splice_tail_init(&first->descs_list,
+					      &atchan->free_descs_list);
 
 		if (!first)
 			first = desc;
@@ -1390,7 +1397,7 @@
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_desc	*desc, *_desc, *iter;
 	struct list_head	*descs_list;
 	enum dma_status		ret;
 	int			residue, retry;
@@ -1505,11 +1512,13 @@
 	 * microblock.
 	 */
 	descs_list = &desc->descs_list;
-	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
-		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
-		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+			desc = iter;
 			break;
+		}
 	}
 	residue += cur_ubc << dwidth;
 
@@ -1524,26 +1533,9 @@
 	return ret;
 }
 
-/* Call must be protected by lock. */
-static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
-				 struct at_xdmac_desc *desc)
-{
-	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-
-	/*
-	 * Remove the transfer from the transfer list then move the transfer
-	 * descriptors into the free descriptors list.
-	 */
-	list_del(&desc->xfer_node);
-	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
-}
-
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc *desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1557,8 +1549,6 @@
 		if (!desc->active_xfer)
 			at_xdmac_start_xfer(atchan, desc);
 	}
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1566,16 +1556,62 @@
 	struct at_xdmac_desc		*desc;
 	struct dma_async_tx_descriptor	*txd;
 
-	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+	spin_lock_irq(&atchan->lock);
+	if (list_empty(&atchan->xfers_list)) {
+		spin_unlock_irq(&atchan->lock);
+		return;
+	}
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+				xfer_node);
+	spin_unlock_irq(&atchan->lock);
 	txd = &desc->tx_dma_desc;
-
 	if (txd->flags & DMA_PREP_INTERRUPT)
 		dmaengine_desc_get_callback_invoke(txd, NULL);
 }
 
-static void at_xdmac_tasklet(unsigned long data)
+static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 {
-	struct at_xdmac_chan	*atchan = (struct at_xdmac_chan *)data;
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	struct at_xdmac_desc	*bad_desc;
+
+	/*
+	 * The descriptor currently at the head of the active list is
+	 * broken. Since we don't have any way to report errors, we'll
+	 * just have to scream loudly and try to continue with other
+	 * descriptors queued (if any).
+	 */
+	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+	spin_lock_irq(&atchan->lock);
+
+	/* Channel must be disabled first as it's not done automatically */
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
+
+	bad_desc = list_first_entry(&atchan->xfers_list,
+				    struct at_xdmac_desc,
+				    xfer_node);
+
+	spin_unlock_irq(&atchan->lock);
+
+	/* Print bad descriptor's details if needed */
+	dev_dbg(chan2dev(&atchan->chan),
+		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
+		bad_desc->lld.mbr_ubc);
+
+	/* Then continue with usual descriptor management */
+}
+
+static void at_xdmac_tasklet(struct tasklet_struct *t)
+{
+	struct at_xdmac_chan	*atchan = from_tasklet(atchan, t, tasklet);
 	struct at_xdmac_desc	*desc;
 	u32			error_mask;
@@ -1592,38 +1628,37 @@
 		   || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor *txd;
 
-		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
-			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
-			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
-			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+		if (atchan->irq_status & error_mask)
+			at_xdmac_handle_error(atchan);
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irq(&atchan->lock);
 		desc = list_first_entry(&atchan->xfers_list,
 					struct at_xdmac_desc,
 					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
 		if (!desc->active_xfer) {
 			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-			spin_unlock(&atchan->lock);
+			spin_unlock_irq(&atchan->lock);
 			return;
 		}
 
 		txd = &desc->tx_dma_desc;
+		dma_cookie_complete(txd);
+		/* Remove the transfer from the transfer list. */
+		list_del(&desc->xfer_node);
+		spin_unlock_irq(&atchan->lock);
 
-		at_xdmac_remove_xfer(atchan, desc);
-		spin_unlock_bh(&atchan->lock);
-
-		if (!at_xdmac_chan_is_cyclic(atchan)) {
-			dma_cookie_complete(txd);
-			if (txd->flags & DMA_PREP_INTERRUPT)
-				dmaengine_desc_get_callback_invoke(txd, NULL);
-		}
+		if (txd->flags & DMA_PREP_INTERRUPT)
+			dmaengine_desc_get_callback_invoke(txd, NULL);
 
 		dma_run_dependencies(txd);
 
+		spin_lock_irq(&atchan->lock);
+		/* Move the xfer descriptors into the free descriptors list. */
+		list_splice_tail_init(&desc->descs_list,
+				      &atchan->free_descs_list);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irq(&atchan->lock);
 	}
 }
 
@@ -1684,11 +1719,13 @@
 static void at_xdmac_issue_pending(struct dma_chan *chan)
 {
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	unsigned long flags;
 
 	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
 
-	if (!at_xdmac_chan_is_cyclic(atchan))
-		at_xdmac_advance_work(atchan);
+	spin_lock_irqsave(&atchan->lock, flags);
+	at_xdmac_advance_work(atchan);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return;
 }
@@ -1766,8 +1803,11 @@
 		cpu_relax();
 
 	/* Cancel all pending transfers. */
-	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
-		at_xdmac_remove_xfer(atchan, desc);
+	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+		list_del(&desc->xfer_node);
+		list_splice_tail_init(&desc->descs_list,
+				      &atchan->free_descs_list);
+	}
 
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
@@ -1781,27 +1821,27 @@
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&atchan->lock, flags);
 
 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
 			"can't allocate channel resources (channel enabled)\n");
-		i = -EIO;
-		goto spin_unlock;
+		return -EIO;
 	}
 
 	if (!list_empty(&atchan->free_descs_list)) {
 		dev_err(chan2dev(chan),
 			"can't allocate channel resources (channel not free from a previous use)\n");
-		i = -EIO;
-		goto spin_unlock;
+		return -EIO;
 	}
 
 	for (i = 0; i < init_nr_desc_per_channel; i++) {
-		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
+		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
 		if (!desc) {
+			if (i == 0) {
+				dev_warn(chan2dev(chan),
+					 "can't allocate any descriptors\n");
+				return -EIO;
+			}
 			dev_warn(chan2dev(chan),
 				 "only %d descriptors have been allocated\n", i);
 			break;
@@ -1813,8 +1853,6 @@
 
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
-spin_unlock:
-	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }
 
@@ -1871,6 +1909,7 @@
 		}
 	}
 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
 
 	at_xdmac_off(atxdmac);
 	clk_disable_unprepare(atxdmac->clk);
@@ -1907,7 +1946,8 @@
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
 			wmb();
-			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+			if (atxdmac->save_gs & atchan->mask)
+				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
 		}
 	}
 	return 0;
@@ -1916,21 +1956,16 @@
 
 static int at_xdmac_probe(struct platform_device *pdev)
 {
-	struct resource	*res;
 	struct at_xdmac	*atxdmac;
 	int		irq, size, nr_channels, i, ret;
 	void __iomem	*base;
 	u32		reg;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return irq;
 
-	base = devm_ioremap_resource(&pdev->dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
@@ -2035,8 +2070,7 @@
 		spin_lock_init(&atchan->lock);
 		INIT_LIST_HEAD(&atchan->xfers_list);
 		INIT_LIST_HEAD(&atchan->free_descs_list);
-		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
-			     (unsigned long)atchan);
+		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
 
 		/* Clear pending interrupts. */
 		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))