2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/dma/ti/edma.c
@@ -15,7 +15,7 @@
 
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
-#include <linux/edma.h>
+#include <linux/bitmap.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -133,6 +133,17 @@
 #define EDMA_CONT_PARAMS_FIXED_EXACT 1002
 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
 
+/*
+ * 64-bit array registers are split into two 32-bit registers:
+ * reg0: channel/event 0-31
+ * reg1: channel/event 32-63
+ *
+ * Bit 5 of the channel number selects the array index (0/1);
+ * bits 0-4 (0x1f) give the bit offset within the register.
+ */
+#define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5)
+#define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f))
+
 /* PaRAM slots are laid out like this */
 struct edmacc_param {
         u32 opt;
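
The two macros above capture the index math described in the comment; every `channel >> 5` / `BIT(channel & 0x1f)` pair below converts to them. A quick worked sketch outside the kernel (BIT() is redefined locally here, since it normally comes from <linux/bits.h>):

    #include <stdio.h>

    #define BIT(nr)                         (1u << (nr))
    #define EDMA_REG_ARRAY_INDEX(channel)   ((channel) >> 5)
    #define EDMA_CHANNEL_BIT(channel)       (BIT((channel) & 0x1f))

    int main(void)
    {
            int channel = 37;       /* 37 = 32 + 5 */

            /* prints "idx=1 bit=0x00000020": reg1, bit 5 */
            printf("idx=%d bit=0x%08x\n",
                   EDMA_REG_ARRAY_INDEX(channel),
                   EDMA_CHANNEL_BIT(channel));
            return 0;
    }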
@@ -169,6 +180,7 @@
         struct list_head node;
         enum dma_transfer_direction direction;
         int cyclic;
+        bool polled;
         int absync;
         int pset_nr;
         struct edma_chan *echan;
@@ -199,7 +211,7 @@
         u32 residue;
         u32 residue_stat;
 
-        struct edma_pset pset[0];
+        struct edma_pset pset[];
 };
 
 struct edma_cc;
@@ -247,6 +259,13 @@
         * in use by Linux or if it is allocated to be used by DSP.
         */
        unsigned long *slot_inuse;
+
+       /*
+        * For tracking channels reserved for use by the DSP.
+        * If a bit is cleared, the channel is allocated to the DSP and
+        * Linux must not touch it.
+        */
+       unsigned long *channels_mask;
 
        struct dma_device dma_slave;
        struct dma_device *dma_memcpy;
@@ -412,12 +431,6 @@
         edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
 }
 
-static inline void edma_set_bits(int offset, int len, unsigned long *p)
-{
-        for (; len > 0; len--)
-                set_bit(offset + (len - 1), p);
-}
-
 static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
                                           int priority)
 {
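
The edma_set_bits() helper deleted above was an open-coded loop around set_bit(); the probe path later in this patch calls bitmap_set() from <linux/bitmap.h> (newly included at the top) instead. The two are equivalent for this use:

    /* before: one set_bit() call per bit, counting down */
    for (; len > 0; len--)
            set_bit(offset + (len - 1), p);

    /* after: a single call to the generic bitmap helper */
    bitmap_set(p, offset, len);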
@@ -441,15 +454,14 @@
 {
         struct edma_cc *ecc = echan->ecc;
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
+        int idx = EDMA_REG_ARRAY_INDEX(channel);
+        int ch_bit = EDMA_CHANNEL_BIT(channel);
 
         if (enable) {
-                edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
-                                         BIT(channel & 0x1f));
-                edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
-                                         BIT(channel & 0x1f));
+                edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
+                edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
         } else {
-                edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
-                                         BIT(channel & 0x1f));
+                edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
         }
 }
 
@@ -587,26 +599,26 @@
 {
         struct edma_cc *ecc = echan->ecc;
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
-        int j = (channel >> 5);
-        unsigned int mask = BIT(channel & 0x1f);
+        int idx = EDMA_REG_ARRAY_INDEX(channel);
+        int ch_bit = EDMA_CHANNEL_BIT(channel);
 
         if (!echan->hw_triggered) {
                 /* EDMA channels without event association */
-                dev_dbg(ecc->dev, "ESR%d %08x\n", j,
-                        edma_shadow0_read_array(ecc, SH_ESR, j));
-                edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+                dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+                        edma_shadow0_read_array(ecc, SH_ESR, idx));
+                edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
         } else {
                 /* EDMA channel with event association */
-                dev_dbg(ecc->dev, "ER%d %08x\n", j,
-                        edma_shadow0_read_array(ecc, SH_ER, j));
+                dev_dbg(ecc->dev, "ER%d %08x\n", idx,
+                        edma_shadow0_read_array(ecc, SH_ER, idx));
                 /* Clear any pending event or error */
-                edma_write_array(ecc, EDMA_ECR, j, mask);
-                edma_write_array(ecc, EDMA_EMCR, j, mask);
+                edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
+                edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
                 /* Clear any SER */
-                edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-                edma_shadow0_write_array(ecc, SH_EESR, j, mask);
-                dev_dbg(ecc->dev, "EER%d %08x\n", j,
-                        edma_shadow0_read_array(ecc, SH_EER, j));
+                edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+                edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
+                dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+                        edma_shadow0_read_array(ecc, SH_EER, idx));
         }
 }
 
@@ -614,19 +626,19 @@
 {
         struct edma_cc *ecc = echan->ecc;
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
-        int j = (channel >> 5);
-        unsigned int mask = BIT(channel & 0x1f);
+        int idx = EDMA_REG_ARRAY_INDEX(channel);
+        int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-        edma_shadow0_write_array(ecc, SH_EECR, j, mask);
-        edma_shadow0_write_array(ecc, SH_ECR, j, mask);
-        edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-        edma_write_array(ecc, EDMA_EMCR, j, mask);
+        edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
+        edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
+        edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+        edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
 
         /* clear possibly pending completion interrupt */
-        edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+        edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
 
-        dev_dbg(ecc->dev, "EER%d %08x\n", j,
-                edma_shadow0_read_array(ecc, SH_EER, j));
+        dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+                edma_shadow0_read_array(ecc, SH_EER, idx));
 
         /* REVISIT: consider guarding against inappropriate event
          * chaining by overwriting with dummy_paramset.
@@ -640,45 +652,49 @@
 static void edma_pause(struct edma_chan *echan)
 {
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
-        unsigned int mask = BIT(channel & 0x1f);
 
-        edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+        edma_shadow0_write_array(echan->ecc, SH_EECR,
+                                 EDMA_REG_ARRAY_INDEX(channel),
+                                 EDMA_CHANNEL_BIT(channel));
 }
 
 /* Re-enable EDMA hardware events on the specified channel. */
 static void edma_resume(struct edma_chan *echan)
 {
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
-        unsigned int mask = BIT(channel & 0x1f);
 
-        edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+        edma_shadow0_write_array(echan->ecc, SH_EESR,
+                                 EDMA_REG_ARRAY_INDEX(channel),
+                                 EDMA_CHANNEL_BIT(channel));
 }
 
 static void edma_trigger_channel(struct edma_chan *echan)
 {
         struct edma_cc *ecc = echan->ecc;
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
-        unsigned int mask = BIT(channel & 0x1f);
+        int idx = EDMA_REG_ARRAY_INDEX(channel);
+        int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-        edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+        edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
 
-        dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
-                edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+        dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+                edma_shadow0_read_array(ecc, SH_ESR, idx));
 }
 
 static void edma_clean_channel(struct edma_chan *echan)
 {
         struct edma_cc *ecc = echan->ecc;
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
-        int j = (channel >> 5);
-        unsigned int mask = BIT(channel & 0x1f);
+        int idx = EDMA_REG_ARRAY_INDEX(channel);
+        int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-        dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
-        edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+        dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
+                edma_read_array(ecc, EDMA_EMR, idx));
+        edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
         /* Clear the corresponding EMR bits */
-        edma_write_array(ecc, EDMA_EMCR, j, mask);
+        edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
         /* Clear any SER */
-        edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+        edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
         edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
 }
 
@@ -707,8 +723,15 @@
         struct edma_cc *ecc = echan->ecc;
         int channel = EDMA_CHAN_SLOT(echan->ch_num);
 
+        if (!test_bit(echan->ch_num, ecc->channels_mask)) {
+                dev_err(ecc->dev, "Channel%d is reserved, cannot be used!\n",
+                        echan->ch_num);
+                return -EINVAL;
+        }
+
         /* ensure access through shadow region 0 */
-        edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+        edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
+                       EDMA_CHANNEL_BIT(channel));
 
         /* ensure no events are pending */
         edma_stop(echan);
@@ -1011,6 +1034,7 @@
                 src_cidx = cidx;
                 dst_bidx = acnt;
                 dst_cidx = cidx;
+                epset->addr = src_addr;
         } else {
                 dev_err(dev, "%s: direction not implemented yet\n", __func__);
                 return -EINVAL;
@@ -1211,8 +1235,9 @@
 
         edesc->pset[0].param.opt |= ITCCHEN;
         if (nslots == 1) {
-                /* Enable transfer complete interrupt */
-                edesc->pset[0].param.opt |= TCINTEN;
+                /* Enable transfer complete interrupt if requested */
+                if (tx_flags & DMA_PREP_INTERRUPT)
+                        edesc->pset[0].param.opt |= TCINTEN;
         } else {
                 /* Enable transfer complete chaining for the first slot */
                 edesc->pset[0].param.opt |= TCCHEN;
@@ -1239,8 +1264,88 @@
         }
 
         edesc->pset[1].param.opt |= ITCCHEN;
-        edesc->pset[1].param.opt |= TCINTEN;
+        /* Enable transfer complete interrupt if requested */
+        if (tx_flags & DMA_PREP_INTERRUPT)
+                edesc->pset[1].param.opt |= TCINTEN;
         }
+
+        if (!(tx_flags & DMA_PREP_INTERRUPT))
+                edesc->polled = true;
+
+        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *
+edma_prep_dma_interleaved(struct dma_chan *chan,
+                          struct dma_interleaved_template *xt,
+                          unsigned long tx_flags)
+{
+        struct device *dev = chan->device->dev;
+        struct edma_chan *echan = to_edma_chan(chan);
+        struct edmacc_param *param;
+        struct edma_desc *edesc;
+        size_t src_icg, dst_icg;
+        int src_bidx, dst_bidx;
+
+        /* Slave mode is not supported */
+        if (is_slave_direction(xt->dir))
+                return NULL;
+
+        if (xt->frame_size != 1 || xt->numf == 0)
+                return NULL;
+
+        if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
+                return NULL;
+
+        src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+        if (src_icg) {
+                src_bidx = src_icg + xt->sgl[0].size;
+        } else if (xt->src_inc) {
+                src_bidx = xt->sgl[0].size;
+        } else {
+                dev_err(dev, "%s: SRC constant addressing is not supported\n",
+                        __func__);
+                return NULL;
+        }
+
+        dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+        if (dst_icg) {
+                dst_bidx = dst_icg + xt->sgl[0].size;
+        } else if (xt->dst_inc) {
+                dst_bidx = xt->sgl[0].size;
+        } else {
+                dev_err(dev, "%s: DST constant addressing is not supported\n",
+                        __func__);
+                return NULL;
+        }
+
+        if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
+                return NULL;
+
+        edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
+        if (!edesc)
+                return NULL;
+
+        edesc->direction = DMA_MEM_TO_MEM;
+        edesc->echan = echan;
+        edesc->pset_nr = 1;
+
+        param = &edesc->pset[0].param;
+
+        param->src = xt->src_start;
+        param->dst = xt->dst_start;
+        param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
+        param->ccnt = 1;
+        param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+        param->src_dst_cidx = 0;
+
+        param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+        param->opt |= ITCCHEN;
+        /* Enable transfer complete interrupt if requested */
+        if (tx_flags & DMA_PREP_INTERRUPT)
+                param->opt |= TCINTEN;
+        else
+                edesc->polled = true;
 
         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
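
The new edma_prep_dma_interleaved() maps a single-chunk template onto one PaRAM set: sgl[0].size becomes ACNT, numf becomes BCNT, CCNT is fixed at 1, and each B-index is the chunk size plus its inter-chunk gap (hence the SZ_64K limits on all of them). A hedged sketch of the client side using only generic dmaengine calls; chan, src_dma, dst_dma and the sizes are illustrative assumptions:

    struct dma_interleaved_template *xt;
    struct dma_async_tx_descriptor *tx;

    xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
    if (!xt)
            return -ENOMEM;

    xt->dir = DMA_MEM_TO_MEM;
    xt->src_start = src_dma;        /* dma_addr_t, mapped elsewhere */
    xt->dst_start = dst_dma;
    xt->numf = 64;                  /* frames -> BCNT (capped at 64K) */
    xt->frame_size = 1;             /* this driver takes one chunk only */
    xt->sgl[0].size = 512;          /* bytes per frame -> ACNT */
    xt->sgl[0].icg = 512;           /* gap -> BIDX = size + icg = 1024 */
    xt->src_inc = true;
    xt->dst_inc = true;

    tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);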
@@ -1721,7 +1826,11 @@
         int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
         struct edma_chan *echan = edesc->echan;
         struct edma_pset *pset = edesc->pset;
-        dma_addr_t done, pos;
+        dma_addr_t done, pos, pos_old;
+        int channel = EDMA_CHAN_SLOT(echan->ch_num);
+        int idx = EDMA_REG_ARRAY_INDEX(channel);
+        int ch_bit = EDMA_CHANNEL_BIT(channel);
+        int event_reg;
         int i;
 
         /*
@@ -1734,16 +1843,20 @@
          * "pos" may represent a transfer request that is still being
          * processed by the EDMACC or EDMATC. We will busy wait until
          * any one of the situations occurs:
-         * 1. the DMA hardware is idle
-         * 2. a new transfer request is setup
+         * 1. an event is pending for the channel
+         * 2. the position has been updated
          * 3. we hit the loop limit
          */
-        while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
-                /* check if a new transfer request is setup */
-                if (edma_get_position(echan->ecc,
-                                      echan->slot[0], dst) != pos) {
+        if (is_slave_direction(edesc->direction))
+                event_reg = SH_ER;
+        else
+                event_reg = SH_ESR;
+
+        pos_old = pos;
+        while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
+                pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+                if (pos != pos_old)
                         break;
-                }
 
                 if (!--loop_count) {
                         dev_dbg_ratelimited(echan->vchan.chan.device->dev,
@@ -1768,6 +1881,12 @@
                 return edesc->residue_stat;
         }
 
+        /*
+         * If the position is 0, then EDMA loaded the closing dummy slot and
+         * the transfer is complete.
+         */
+        if (!pos)
+                return 0;
         /*
          * For SG operation we catch up with the last processed
          * status.
@@ -1796,19 +1915,46 @@
                                       struct dma_tx_state *txstate)
 {
         struct edma_chan *echan = to_edma_chan(chan);
-        struct virt_dma_desc *vdesc;
+        struct dma_tx_state txstate_tmp;
         enum dma_status ret;
         unsigned long flags;
 
         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_COMPLETE || !txstate)
+
+        if (ret == DMA_COMPLETE)
                 return ret;
 
+        /* Provide a dummy dma_tx_state for completion checking */
+        if (!txstate)
+                txstate = &txstate_tmp;
+
         spin_lock_irqsave(&echan->vchan.lock, flags);
-        if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+        if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
                 txstate->residue = edma_residue(echan->edesc);
-        else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
-                txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+        } else {
+                struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
+                                                              cookie);
+
+                if (vdesc)
+                        txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+                else
+                        txstate->residue = 0;
+        }
+
+        /*
+         * Mark the cookie completed if the residue is 0 for non-cyclic
+         * transfers
+         */
+        if (ret != DMA_COMPLETE && !txstate->residue &&
+            echan->edesc && echan->edesc->polled &&
+            echan->edesc->vdesc.tx.cookie == cookie) {
+                edma_stop(echan);
+                vchan_cookie_complete(&echan->edesc->vdesc);
+                echan->edesc = NULL;
+                edma_execute(echan);
+                ret = DMA_COMPLETE;
+        }
+
         spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
         return ret;
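
Together with the prep-side changes, this closes the polled-completion loop: a descriptor submitted without DMA_PREP_INTERRUPT raises no IRQ, so edma_tx_status() itself stops the channel and retires the cookie once the residue reaches zero. A sketch of how a client might poll such a transfer; the calls are standard dmaengine API, the rest is assumed:

    struct dma_async_tx_descriptor *tx;
    enum dma_status status;
    dma_cookie_t cookie;

    /* flags = 0: no DMA_PREP_INTERRUPT, so the descriptor is polled */
    tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
    cookie = dmaengine_submit(tx);
    dma_async_issue_pending(chan);

    do {
            /* passing a NULL state is fine; the driver substitutes a dummy */
            status = dmaengine_tx_status(chan, cookie, NULL);
            cpu_relax();
    } while (status == DMA_IN_PROGRESS);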
@@ -1846,7 +1992,9 @@
                          "Legacy memcpy is enabled, things might not work\n");
 
                 dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+                dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
                 s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+                s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
                 s_ddev->directions = BIT(DMA_MEM_TO_MEM);
         }
 
@@ -1882,8 +2030,10 @@
 
         dma_cap_zero(m_ddev->cap_mask);
         dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+        dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
 
         m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+        m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
         m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
         m_ddev->device_free_chan_resources = edma_free_chan_resources;
         m_ddev->device_issue_pending = edma_issue_pending;
@@ -2185,14 +2335,14 @@
 }
 #endif
 
+static bool edma_filter_fn(struct dma_chan *chan, void *param);
+
 static int edma_probe(struct platform_device *pdev)
 {
         struct edma_soc_info *info = pdev->dev.platform_data;
         s8 (*queue_priority_mapping)[2];
-        int i, off, ln;
-        const s16 (*rsv_slots)[2];
-        const s16 (*xbar_chans)[2];
-        int irq;
+        const s16 (*reserved)[2];
+        int i, irq;
         char *irq_name;
         struct resource *mem;
         struct device_node *node = pdev->dev.of_node;
@@ -2217,13 +2367,6 @@
 
         if (!info)
                 return -ENODEV;
-
-        pm_runtime_enable(dev);
-        ret = pm_runtime_get_sync(dev);
-        if (ret < 0) {
-                dev_err(dev, "pm_runtime_get_sync() failed\n");
-                return ret;
-        }
 
         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
         if (ret)
@@ -2255,33 +2398,54 @@
 
         platform_set_drvdata(pdev, ecc);
 
+        pm_runtime_enable(dev);
+        ret = pm_runtime_get_sync(dev);
+        if (ret < 0) {
+                dev_err(dev, "pm_runtime_get_sync() failed\n");
+                pm_runtime_disable(dev);
+                return ret;
+        }
+
         /* Get eDMA3 configuration from IP */
         ret = edma_setup_from_hw(dev, info, ecc);
         if (ret)
-                return ret;
+                goto err_disable_pm;
 
         /* Allocate memory based on the information we got from the IP */
         ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
                                         sizeof(*ecc->slave_chans), GFP_KERNEL);
-        if (!ecc->slave_chans)
-                return -ENOMEM;
 
         ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
                                        sizeof(unsigned long), GFP_KERNEL);
-        if (!ecc->slot_inuse)
-                return -ENOMEM;
+
+        ecc->channels_mask = devm_kcalloc(dev,
+                                          BITS_TO_LONGS(ecc->num_channels),
+                                          sizeof(unsigned long), GFP_KERNEL);
+        if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
+                ret = -ENOMEM;
+                goto err_disable_pm;
+        }
+
+        /* Mark all channels available initially */
+        bitmap_fill(ecc->channels_mask, ecc->num_channels);
 
         ecc->default_queue = info->default_queue;
 
         if (info->rsv) {
                 /* Set the reserved slots in inuse list */
-                rsv_slots = info->rsv->rsv_slots;
-                if (rsv_slots) {
-                        for (i = 0; rsv_slots[i][0] != -1; i++) {
-                                off = rsv_slots[i][0];
-                                ln = rsv_slots[i][1];
-                                edma_set_bits(off, ln, ecc->slot_inuse);
-                        }
+                reserved = info->rsv->rsv_slots;
+                if (reserved) {
+                        for (i = 0; reserved[i][0] != -1; i++)
+                                bitmap_set(ecc->slot_inuse, reserved[i][0],
+                                           reserved[i][1]);
+                }
+
+                /* Clear channels not usable for Linux */
+                reserved = info->rsv->rsv_chans;
+                if (reserved) {
+                        for (i = 0; reserved[i][0] != -1; i++)
+                                bitmap_clear(ecc->channels_mask, reserved[i][0],
+                                             reserved[i][1]);
                 }
         }
 
@@ -2289,14 +2453,6 @@
                 /* Reset only unused - not reserved - paRAM slots */
                 if (!test_bit(i, ecc->slot_inuse))
                         edma_write_slot(ecc, i, &dummy_paramset);
-        }
-
-        /* Clear the xbar mapped channels in unused list */
-        xbar_chans = info->xbar_chans;
-        if (xbar_chans) {
-                for (i = 0; xbar_chans[i][1] != -1; i++) {
-                        off = xbar_chans[i][1];
-                }
         }
 
         irq = platform_get_irq_byname(pdev, "edma3_ccint");
@@ -2310,7 +2466,7 @@
                                        ecc);
                 if (ret) {
                         dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
-                        return ret;
+                        goto err_disable_pm;
                 }
                 ecc->ccint = irq;
         }
@@ -2326,7 +2482,7 @@
                                        ecc);
                 if (ret) {
                         dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
-                        return ret;
+                        goto err_disable_pm;
                 }
                 ecc->ccerrint = irq;
         }
@@ -2334,13 +2490,15 @@
         ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
         if (ecc->dummy_slot < 0) {
                 dev_err(dev, "Can't allocate PaRAM dummy slot\n");
-                return ecc->dummy_slot;
+                ret = ecc->dummy_slot;
+                goto err_disable_pm;
         }
 
         queue_priority_mapping = info->queue_priority_mapping;
 
         if (!ecc->legacy_mode) {
                 int lowest_priority = 0;
+                unsigned int array_max;
                 struct of_phandle_args tc_args;
 
                 ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
@@ -2364,6 +2522,18 @@
                                 info->default_queue = i;
                         }
                 }
+
+                /* See if we have optional dma-channel-mask array */
+                array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
+                ret = of_property_read_variable_u32_array(node,
+                                                          "dma-channel-mask",
+                                                          (u32 *)ecc->channels_mask,
+                                                          1, array_max);
+                if (ret > 0 && ret != array_max)
+                        dev_warn(dev, "dma-channel-mask is not complete.\n");
+                else if (ret == -EOVERFLOW || ret == -ENODATA)
+                        dev_warn(dev,
+                                 "dma-channel-mask is out of range or empty\n");
         }
23692539 /* Event queue priority mapping */
....@@ -2371,17 +2541,20 @@
23712541 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
23722542 queue_priority_mapping[i][1]);
23732543
2374
- for (i = 0; i < ecc->num_region; i++) {
2375
- edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
2376
- edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
2377
- edma_write_array(ecc, EDMA_QRAE, i, 0x0);
2378
- }
2544
+ edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
2545
+ edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
2546
+ edma_write_array(ecc, EDMA_QRAE, 0, 0x0);
2547
+
23792548 ecc->info = info;
23802549
23812550 /* Init the dma device and channels */
23822551 edma_dma_init(ecc, legacy_mode);
23832552
23842553 for (i = 0; i < ecc->num_channels; i++) {
2554
+ /* Do not touch reserved channels */
2555
+ if (!test_bit(i, ecc->channels_mask))
2556
+ continue;
2557
+
23852558 /* Assign all channels to the default queue */
23862559 edma_assign_channel_eventq(&ecc->slave_chans[i],
23872560 info->default_queue);
@@ -2418,6 +2591,9 @@
 
 err_reg1:
         edma_free_slot(ecc, ecc->dummy_slot);
+err_disable_pm:
+        pm_runtime_put_sync(dev);
+        pm_runtime_disable(dev);
         return ret;
 }
 
@@ -2448,6 +2624,8 @@
         if (ecc->dma_memcpy)
                 dma_async_device_unregister(ecc->dma_memcpy);
         edma_free_slot(ecc, ecc->dummy_slot);
+        pm_runtime_put_sync(dev);
+        pm_runtime_disable(dev);
 
         return 0;
 }
@@ -2487,8 +2665,9 @@
         for (i = 0; i < ecc->num_channels; i++) {
                 if (echan[i].alloced) {
                         /* ensure access through shadow region 0 */
-                        edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
-                                       BIT(i & 0x1f));
+                        edma_or_array2(ecc, EDMA_DRAE, 0,
+                                       EDMA_REG_ARRAY_INDEX(i),
+                                       EDMA_CHANNEL_BIT(i));
 
                         edma_setup_interrupt(&echan[i], true);
 
@@ -2529,7 +2708,7 @@
         },
 };
 
-bool edma_filter_fn(struct dma_chan *chan, void *param)
+static bool edma_filter_fn(struct dma_chan *chan, void *param)
 {
         bool match = false;
 
@@ -2544,7 +2723,6 @@
         }
         return match;
 }
-EXPORT_SYMBOL(edma_filter_fn);
 
 static int edma_init(void)
 {