commit 2f529f9b558ca1c1bd74be7437a84e4711743404
Date:   2024-11-01

diff --git a/kernel/drivers/dma/imx-sdma.c b/kernel/drivers/dma/imx-sdma.c
--- a/kernel/drivers/dma/imx-sdma.c
+++ b/kernel/drivers/dma/imx-sdma.c
@@ -444,6 +444,10 @@
         struct sdma_buffer_descriptor *bd0;
         /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
         bool clk_ratio;
+#ifdef CONFIG_IMX_SDMA_OOB
+        hard_spinlock_t oob_lock;
+        u32 pending_stat;
+#endif
 };
 
 static int sdma_config_write(struct dma_chan *chan,
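Note on the two fields added above: on a Dovetail kernel a hard_spinlock_t spins with hard interrupts disabled and may be taken from out-of-band context, so oob_lock together with pending_stat forms a small oob-to-in-band mailbox. A condensed sketch of the pattern, lifted from the interrupt-handler hunk further down:

    /* Producer side, out-of-band stage: stash whatever the oob pass
     * could not complete. */
    raw_spin_lock(&sdma->oob_lock);
    sdma->pending_stat |= stat;
    raw_spin_unlock(&sdma->oob_lock);

    /* Consumer side, in-band stage: drain atomically, then process. */
    raw_spin_lock_irqsave(&sdma->oob_lock, flags);
    stat = sdma->pending_stat;
    sdma->pending_stat = 0;
    raw_spin_unlock_irqrestore(&sdma->oob_lock, flags);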
@@ -748,6 +752,11 @@
         return container_of(t, struct sdma_desc, vd.tx);
 }
 
+static inline bool sdma_oob_capable(void)
+{
+        return IS_ENABLED(CONFIG_IMX_SDMA_OOB);
+}
+
 static void sdma_start_desc(struct sdma_channel *sdmac)
 {
         struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
@@ -765,7 +774,8 @@
 
         sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
         sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
-        sdma_enable_channel(sdma, sdmac->channel);
+        if (!sdma_oob_capable() || !vchan_oob_pulsed(vd))
+                sdma_enable_channel(sdma, sdmac->channel);
 }
 
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@@ -809,9 +819,9 @@
          * SDMA transaction status by the time the client tasklet is
          * executed.
          */
-        spin_unlock(&sdmac->vc.lock);
+        vchan_unlock(&sdmac->vc);
         dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
-        spin_lock(&sdmac->vc.lock);
+        vchan_lock(&sdmac->vc);
 
         if (error)
                 sdmac->status = old_status;
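The spin_lock()/spin_unlock() conversions here and throughout the rest of the patch depend on vchan_lock() helpers coming from a companion virt-dma change that is not part of this file. Presumably they fall back to the plain spinlock calls on a stock build and operate a hard lock when out-of-band support is compiled in; a rough sketch under that assumption (the guard symbol and the exact definitions below are illustrative, not taken from this patch):

    /* Illustrative only: plausible shape of the virt-dma helpers. */
    #ifdef CONFIG_IMX_SDMA_OOB      /* or a generic virt-dma option */
    #define vchan_lock(vc)          raw_spin_lock(&(vc)->lock)
    #define vchan_unlock(vc)        raw_spin_unlock(&(vc)->lock)
    #else
    #define vchan_lock(vc)          spin_lock(&(vc)->lock)
    #define vchan_unlock(vc)        spin_unlock(&(vc)->lock)
    #endif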
@@ -821,20 +831,21 @@
 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
         struct sdma_channel *sdmac = (struct sdma_channel *) data;
+        struct sdma_desc *desc = sdmac->desc;
         struct sdma_buffer_descriptor *bd;
         int i, error = 0;
 
-        sdmac->desc->chn_real_count = 0;
+        desc->chn_real_count = 0;
         /*
          * non loop mode. Iterate over all descriptors, collect
          * errors and call callback function
          */
-        for (i = 0; i < sdmac->desc->num_bd; i++) {
-                bd = &sdmac->desc->bd[i];
+        for (i = 0; i < desc->num_bd; i++) {
+                bd = &desc->bd[i];
 
                 if (bd->mode.status & (BD_DONE | BD_RROR))
                         error = -EIO;
-                sdmac->desc->chn_real_count += bd->mode.count;
+                desc->chn_real_count += bd->mode.count;
         }
 
         if (error)
@@ -843,36 +854,83 @@
                 sdmac->status = DMA_COMPLETE;
 }
 
-static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+static unsigned long sdma_do_channels(struct sdma_engine *sdma,
+                                      unsigned long stat)
 {
-        struct sdma_engine *sdma = dev_id;
-        unsigned long stat;
+        unsigned long mask = stat;
 
-        stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
-        writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
-        /* channel 0 is special and not handled here, see run_channel0() */
-        stat &= ~1;
-
-        while (stat) {
-                int channel = fls(stat) - 1;
+        while (mask) {
+                int channel = fls(mask) - 1;
                 struct sdma_channel *sdmac = &sdma->channel[channel];
                 struct sdma_desc *desc;
 
-                spin_lock(&sdmac->vc.lock);
+                vchan_lock(&sdmac->vc);
                 desc = sdmac->desc;
                 if (desc) {
+                        if (running_oob() && !vchan_oob_handled(&desc->vd))
+                                goto next;
                         if (sdmac->flags & IMX_DMA_SG_LOOP) {
                                 sdma_update_channel_loop(sdmac);
                         } else {
                                 mxc_sdma_handle_channel_normal(sdmac);
+                                if (running_oob()) {
+                                        vchan_unlock(&sdmac->vc);
+                                        dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+                                        __clear_bit(channel, &stat);
+                                        goto next_unlocked;
+                                }
                                 vchan_cookie_complete(&desc->vd);
                                 sdma_start_desc(sdmac);
                         }
                 }
-
-                spin_unlock(&sdmac->vc.lock);
                 __clear_bit(channel, &stat);
+ next:
+                vchan_unlock(&sdmac->vc);
+ next_unlocked:
+                __clear_bit(channel, &mask);
         }
+
+        return stat;
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+        struct sdma_engine *sdma = dev_id;
+        unsigned long stat, flags __maybe_unused;
+
+#ifdef CONFIG_IMX_SDMA_OOB
+        if (running_oob()) {
+                stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+                writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+                /*
+                 * Locking is only to guard against IRQ migration with
+                 * a delayed in-band event running from a remote CPU
+                 * after some IRQ routing changed the affinity of the
+                 * out-of-band handler in the meantime.
+                 */
+                stat = sdma_do_channels(sdma, stat & ~1);
+                if (stat) {
+                        raw_spin_lock(&sdma->oob_lock);
+                        sdma->pending_stat |= stat;
+                        raw_spin_unlock(&sdma->oob_lock);
+                        /* Call us back from in-band context. */
+                        irq_post_inband(irq);
+                }
+                return IRQ_HANDLED;
+        }
+
+        /* In-band IRQ context: stalled, but hard irqs are on. */
+        raw_spin_lock_irqsave(&sdma->oob_lock, flags);
+        stat = sdma->pending_stat;
+        sdma->pending_stat = 0;
+        raw_spin_unlock_irqrestore(&sdma->oob_lock, flags);
+        sdma_do_channels(sdma, stat);
+#else
+        stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+        writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+        /* channel 0 is special and not handled here, see run_channel0() */
+        sdma_do_channels(sdma, stat & ~1);
+#endif
 
         return IRQ_HANDLED;
 }
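In short, the handler now runs twice per interrupt when out-of-band support is enabled: the oob pass acks the hardware and services every channel whose descriptor may complete from the oob stage, and whatever it could not finish is accumulated into pending_stat, then replayed from the in-band stage after irq_post_inband(). Inside sdma_do_channels(), mask merely drives the scan while stat tracks what was actually completed, so the returned residue is exactly the set of channels still owed an in-band pass. The scan services the highest-numbered pending channel first; a minimal user-space model of that loop (plain C, kernel fls() approximated with a GCC builtin):

    #include <stdio.h>

    /* Approximates the kernel's fls(): 1-based index of the most
     * significant set bit, 0 when no bit is set. */
    static int fls_approx(unsigned long x)
    {
            return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
    }

    int main(void)
    {
            unsigned long mask = 0x16;      /* channels 1, 2 and 4 pending */

            while (mask) {
                    int channel = fls_approx(mask) - 1;

                    printf("servicing channel %d\n", channel);
                    mask &= ~(1UL << channel);  /* i.e. __clear_bit() */
            }
            return 0;       /* prints 4, then 2, then 1 */
    }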
@@ -1060,9 +1118,9 @@
          */
         usleep_range(1000, 2000);
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
         vchan_get_all_descriptors(&sdmac->vc, &head);
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
         vchan_dma_desc_free_list(&sdmac->vc, &head);
 }
 
@@ -1071,17 +1129,18 @@
         struct sdma_channel *sdmac = to_sdma_chan(chan);
         unsigned long flags;
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
 
         sdma_disable_channel(chan);
 
         if (sdmac->desc) {
                 vchan_terminate_vdesc(&sdmac->desc->vd);
                 sdmac->desc = NULL;
+                vchan_unlock_irqrestore(&sdmac->vc, flags);
                 schedule_work(&sdmac->terminate_worker);
+        } else {
+                vchan_unlock_irqrestore(&sdmac->vc, flags);
         }
-
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
         return 0;
 }
@@ -1441,6 +1500,15 @@
         struct scatterlist *sg;
         struct sdma_desc *desc;
 
+        if (!sdma_oob_capable()) {
+                if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+                        dev_err(sdma->dev,
+                                "%s: out-of-band slave transfers disabled\n",
+                                __func__);
+                        return NULL;
+                }
+        }
+
         sdma_config_write(chan, &sdmac->slave_config, direction);
 
         desc = sdma_transfer_init(sdmac, direction, sg_len);
@@ -1492,7 +1560,8 @@
 
                 if (i + 1 == sg_len) {
                         param |= BD_INTR;
-                        param |= BD_LAST;
+                        if (!sdma_oob_capable() || !(flags & DMA_OOB_PULSE))
+                                param |= BD_LAST;
                         param &= ~BD_CONT;
                 }
 
@@ -1526,6 +1595,20 @@
         struct sdma_desc *desc;
 
         dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+        if (!sdma_oob_capable()) {
+                if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+                        dev_err(sdma->dev,
+                                "%s: out-of-band cyclic transfers disabled\n",
+                                __func__);
+                        return NULL;
+                }
+        } else if (flags & DMA_OOB_PULSE) {
+                dev_err(chan->device->dev,
+                        "%s: no pulse mode with out-of-band cyclic transfers\n",
+                        __func__);
+                return NULL;
+        }
 
         sdma_config_write(chan, &sdmac->slave_config, direction);
 
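So cyclic transfers may carry DMA_OOB_INTERRUPT (period callbacks delivered from the oob stage) but never DMA_OOB_PULSE, which only makes sense for slave_sg descriptors re-triggered shot by shot. A hypothetical client-side call for the supported cyclic case might look as follows; the DMA_OOB_INTERRUPT flag name is taken from this patch series, the rest is the stock dmaengine API:

    /* Cyclic capture whose per-period completion callback is allowed
     * to run out-of-band. */
    desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
                                     DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT | DMA_OOB_INTERRUPT);
    if (!desc)
            return -EIO;
    dmaengine_submit(desc);
    dma_async_issue_pending(chan);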
@@ -1649,7 +1732,7 @@
         if (ret == DMA_COMPLETE || !txstate)
                 return ret;
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
 
         vd = vchan_find_desc(&sdmac->vc, cookie);
         if (vd)
@@ -1667,7 +1750,7 @@
                 residue = 0;
         }
 
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
 
         dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
                          residue);
@@ -1680,11 +1763,38 @@
         struct sdma_channel *sdmac = to_sdma_chan(chan);
         unsigned long flags;
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
         if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
                 sdma_start_desc(sdmac);
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
 }
+
+#ifdef CONFIG_IMX_SDMA_OOB
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+        struct sdma_channel *sdmac = to_sdma_chan(chan);
+        struct sdma_desc *desc = sdmac->desc;
+        unsigned long flags;
+        int n, ret = -EIO;
+
+        vchan_lock_irqsave(&sdmac->vc, flags);
+        if (desc && vchan_oob_pulsed(&desc->vd)) {
+                for (n = 0; n < desc->num_bd - 1; n++)
+                        desc->bd[n].mode.status |= BD_DONE;
+                desc->bd[n].mode.status |= BD_DONE|BD_WRAP;
+                sdma_enable_channel(sdmac->sdma, sdmac->channel);
+                ret = 0;
+        }
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
+
+        return ret;
+}
+#else
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+        return -ENOTSUPP;
+}
+#endif
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
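This is the other half of the pulse protocol started in sdma_start_desc() above: issue_pending loads the buffer descriptors but leaves the channel stopped, and each sdma_pulse_oob() call marks every BD ready again (BD_WRAP on the last one keeps the ring circular) before kicking the channel for one shot. A hedged client-side sketch; dma_pulse_oob() is assumed to be the dmaengine wrapper around the device_pulse_oob hook installed below, per the EVL dmaengine extension, and is not defined in this file:

    /* Arm once from in-band context... */
    d = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_OOB_INTERRUPT |
                                DMA_OOB_PULSE);
    if (!d)
            return -EIO;
    cookie = dmaengine_submit(d);
    dma_async_issue_pending(chan);  /* BDs loaded, channel still idle */

    /* ...then trigger one transfer per pulse, typically from an
     * out-of-band handler: */
    err = dma_pulse_oob(chan);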
@@ -1920,6 +2030,9 @@
         clk_disable(sdma->clk_ipg);
         clk_disable(sdma->clk_ahb);
 
+#ifdef CONFIG_IMX_SDMA_OOB
+        raw_spin_lock_init(&sdma->oob_lock);
+#endif
         return 0;
 
 err_dma_alloc:
@@ -2035,8 +2148,9 @@
         if (ret)
                 goto err_clk;
 
-        ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
-                               sdma);
+        ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler,
+                               IS_ENABLED(CONFIG_IMX_SDMA_OOB) ? IRQF_OOB : 0,
+                               "sdma", sdma);
         if (ret)
                 goto err_irq;
 
@@ -2055,6 +2169,7 @@
 
         dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
         dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+        dma_cap_set(DMA_OOB, sdma->dma_device.cap_mask);
         dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
 
         INIT_LIST_HEAD(&sdma->dma_device.channels);
@@ -2106,6 +2221,7 @@
         sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
         sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
         sdma->dma_device.device_issue_pending = sdma_issue_pending;
+        sdma->dma_device.device_pulse_oob = sdma_pulse_oob;
         sdma->dma_device.copy_align = 2;
         dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
 
@@ -2160,6 +2276,16 @@
                 }
         }
 
+        /*
+         * Keep the clocks enabled at any time if we plan to use the
+         * DMA from out-of-band context, bumping their refcount to
+         * keep them on until sdma_remove() is called eventually.
+         */
+        if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+                clk_enable(sdma->clk_ipg);
+                clk_enable(sdma->clk_ahb);
+        }
+
         return 0;
 
 err_register:
@@ -2178,6 +2304,11 @@
         struct sdma_engine *sdma = platform_get_drvdata(pdev);
         int i;
 
+        if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+                clk_disable(sdma->clk_ahb);
+                clk_disable(sdma->clk_ipg);
+        }
+
         devm_free_irq(&pdev->dev, sdma->irq, sdma);
         dma_async_device_unregister(&sdma->dma_device);
         kfree(sdma->script_addrs);