2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/dma/stm32-dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for STM32 DMA controller
  *
@@ -6,8 +7,6 @@
  * Copyright (C) M'boumba Cedric Madianga 2015
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
  */
 
 #include <linux/clk.h>
@@ -16,6 +15,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -23,6 +23,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -116,6 +117,7 @@
 #define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
 #define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
 #define STM32_DMA_FIFO_THRESHOLD_FULL		0x03
+#define STM32_DMA_FIFO_THRESHOLD_NONE		0x04
 
 #define STM32_DMA_MAX_DATA_ITEMS	0xffff
 /*
@@ -135,6 +137,9 @@
 /* DMA Features */
 #define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
 #define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
+#define STM32_DMA_DIRECT_MODE_GET(n)	(((n) & STM32_DMA_DIRECT_MODE_MASK) \
+					 >> 2)
 
 enum stm32_dma_width {
	STM32_DMA_BYTE,
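
Note: STM32_DMA_DIRECT_MODE_GET() extracts the new direct-mode flag from the
channel "features" word (bit 2, just above the two FIFO-threshold bits). A
minimal decoding sketch, using a made-up features value of 0x6 (threshold
field = 0x2, direct-mode bit set):

	u32 features = 0x6;					/* illustrative value only */
	u32 fth = STM32_DMA_THRESHOLD_FTR_GET(features);	/* 0x2 */
	bool direct = STM32_DMA_DIRECT_MODE_GET(features);	/* true: bypass the FIFO */
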
@@ -207,7 +212,6 @@
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
-	struct reset_control *rst;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
 };
@@ -241,12 +245,6 @@
 static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
 {
	writel_relaxed(val, dmadev->base + reg);
-}
-
-static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
-{
-	return kzalloc(sizeof(struct stm32_dma_desc) +
-		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
 }
 
 static int stm32_dma_get_width(struct stm32_dma_chan *chan,
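
Note: the removed stm32_dma_alloc_desc() helper open-coded the size of a
structure ending in a flexible array. Its call sites, later in this patch,
allocate directly with struct_size() from <linux/overflow.h>; assuming desc
has a trailing sg_req[] array, the macro computes roughly the sketch below,
but saturates to SIZE_MAX instead of wrapping on overflow, so kzalloc() fails
cleanly rather than under-allocating:

	/* Roughly what struct_size(desc, sg_req, num_sgs) evaluates to */
	size_t bytes = sizeof(struct stm32_dma_desc) +
		       num_sgs * sizeof(struct stm32_dma_sg_req);
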
@@ -287,6 +285,9 @@
 {
	u32 remaining;
 
+	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+		return false;
+
	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
@@ -308,6 +309,10 @@
 
 static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
 {
+	/* If FIFO direct mode, burst is not possible */
+	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+		return false;
+
	/*
	 * Buffer or period length has to be aligned on FIFO depth.
	 * Otherwise bytes may be stuck within FIFO at buffer or period
@@ -428,29 +433,19 @@
 static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
 {
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
-	u32 dma_scr, id;
+	u32 dma_scr, id, reg;
 
	id = chan->id;
-	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+	reg = STM32_DMA_SCR(id);
+	dma_scr = stm32_dma_read(dmadev, reg);
 
	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
-		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);
+		stm32_dma_write(dmadev, reg, dma_scr);
 
-		do {
-			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-			dma_scr &= STM32_DMA_SCR_EN;
-			if (!dma_scr)
-				break;
-
-			if (time_after_eq(jiffies, timeout)) {
-				dev_err(chan2dev(chan), "%s: timeout!\n",
-					__func__);
-				return -EBUSY;
-			}
-			cond_resched();
-		} while (1);
+		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
+					dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
+					10, 1000000);
	}
 
	return 0;
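
Note: readl_relaxed_poll_timeout_atomic() is provided by the newly included
<linux/iopoll.h>. Its parameters are (addr, val, cond, delay_us, timeout_us),
so the call above re-reads SCR every 10 µs for up to 1 s (versus the old
loop's 5 s budget) and returns 0 once EN clears, or -ETIMEDOUT. A simplified
sketch of the macro's behaviour, where check_expired() is a hypothetical
stand-in for its ktime bookkeeping:

	for (;;) {
		dma_scr = readl_relaxed(dmadev->base + reg);
		if (!(dma_scr & STM32_DMA_SCR_EN))
			return 0;		/* channel is now disabled */
		if (check_expired())		/* 1 s elapsed */
			return -ETIMEDOUT;
		udelay(10);			/* safe in atomic context */
	}
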
@@ -565,6 +560,7 @@
	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;
 
+	reg->dma_scr &= ~STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
@@ -644,12 +640,13 @@
 {
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	u32 status, scr;
+	u32 status, scr, sfcr;
 
	spin_lock(&chan->vchan.lock);
 
	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
 
	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -664,10 +661,18 @@
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
-		if (!(scr & STM32_DMA_SCR_EN))
-			dev_err(chan2dev(chan), "FIFO Error\n");
-		else
-			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+		if (sfcr & STM32_DMA_SFCR_FEIE) {
+			if (!(scr & STM32_DMA_SCR_EN))
+				dev_err(chan2dev(chan), "FIFO Error\n");
+			else
+				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+		}
+	}
+	if (status & STM32_DMA_DMEI) {
+		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
+		status &= ~STM32_DMA_DMEI;
+		if (sfcr & STM32_DMA_SCR_DMEIE)
+			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
	}
	if (status) {
		stm32_dma_irq_clear(chan, status);
@@ -704,13 +709,13 @@
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
-	u32 dma_scr, threshold;
+	u32 dma_scr, fifoth;
 
	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
-	threshold = chan->threshold;
+	fifoth = chan->threshold;
 
	switch (direction) {
	case DMA_MEM_TO_DEV:
@@ -722,7 +727,7 @@
		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
-							  threshold,
+							  fifoth,
							  dst_addr_width);
 
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -730,7 +735,7 @@
			return dst_burst_size;
 
		/* Set memory data size */
-		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
@@ -740,7 +745,7 @@
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
-							  threshold,
+							  fifoth,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
@@ -754,7 +759,8 @@
 
		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
 
		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
@@ -770,7 +776,7 @@
		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
-							  threshold,
+							  fifoth,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
@@ -778,7 +784,7 @@
			return src_burst_size;
 
		/* Set memory data size */
-		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
@@ -788,7 +794,7 @@
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
-							  threshold,
+							  fifoth,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -803,7 +809,8 @@
 
		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
 
		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
@@ -853,7 +860,7 @@
		return NULL;
	}
 
-	desc = stm32_dma_alloc_desc(sg_len);
+	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;
 
@@ -954,7 +961,7 @@
 
	num_periods = buf_len / period_len;
 
-	desc = stm32_dma_alloc_desc(num_periods);
+	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;
 
@@ -989,7 +996,7 @@
	int i;
 
	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
-	desc = stm32_dma_alloc_desc(num_sgs);
+	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;
 
@@ -1041,33 +1048,97 @@
	return ndtr << width;
 }
 
+/**
+ * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
+ * @chan: dma channel
+ *
+ * Called with IRQs disabled, this function checks that the hardware has not
+ * switched to the next transfer in double buffer mode. The test is done by
+ * comparing the next_sg memory address with the hardware-related register
+ * (based on the CT bit value).
+ *
+ * Returns true if the expected current transfer is still running, or if
+ * double buffer mode is not activated.
+ */
+static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+	struct stm32_dma_sg_req *sg_req;
+	u32 dma_scr, dma_smar, id;
+
+	id = chan->id;
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+	if (!(dma_scr & STM32_DMA_SCR_DBM))
+		return true;
+
+	sg_req = &chan->desc->sg_req[chan->next_sg];
+
+	if (dma_scr & STM32_DMA_SCR_CT) {
+		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
+		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+	}
+
+	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
+
+	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+}
+
 static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
 {
	u32 modulo, burst_size;
-	u32 residue = 0;
+	u32 residue;
+	u32 n_sg = next_sg;
+	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;
 
	/*
-	 * In cyclic mode, for the last period, residue = remaining bytes from
-	 * NDTR
+	 * Calculating the residue means computing the descriptor
+	 * information:
+	 * - the sg_req currently transferred
+	 * - the hardware's remaining position in this sg (NDTR bit field).
+	 *
+	 * A race condition may occur if the DMA is running in cyclic or double
+	 * buffer mode, since the DMA registers are automatically reloaded at
+	 * the end of a period transfer. The hardware may have switched to the
+	 * next transfer (CT bit updated) just before the position (SxNDTR reg)
+	 * is read.
+	 * In this case the SxNDTR reg may (or may not) correspond to the new
+	 * transfer position rather than the expected one.
+	 * The strategy implemented in the stm32 driver is to:
+	 *  - read the SxNDTR register
+	 *  - crosscheck that the hardware is still in the current transfer.
+	 * In case of a switch, we can assume that the DMA is at the beginning
+	 * of the next transfer, so we approximate the residue accordingly, by
+	 * pointing at the beginning of the next transfer.
+	 *
+	 * This race condition does not apply in non-cyclic mode, as double
+	 * buffer is not used. In that situation the registers are updated by
+	 * software.
	 */
-	if (chan->desc->cyclic && next_sg == 0) {
-		residue = stm32_dma_get_remaining_bytes(chan);
-		goto end;
+
+	residue = stm32_dma_get_remaining_bytes(chan);
+
+	if (!stm32_dma_is_current_sg(chan)) {
+		n_sg++;
+		if (n_sg == chan->desc->num_sgs)
+			n_sg = 0;
+		residue = sg_req->len;
	}
 
	/*
-	 * For all other periods in cyclic mode, and in sg mode,
-	 * residue = remaining bytes from NDTR + remaining periods/sg to be
-	 * transferred
+	 * In cyclic mode, for the last period, residue = remaining bytes
+	 * from NDTR; for all other periods in cyclic mode, and in sg mode,
+	 * residue = remaining bytes from NDTR + remaining periods/sg to be
+	 * transferred.
	 */
-	for (i = next_sg; i < desc->num_sgs; i++)
-		residue += desc->sg_req[i].len;
-	residue += stm32_dma_get_remaining_bytes(chan);
+	if (!chan->desc->cyclic || n_sg != 0)
+		for (i = n_sg; i < desc->num_sgs; i++)
+			residue += desc->sg_req[i].len;
 
-end:
	if (!chan->mem_burst)
		return residue;
@@ -1115,15 +1186,14 @@
	int ret;
 
	chan->config_init = false;
-	ret = clk_prepare_enable(dmadev->clk);
-	if (ret < 0) {
-		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+
+	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
+	if (ret < 0)
		return ret;
-	}
 
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
-		clk_disable_unprepare(dmadev->clk);
+		pm_runtime_put(dmadev->ddev.dev);
 
	return ret;
 }
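
Note: pm_runtime_resume_and_get(), unlike a plain pm_runtime_get_sync(),
drops the usage-count reference itself when the resume fails, which is why
the error path above can simply return. Each successful call must still be
balanced with a put, as in this sketch (and in the free-channel path in the
next hunk):

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* reference already released on failure */
	/* ... hardware access is safe here ... */
	pm_runtime_put(dev);	/* allow runtime suspend again */
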
@@ -1143,7 +1213,7 @@
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}
 
-	clk_disable_unprepare(dmadev->clk);
+	pm_runtime_put(dmadev->ddev.dev);
 
	vchan_free_chan_resources(to_virt_chan(c));
 }
@@ -1165,6 +1235,8 @@
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
 
	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
+	if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
+		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
 }
 
 static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
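
Note: chan->threshold now doubles as the direct-mode indicator. The features
word decoded here arrives from the fourth cell of the client's DMA specifier
through stm32_dma_of_xlate() (not part of this diff); the flow is roughly the
sketch below, where 0x4 is an illustrative cell value with only the
direct-mode bit set:

	/* In the of_xlate path (sketch) */
	cfg.features = dma_spec->args[3];	/* e.g. 0x4: direct mode */
	stm32_dma_set_config(chan, &cfg);	/* threshold becomes _NONE */
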
@@ -1218,6 +1290,7 @@
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
+	struct reset_control *rst;
	int i, ret;
 
	match = of_match_device(stm32_dma_of_match, &pdev->dev);
@@ -1238,20 +1311,30 @@
		return PTR_ERR(dmadev->base);
 
	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dmadev->clk)) {
-		dev_err(&pdev->dev, "Error: Missing controller clock\n");
-		return PTR_ERR(dmadev->clk);
+	if (IS_ERR(dmadev->clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");
+
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+		return ret;
	}
 
	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");
 
-	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
-	if (!IS_ERR(dmadev->rst)) {
-		reset_control_assert(dmadev->rst);
+	rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto clk_free;
+	} else {
+		reset_control_assert(rst);
		udelay(2);
-		reset_control_deassert(dmadev->rst);
+		reset_control_deassert(rst);
	}
+
+	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
 
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
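
Note: dev_err_probe() both logs and returns the error code, and it stays
silent (apart from recording a deferral reason for debugfs) when the code is
-EPROBE_DEFER. It condenses a pattern roughly like this sketch:

	/* Approximate equivalent of dev_err_probe(dev, err, "Can't get clock\n") */
	if (err != -EPROBE_DEFER)
		dev_err(dev, "error %pe: Can't get clock\n", ERR_PTR(err));
	return err;
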
@@ -1273,7 +1356,9 @@
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
	dd->max_burst = STM32_DMA_MAX_BURST;
+	dd->descriptor_reuse = true;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);
 
@@ -1292,17 +1377,15 @@
 
	ret = dma_async_device_register(dd);
	if (ret)
-		return ret;
+		goto clk_free;
 
	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
-		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-		if (!res) {
-			ret = -EINVAL;
-			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+		ret = platform_get_irq(pdev, i);
+		if (ret < 0)
			goto err_unregister;
-		}
-		chan->irq = res->start;
+		chan->irq = ret;
+
		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
@@ -1324,25 +1407,96 @@
 
	platform_set_drvdata(pdev, dmadev);
 
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_put(&pdev->dev);
+
	dev_info(&pdev->dev, "STM32 DMA driver registered\n");
 
	return 0;
 
 err_unregister:
	dma_async_device_unregister(dd);
+clk_free:
+	clk_disable_unprepare(dmadev->clk);
 
	return ret;
 }
+
+#ifdef CONFIG_PM
+static int stm32_dma_runtime_suspend(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dmadev->clk);
+
+	return 0;
+}
+
+static int stm32_dma_runtime_resume(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret) {
+		dev_err(dev, "failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dma_suspend(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+	int id, ret, scr;
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
+		return ret;
+
+	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
+		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+		if (scr & STM32_DMA_SCR_EN) {
+			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_put_sync(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+static int stm32_dma_resume(struct device *dev)
+{
+	return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops stm32_dma_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
+	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
+			   stm32_dma_runtime_resume, NULL)
+};
 
 static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
+		.pm = &stm32_dma_pm_ops,
	},
+	.probe = stm32_dma_probe,
 };
 
 static int __init stm32_dma_init(void)
 {
-	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
+	return platform_driver_register(&stm32_dma_driver);
 }
 subsys_initcall(stm32_dma_init);
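
Note on the registration change: platform_driver_probe() is a one-shot
mechanism that gives up on the driver if probe does not succeed immediately,
so it cannot cooperate with the -EPROBE_DEFER path introduced above for the
reset controller. Using platform_driver_register() with .probe wired into the
struct lets the core retry probing later. The probe-time runtime PM handshake
added above, annotated as a sketch:

	pm_runtime_set_active(&pdev->dev);	/* clock is already on */
	pm_runtime_enable(&pdev->dev);		/* arm the runtime PM callbacks */
	pm_runtime_get_noresume(&pdev->dev);	/* take a ref without resuming */
	pm_runtime_put(&pdev->dev);		/* drop it: device may now idle */
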