...
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for STM32 DMA controller
  *
...
  * Copyright (C) M'boumba Cedric Madianga 2015
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
  */

 #include <linux/clk.h>
...
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
 #include <linux/module.h>
...
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
...
 #define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
 #define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
 #define STM32_DMA_FIFO_THRESHOLD_FULL		0x03
+#define STM32_DMA_FIFO_THRESHOLD_NONE		0x04

 #define STM32_DMA_MAX_DATA_ITEMS	0xffff
 /*
...
 /* DMA Features */
 #define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
 #define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
+#define STM32_DMA_DIRECT_MODE_GET(n)	(((n) & STM32_DMA_DIRECT_MODE_MASK) \
+					 >> 2)

 enum stm32_dma_width {
 	STM32_DMA_BYTE,
...
 	struct dma_device ddev;
 	void __iomem *base;
 	struct clk *clk;
-	struct reset_control *rst;
 	bool mem2mem;
 	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
 };
...
 static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
 {
 	writel_relaxed(val, dmadev->base + reg);
-}
-
-static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
-{
-	return kzalloc(sizeof(struct stm32_dma_desc) +
-		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
 }

 static int stm32_dma_get_width(struct stm32_dma_chan *chan,
...
 {
 	u32 remaining;

+	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+		return false;
+
 	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
 		if (burst != 0) {
 			/*
...
 static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
 {
+	/* In FIFO direct mode, bursts are not possible */
+	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+		return false;
+
 	/*
 	 * Buffer or period length has to be aligned on FIFO depth.
 	 * Otherwise bytes may be stuck within FIFO at buffer or period
...
 static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
 {
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
-	u32 dma_scr, id;
+	u32 dma_scr, id, reg;

 	id = chan->id;
-	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+	reg = STM32_DMA_SCR(id);
+	dma_scr = stm32_dma_read(dmadev, reg);

 	if (dma_scr & STM32_DMA_SCR_EN) {
 		dma_scr &= ~STM32_DMA_SCR_EN;
-		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);
+		stm32_dma_write(dmadev, reg, dma_scr);

-		do {
-			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-			dma_scr &= STM32_DMA_SCR_EN;
-			if (!dma_scr)
-				break;
-
-			if (time_after_eq(jiffies, timeout)) {
-				dev_err(chan2dev(chan), "%s: timeout!\n",
-					__func__);
-				return -EBUSY;
-			}
-			cond_resched();
-		} while (1);
+		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
+				dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
+				10, 1000000);
 	}

 	return 0;
...
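The readl_relaxed_poll_timeout_atomic() helper from <linux/iopoll.h> replaces the open-coded wait removed above; its arguments are the register address, the variable receiving each read, the exit condition, and the delay/timeout in microseconds. Note that the failure code changes from -EBUSY to -ETIMEDOUT and the timeout drops from 5 s to 1 s. Ignoring the ktime bookkeeping of the real macro, it behaves roughly like this sketch:

	/* Approximate expansion of the iopoll call above: poll SCR
	 * every 10 us until EN clears, giving up after 1 s. */
	u64 elapsed_us = 0;

	for (;;) {
		dma_scr = readl_relaxed(dmadev->base + reg);
		if (!(dma_scr & STM32_DMA_SCR_EN))
			return 0;		/* condition met */
		if (elapsed_us >= 1000000)
			return -ETIMEDOUT;	/* replaces the old -EBUSY */
		udelay(10);
		elapsed_us += 10;
	}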
 	sg_req = &chan->desc->sg_req[chan->next_sg];
 	reg = &sg_req->chan_reg;

+	reg->dma_scr &= ~STM32_DMA_SCR_EN;
 	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
 	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
 	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
...
 {
 	struct stm32_dma_chan *chan = devid;
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	u32 status, scr;
+	u32 status, scr, sfcr;

 	spin_lock(&chan->vchan.lock);

 	status = stm32_dma_irq_status(chan);
 	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

 	if (status & STM32_DMA_TCI) {
 		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
...
 	if (status & STM32_DMA_FEI) {
 		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
 		status &= ~STM32_DMA_FEI;
-		if (!(scr & STM32_DMA_SCR_EN))
-			dev_err(chan2dev(chan), "FIFO Error\n");
-		else
-			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+		if (sfcr & STM32_DMA_SFCR_FEIE) {
+			if (!(scr & STM32_DMA_SCR_EN))
+				dev_err(chan2dev(chan), "FIFO Error\n");
+			else
+				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+		}
+	}
+	if (status & STM32_DMA_DMEI) {
+		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
+		status &= ~STM32_DMA_DMEI;
+		if (scr & STM32_DMA_SCR_DMEIE)
+			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
 	}
 	if (status) {
 		stm32_dma_irq_clear(chan, status);
...
 	int src_bus_width, dst_bus_width;
 	int src_burst_size, dst_burst_size;
 	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
-	u32 dma_scr, threshold;
+	u32 dma_scr, fifoth;

 	src_addr_width = chan->dma_sconfig.src_addr_width;
 	dst_addr_width = chan->dma_sconfig.dst_addr_width;
 	src_maxburst = chan->dma_sconfig.src_maxburst;
 	dst_maxburst = chan->dma_sconfig.dst_maxburst;
-	threshold = chan->threshold;
+	fifoth = chan->threshold;

 	switch (direction) {
 	case DMA_MEM_TO_DEV:
...
 		/* Set device burst size */
 		dst_best_burst = stm32_dma_get_best_burst(buf_len,
 							  dst_maxburst,
-							  threshold,
+							  fifoth,
 							  dst_addr_width);

 		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
...
 			return dst_burst_size;

 		/* Set memory data size */
-		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
 		chan->mem_width = src_addr_width;
 		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
 		if (src_bus_width < 0)
...
 		src_maxburst = STM32_DMA_MAX_BURST;
 		src_best_burst = stm32_dma_get_best_burst(buf_len,
 							  src_maxburst,
-							  threshold,
+							  fifoth,
 							  src_addr_width);
 		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
 		if (src_burst_size < 0)
...

 		/* Set FIFO threshold */
 		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

 		/* Set peripheral address */
 		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
...
 		/* Set device burst size */
 		src_best_burst = stm32_dma_get_best_burst(buf_len,
 							  src_maxburst,
-							  threshold,
+							  fifoth,
 							  src_addr_width);
 		chan->mem_burst = src_best_burst;
 		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
...
 			return src_burst_size;

 		/* Set memory data size */
-		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
 		chan->mem_width = dst_addr_width;
 		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
 		if (dst_bus_width < 0)
...
 		dst_maxburst = STM32_DMA_MAX_BURST;
 		dst_best_burst = stm32_dma_get_best_burst(buf_len,
 							  dst_maxburst,
-							  threshold,
+							  fifoth,
 							  dst_addr_width);
 		chan->mem_burst = dst_best_burst;
 		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
...

 		/* Set FIFO threshold */
 		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

 		/* Set peripheral address */
 		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
...
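For intuition on how the threshold feeds the burst choice: each STM32 DMA stream has a 16-byte FIFO, and a candidate burst is only kept if the beats available at the configured threshold divide into whole bursts (this is the shape of the threshold-validation helper whose body appears in an earlier hunk). A worked sketch, with illustrative local names (fifo_size, fth, ok) standing in for the driver's own:

	/* 16-byte FIFO, 32-bit bus width, threshold FULL (0x3):
	 * beats available = (16 / 4) * (3 + 1) / 4 = 4,
	 * so a burst of 4 beats fits exactly and is allowed. */
	u32 fifo_size = 16;	/* bytes, per stream */
	u32 width = 4;		/* DMA_SLAVE_BUSWIDTH_4_BYTES */
	u32 fth = 0x3;		/* STM32_DMA_FIFO_THRESHOLD_FULL */
	u32 burst = 4;		/* beats */

	bool ok = ((fifo_size / width) * (fth + 1) / 4) % burst == 0;

With STM32_DMA_FIFO_THRESHOLD_NONE the FIFO is bypassed entirely (direct mode), which is why the new early-return paths above reject bursts and skip programming the FTH field.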
 		return NULL;
 	}

-	desc = stm32_dma_alloc_desc(sg_len);
+	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
 	if (!desc)
 		return NULL;

...
 	num_periods = buf_len / period_len;

-	desc = stm32_dma_alloc_desc(num_periods);
+	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
 	if (!desc)
 		return NULL;

...
 	int i;

 	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
-	desc = stm32_dma_alloc_desc(num_sgs);
+	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
 	if (!desc)
 		return NULL;

...
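struct_size() from <linux/overflow.h> sizes a structure with a trailing flexible array, saturating to SIZE_MAX on arithmetic overflow (so the kzalloc() simply fails) instead of wrapping around as the removed open-coded multiplication could. Conceptually it evaluates to:

	/* What struct_size(desc, sg_req, sg_len) computes, minus the
	 * overflow checking: */
	size_t sz = sizeof(*desc) + sizeof(desc->sg_req[0]) * sg_len;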
 	return ndtr << width;
 }

+/**
+ * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
+ * @chan: dma channel
+ *
+ * This function, called while IRQs are disabled, checks that the hardware
+ * has not switched on the next transfer in double buffer mode. The test is
+ * done by comparing the next_sg memory address with the hardware-related
+ * register (based on the CT bit value).
+ *
+ * Returns true if the expected current transfer is still running, or if
+ * double buffer mode is not activated.
+ */
+static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+	struct stm32_dma_sg_req *sg_req;
+	u32 dma_scr, dma_smar, id;
+
+	id = chan->id;
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+	if (!(dma_scr & STM32_DMA_SCR_DBM))
+		return true;
+
+	sg_req = &chan->desc->sg_req[chan->next_sg];
+
+	if (dma_scr & STM32_DMA_SCR_CT) {
+		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
+		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+	}
+
+	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
+
+	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+}
+
 static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
 				     struct stm32_dma_desc *desc,
 				     u32 next_sg)
 {
 	u32 modulo, burst_size;
-	u32 residue = 0;
+	u32 residue;
+	u32 n_sg = next_sg;
+	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
 	int i;

 	/*
-	 * In cyclic mode, for the last period, residue = remaining bytes from
-	 * NDTR
+	 * Calculating the residue means computing two pieces of descriptor
+	 * information:
+	 * - the sg_req currently transferred,
+	 * - the hardware's remaining position in this sg (NDTR bit field).
+	 *
+	 * A race condition may occur if the DMA is running in cyclic or
+	 * double buffer mode, since the DMA registers are automatically
+	 * reloaded at the end of a period transfer. The hardware may have
+	 * switched to the next transfer (CT bit updated) just before the
+	 * position (SxNDTR reg) is read.
+	 * In that case the SxNDTR reg may (or may not) correspond to the
+	 * new transfer position rather than the expected one.
+	 * The strategy implemented in this driver is to:
+	 * - read the SxNDTR register,
+	 * - crosscheck that the hardware is still on the current transfer.
+	 * If a switch has occurred, we can assume that the DMA is at the
+	 * beginning of the next transfer, and approximate the residue
+	 * accordingly, by pointing at the start of the next transfer.
+	 *
+	 * This race condition does not apply to non-cyclic mode, as double
+	 * buffer is not used there. In that situation the registers are
+	 * updated by software.
 	 */
-	if (chan->desc->cyclic && next_sg == 0) {
-		residue = stm32_dma_get_remaining_bytes(chan);
-		goto end;
+
+	residue = stm32_dma_get_remaining_bytes(chan);
+
+	if (!stm32_dma_is_current_sg(chan)) {
+		n_sg++;
+		if (n_sg == chan->desc->num_sgs)
+			n_sg = 0;
+		residue = sg_req->len;
 	}

 	/*
-	 * For all other periods in cyclic mode, and in sg mode,
-	 * residue = remaining bytes from NDTR + remaining periods/sg to be
-	 * transferred
+	 * In cyclic mode, for the last period, residue = remaining bytes
+	 * from NDTR;
+	 * else, for all other periods in cyclic mode and in sg mode,
+	 * residue = remaining bytes from NDTR + remaining
+	 * periods/sg to be transferred.
 	 */
-	for (i = next_sg; i < desc->num_sgs; i++)
-		residue += desc->sg_req[i].len;
-	residue += stm32_dma_get_remaining_bytes(chan);
+	if (!chan->desc->cyclic || n_sg != 0)
+		for (i = n_sg; i < desc->num_sgs; i++)
+			residue += desc->sg_req[i].len;

-end:
 	if (!chan->mem_burst)
 		return residue;

...
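The residue computed here surfaces to clients through the standard dmaengine status API. A hypothetical caller (chan and cookie assumed obtained elsewhere, via dma_request_chan() and dmaengine_submit()) would see it as:

	struct dma_tx_state state;
	enum dma_status status;

	/* state.residue is ultimately filled in from
	 * stm32_dma_desc_residue() above. */
	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS)
		pr_debug("%u bytes left in the running transfer\n",
			 state.residue);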
 	int ret;

 	chan->config_init = false;
-	ret = clk_prepare_enable(dmadev->clk);
-	if (ret < 0) {
-		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+
+	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
+	if (ret < 0)
 		return ret;
-	}

 	ret = stm32_dma_disable_chan(chan);
 	if (ret < 0)
-		clk_disable_unprepare(dmadev->clk);
+		pm_runtime_put(dmadev->ddev.dev);

 	return ret;
 }
...
 		spin_unlock_irqrestore(&chan->vchan.lock, flags);
 	}

-	clk_disable_unprepare(dmadev->clk);
+	pm_runtime_put(dmadev->ddev.dev);

 	vchan_free_chan_resources(to_virt_chan(c));
 }
...
 	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

 	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
+	if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
+		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
 }

 static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
...
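The features word decoded above arrives through stm32_dma_of_xlate() from the client's dmas phandle arguments; assuming, per the st,stm32-dma binding, that the last dmas cell carries this feature bitfield, bits 0-1 select the FIFO threshold and the new bit 2 requests direct mode. A quick sketch with an illustrative value:

	/* Hypothetical features cell with bit 2 set (direct mode). */
	u32 features = BIT(2);

	u32 threshold = STM32_DMA_THRESHOLD_FTR_GET(features);	/* 0x0 */
	if (STM32_DMA_DIRECT_MODE_GET(features))		/* true */
		threshold = STM32_DMA_FIFO_THRESHOLD_NONE;	/* bypass FIFO */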
 	struct dma_device *dd;
 	const struct of_device_id *match;
 	struct resource *res;
+	struct reset_control *rst;
 	int i, ret;

 	match = of_match_device(stm32_dma_of_match, &pdev->dev);
...
 		return PTR_ERR(dmadev->base);

 	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dmadev->clk)) {
-		dev_err(&pdev->dev, "Error: Missing controller clock\n");
-		return PTR_ERR(dmadev->clk);
+	if (IS_ERR(dmadev->clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");
+
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+		return ret;
 	}

 	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
 						"st,mem2mem");

-	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
-	if (!IS_ERR(dmadev->rst)) {
-		reset_control_assert(dmadev->rst);
+	rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto clk_free;
+	} else {
+		reset_control_assert(rst);
 		udelay(2);
-		reset_control_deassert(dmadev->rst);
+		reset_control_deassert(rst);
 	}
+
+	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
 	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
...
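dev_err_probe(), used for the clock lookup above, returns the error it is given and logs it with dev_err(), except for -EPROBE_DEFER, which it records silently as the deferral reason. It condenses the classic pattern sketched here:

	/* Roughly what the dev_err_probe() call above replaces: */
	if (IS_ERR(dmadev->clk)) {
		ret = PTR_ERR(dmadev->clk);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Can't get clock\n");
		return ret;
	}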
 		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
 	dd->max_burst = STM32_DMA_MAX_BURST;
+	dd->descriptor_reuse = true;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
...

 	ret = dma_async_device_register(dd);
 	if (ret)
-		return ret;
+		goto clk_free;

 	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
 		chan = &dmadev->chan[i];
-		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-		if (!res) {
-			ret = -EINVAL;
-			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+		ret = platform_get_irq(pdev, i);
+		if (ret < 0)
 			goto err_unregister;
-		}
-		chan->irq = res->start;
+		chan->irq = ret;
+
 		ret = devm_request_irq(&pdev->dev, chan->irq,
 				       stm32_dma_chan_irq, 0,
 				       dev_name(chan2dev(chan)), chan);
...

 	platform_set_drvdata(pdev, dmadev);

+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_put(&pdev->dev);
+
 	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

 	return 0;

 err_unregister:
 	dma_async_device_unregister(dd);
+clk_free:
+	clk_disable_unprepare(dmadev->clk);

 	return ret;
 }
+
+#ifdef CONFIG_PM
+static int stm32_dma_runtime_suspend(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dmadev->clk);
+
+	return 0;
+}
+
+static int stm32_dma_runtime_resume(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret) {
+		dev_err(dev, "failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dma_suspend(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+	int id, ret, scr;
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
+		return ret;
+
+	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
+		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+		if (scr & STM32_DMA_SCR_EN) {
+			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+			pm_runtime_put_sync(dev);
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_put_sync(dev);
+
+	return pm_runtime_force_suspend(dev);
+}
+
+static int stm32_dma_resume(struct device *dev)
+{
+	return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops stm32_dma_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
+	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
+			   stm32_dma_runtime_resume, NULL)
+};

 static struct platform_driver stm32_dma_driver = {
 	.driver = {
 		.name = "stm32-dma",
 		.of_match_table = stm32_dma_of_match,
+		.pm = &stm32_dma_pm_ops,
 	},
+	.probe = stm32_dma_probe,
 };

 static int __init stm32_dma_init(void)
 {
-	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
+	return platform_driver_register(&stm32_dma_driver);
 }
 subsys_initcall(stm32_dma_init);
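With runtime PM wired up as above, the controller clock is gated whenever no channel is in use. A hypothetical client path, to show when the reference counts move (client_pdev is illustrative; error handling elided):

	struct dma_chan *ch;

	/* alloc_chan_resources -> pm_runtime_resume_and_get(): clock on */
	ch = dma_request_chan(&client_pdev->dev, "rx");

	/* ... prepare and issue transfers ... */

	/* free_chan_resources -> pm_runtime_put(): clock may gate again */
	dma_release_channel(ch);

The probe-time pm_runtime_get_noresume()/pm_runtime_put() pair hands the already-running clock over to runtime PM: the device is marked active first, so the put can trigger the first runtime suspend instead of leaving the clock on forever.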