@@ -444,6 +444,10 @@
         struct sdma_buffer_descriptor *bd0;
         /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
         bool clk_ratio;
+#ifdef CONFIG_IMX_SDMA_OOB
+        hard_spinlock_t oob_lock;
+        u32 pending_stat;
+#endif
 };
 
 static int sdma_config_write(struct dma_chan *chan,
@@ -748,6 +752,11 @@
         return container_of(t, struct sdma_desc, vd.tx);
 }
 
+static inline bool sdma_oob_capable(void)
+{
+        return IS_ENABLED(CONFIG_IMX_SDMA_OOB);
+}
+
 static void sdma_start_desc(struct sdma_channel *sdmac)
 {
         struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
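
The helper above gates every out-of-band code path on IS_ENABLED(CONFIG_IMX_SDMA_OOB), so when the option is off the compiler can drop the OOB branches as dead code while still type-checking them. A minimal userspace re-creation of that pattern, for illustration only (the macro chain is a simplified copy of the kernel's kconfig.h trick, and the CONFIG define is simulated here):

    #include <stdio.h>

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define IS_ENABLED(option) __is_defined(option)

    #define CONFIG_IMX_SDMA_OOB 1   /* comment out to simulate =n */

    static inline int sdma_oob_capable(void)
    {
            return IS_ENABLED(CONFIG_IMX_SDMA_OOB);
    }

    int main(void)
    {
            /* with the option off, the first branch is provably dead
             * and elided, yet it still compiles */
            if (sdma_oob_capable())
                    puts("OOB support compiled in");
            else
                    puts("OOB support compiled out");
            return 0;
    }
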
@@ -765,7 +774,8 @@
 
         sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
         sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
-        sdma_enable_channel(sdma, sdmac->channel);
+        if (!sdma_oob_capable() || !vchan_oob_pulsed(vd))
+                sdma_enable_channel(sdma, sdmac->channel);
 }
 
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@@ -809,9 +819,9 @@
          * SDMA transaction status by the time the client tasklet is
          * executed.
          */
-        spin_unlock(&sdmac->vc.lock);
+        vchan_unlock(&sdmac->vc);
         dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
-        spin_lock(&sdmac->vc.lock);
+        vchan_lock(&sdmac->vc);
 
         if (error)
                 sdmac->status = old_status;
@@ -821,20 +831,21 @@
 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
         struct sdma_channel *sdmac = (struct sdma_channel *) data;
+        struct sdma_desc *desc = sdmac->desc;
         struct sdma_buffer_descriptor *bd;
         int i, error = 0;
 
-        sdmac->desc->chn_real_count = 0;
+        desc->chn_real_count = 0;
         /*
          * non loop mode. Iterate over all descriptors, collect
          * errors and call callback function
          */
-        for (i = 0; i < sdmac->desc->num_bd; i++) {
-                bd = &sdmac->desc->bd[i];
+        for (i = 0; i < desc->num_bd; i++) {
+                bd = &desc->bd[i];
 
                 if (bd->mode.status & (BD_DONE | BD_RROR))
                         error = -EIO;
-                sdmac->desc->chn_real_count += bd->mode.count;
+                desc->chn_real_count += bd->mode.count;
         }
 
         if (error)
@@ -843,36 +854,83 @@
                 sdmac->status = DMA_COMPLETE;
 }
 
-static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+static unsigned long sdma_do_channels(struct sdma_engine *sdma,
+                                      unsigned long stat)
 {
-        struct sdma_engine *sdma = dev_id;
-        unsigned long stat;
+        unsigned long mask = stat;
 
-        stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
-        writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
-        /* channel 0 is special and not handled here, see run_channel0() */
-        stat &= ~1;
-
-        while (stat) {
-                int channel = fls(stat) - 1;
+        while (mask) {
+                int channel = fls(mask) - 1;
                 struct sdma_channel *sdmac = &sdma->channel[channel];
                 struct sdma_desc *desc;
 
-                spin_lock(&sdmac->vc.lock);
+                vchan_lock(&sdmac->vc);
                 desc = sdmac->desc;
                 if (desc) {
+                        if (running_oob() && !vchan_oob_handled(&desc->vd))
+                                goto next;
                         if (sdmac->flags & IMX_DMA_SG_LOOP) {
                                 sdma_update_channel_loop(sdmac);
                         } else {
                                 mxc_sdma_handle_channel_normal(sdmac);
+                                if (running_oob()) {
+                                        vchan_unlock(&sdmac->vc);
+                                        dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+                                        __clear_bit(channel, &stat);
+                                        goto next_unlocked;
+                                }
                                 vchan_cookie_complete(&desc->vd);
                                 sdma_start_desc(sdmac);
                         }
                 }
-
-                spin_unlock(&sdmac->vc.lock);
                 __clear_bit(channel, &stat);
+        next:
+                vchan_unlock(&sdmac->vc);
+        next_unlocked:
+                __clear_bit(channel, &mask);
         }
+
+        return stat;
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+        struct sdma_engine *sdma = dev_id;
+        unsigned long stat, flags __maybe_unused;
+
+#ifdef CONFIG_IMX_SDMA_OOB
+        if (running_oob()) {
+                stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+                writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+                /*
+                 * Locking is only to guard against IRQ migration with
+                 * a delayed in-band event running from a remote CPU
+                 * after some IRQ routing changed the affinity of the
+                 * out-of-band handler in the meantime.
+                 */
+                stat = sdma_do_channels(sdma, stat & ~1);
+                if (stat) {
+                        raw_spin_lock(&sdma->oob_lock);
+                        sdma->pending_stat |= stat;
+                        raw_spin_unlock(&sdma->oob_lock);
+                        /* Call us back from in-band context. */
+                        irq_post_inband(irq);
+                }
+                return IRQ_HANDLED;
+        }
+
+        /* In-band IRQ context: stalled, but hard irqs are on. */
+        raw_spin_lock_irqsave(&sdma->oob_lock, flags);
+        stat = sdma->pending_stat;
+        sdma->pending_stat = 0;
+        raw_spin_unlock_irqrestore(&sdma->oob_lock, flags);
+        sdma_do_channels(sdma, stat);
+#else
+        stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+        writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+        /* channel 0 is special and not handled here, see run_channel0() */
+        sdma_do_channels(sdma, stat & ~1);
+#endif
 
         return IRQ_HANDLED;
 }
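
The interrupt path is now split in two stages: the out-of-band handler acknowledges the hardware, lets sdma_do_channels() complete whatever may legitimately finish out-of-band, and parks the leftover status bits in pending_stat under oob_lock (the fields added to the engine struct in the first hunk); irq_post_inband() then re-runs the same handler from in-band context to drain the backlog. A self-contained userspace sketch of that defer-the-remainder pattern, with a pthread mutex standing in for the hard spinlock and all names hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int pending_stat;

    /* Stage 1: urgent context; finish what may complete here, park the rest. */
    static void primary_handler(unsigned int stat, unsigned int oob_ready)
    {
            unsigned int deferred = stat & ~oob_ready;

            printf("handled out-of-band: %#x\n", stat & oob_ready);
            if (deferred) {
                    pthread_mutex_lock(&pending_lock);
                    pending_stat |= deferred;
                    pthread_mutex_unlock(&pending_lock);
                    /* the driver calls irq_post_inband(irq) here so the
                     * same handler runs again from in-band context */
            }
    }

    /* Stage 2: relaxed context; drain whatever stage 1 parked. */
    static void secondary_handler(void)
    {
            unsigned int stat;

            pthread_mutex_lock(&pending_lock);
            stat = pending_stat;
            pending_stat = 0;
            pthread_mutex_unlock(&pending_lock);
            printf("completed in-band: %#x\n", stat);
    }

    int main(void)
    {
            primary_handler(0x6, 0x2);  /* ch1 done out-of-band, ch2 deferred */
            secondary_handler();
            return 0;
    }
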
@@ -1060,9 +1118,9 @@
          */
         usleep_range(1000, 2000);
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
         vchan_get_all_descriptors(&sdmac->vc, &head);
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
         vchan_dma_desc_free_list(&sdmac->vc, &head);
 }
 
@@ -1071,17 +1129,18 @@
         struct sdma_channel *sdmac = to_sdma_chan(chan);
         unsigned long flags;
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
 
         sdma_disable_channel(chan);
 
         if (sdmac->desc) {
                 vchan_terminate_vdesc(&sdmac->desc->vd);
                 sdmac->desc = NULL;
+                vchan_unlock_irqrestore(&sdmac->vc, flags);
                 schedule_work(&sdmac->terminate_worker);
+        } else {
+                vchan_unlock_irqrestore(&sdmac->vc, flags);
         }
-
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
         return 0;
 }
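
Note the reordering: the channel lock is dropped before schedule_work() fires, on either exit path. With CONFIG_IMX_SDMA_OOB the vchan lock may be a hard lock, and an in-band-only service like schedule_work() should not be invoked under it. A tiny sketch of this unlock-before-notify shape, with a pthread mutex and a hypothetical stand-in for schedule_work():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t vc_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool have_desc = true;

    /* stands in for schedule_work(): must not run under vc_lock once
     * that lock may be a hard (irqs-off) lock in OOB builds */
    static void kick_worker(void)
    {
            puts("terminate worker kicked");
    }

    static int terminate_all(void)
    {
            pthread_mutex_lock(&vc_lock);
            if (have_desc) {
                    have_desc = false;
                    pthread_mutex_unlock(&vc_lock); /* drop the lock first... */
                    kick_worker();                  /* ...then notify */
            } else {
                    pthread_mutex_unlock(&vc_lock);
            }
            return 0;
    }

    int main(void)
    {
            return terminate_all();
    }
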
@@ -1441,6 +1500,15 @@
         struct scatterlist *sg;
         struct sdma_desc *desc;
 
+        if (!sdma_oob_capable()) {
+                if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+                        dev_err(sdma->dev,
+                                "%s: out-of-band slave transfers disabled\n",
+                                __func__);
+                        return NULL;
+                }
+        }
+
         sdma_config_write(chan, &sdmac->slave_config, direction);
 
         desc = sdma_transfer_init(sdmac, direction, sg_len);
@@ -1492,7 +1560,8 @@
 
         if (i + 1 == sg_len) {
                 param |= BD_INTR;
-                param |= BD_LAST;
+                if (!sdma_oob_capable() || !(flags & DMA_OOB_PULSE))
+                        param |= BD_LAST;
                 param &= ~BD_CONT;
         }
 
@@ -1526,6 +1595,20 @@
         struct sdma_desc *desc;
 
         dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+        if (!sdma_oob_capable()) {
+                if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+                        dev_err(sdma->dev,
+                                "%s: out-of-band cyclic transfers disabled\n",
+                                __func__);
+                        return NULL;
+                }
+        } else if (flags & DMA_OOB_PULSE) {
+                dev_err(chan->device->dev,
+                        "%s: no pulse mode with out-of-band cyclic transfers\n",
+                        __func__);
+                return NULL;
+        }
 
         sdma_config_write(chan, &sdmac->slave_config, direction);
 
@@ -1649,7 +1732,7 @@
         if (ret == DMA_COMPLETE || !txstate)
                 return ret;
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
 
         vd = vchan_find_desc(&sdmac->vc, cookie);
         if (vd)
@@ -1667,7 +1750,7 @@
                 residue = 0;
         }
 
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
 
         dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
                          residue);
@@ -1680,11 +1763,38 @@
         struct sdma_channel *sdmac = to_sdma_chan(chan);
         unsigned long flags;
 
-        spin_lock_irqsave(&sdmac->vc.lock, flags);
+        vchan_lock_irqsave(&sdmac->vc, flags);
         if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
                 sdma_start_desc(sdmac);
-        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
 }
+
+#ifdef CONFIG_IMX_SDMA_OOB
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+        struct sdma_channel *sdmac = to_sdma_chan(chan);
+        struct sdma_desc *desc = sdmac->desc;
+        unsigned long flags;
+        int n, ret = -EIO;
+
+        vchan_lock_irqsave(&sdmac->vc, flags);
+        if (desc && vchan_oob_pulsed(&desc->vd)) {
+                for (n = 0; n < desc->num_bd - 1; n++)
+                        desc->bd[n].mode.status |= BD_DONE;
+                desc->bd[n].mode.status |= BD_DONE|BD_WRAP;
+                sdma_enable_channel(sdmac->sdma, sdmac->channel);
+                ret = 0;
+        }
+        vchan_unlock_irqrestore(&sdmac->vc, flags);
+
+        return ret;
+}
+#else
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+        return -ENOTSUPP;
+}
+#endif
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
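
sdma_pulse_oob() is how a DMA_OOB_PULSE transfer gets re-triggered from out-of-band context: every buffer descriptor is handed back to the engine (BD_DONE) and the last one closes the ring (BD_WRAP) before the channel is kicked again. This pairs with the earlier prep_slave_sg hunk that withholds BD_LAST for pulsed transfers, so the ring stays reusable instead of terminating. A standalone sketch of just the ring re-arm step (struct layout and flag values are illustrative, not necessarily the driver's):

    #include <stdio.h>

    #define BD_DONE 0x01    /* descriptor owned by the DMA engine again */
    #define BD_WRAP 0x02    /* last descriptor wraps back to the first */
    #define NUM_BD  4

    struct bd { unsigned int status; };

    /* Hand every descriptor back to the engine and close the ring. */
    static void rearm_ring(struct bd *bd, int num_bd)
    {
            int n;

            for (n = 0; n < num_bd - 1; n++)
                    bd[n].status |= BD_DONE;
            bd[n].status |= BD_DONE | BD_WRAP;
    }

    int main(void)
    {
            struct bd ring[NUM_BD] = { { 0 } };
            int i;

            rearm_ring(ring, NUM_BD);
            for (i = 0; i < NUM_BD; i++)
                    printf("bd[%d].status = %#x\n", i, ring[i].status);
            return 0;
    }
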
@@ -1920,6 +2030,9 @@
         clk_disable(sdma->clk_ipg);
         clk_disable(sdma->clk_ahb);
 
+#ifdef CONFIG_IMX_SDMA_OOB
+        raw_spin_lock_init(&sdma->oob_lock);
+#endif
         return 0;
 
 err_dma_alloc:
@@ -2035,8 +2148,9 @@
         if (ret)
                 goto err_clk;
 
-        ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
-                               sdma);
+        ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler,
+                               IS_ENABLED(CONFIG_IMX_SDMA_OOB) ? IRQF_OOB : 0,
+                               "sdma", sdma);
         if (ret)
                 goto err_irq;
 
@@ -2055,6 +2169,7 @@
 
         dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
         dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+        dma_cap_set(DMA_OOB, sdma->dma_device.cap_mask);
         dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
 
         INIT_LIST_HEAD(&sdma->dma_device.channels);
@@ -2106,6 +2221,7 @@
         sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
         sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
         sdma->dma_device.device_issue_pending = sdma_issue_pending;
+        sdma->dma_device.device_pulse_oob = sdma_pulse_oob;
         sdma->dma_device.copy_align = 2;
         dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
 
@@ -2160,6 +2276,16 @@
                 }
         }
 
+        /*
+         * Keep the clocks enabled at any time if we plan to use the
+         * DMA from out-of-band context, bumping their refcount to
+         * keep them on until sdma_remove() is called eventually.
+         */
+        if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+                clk_enable(sdma->clk_ipg);
+                clk_enable(sdma->clk_ahb);
+        }
+
         return 0;
 
 err_register:
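
The probe hunk above and the remove hunk below rely on the clk framework's enable-count semantics: the extra clk_enable() pair taken at probe keeps both clocks pinned on while an out-of-band handler might still touch the hardware, and sdma_remove() drops that reference so the clocks can finally gate. A toy analogue of the refcount behavior, with assumed simplified semantics and hypothetical helper names rather than the real clk API:

    #include <assert.h>
    #include <stdio.h>

    struct clk { int enable_count; };

    static void clk_ref_on(struct clk *c)
    {
            c->enable_count++;              /* each user takes a reference */
    }

    static void clk_ref_off(struct clk *c)
    {
            assert(c->enable_count > 0);
            if (--c->enable_count == 0)     /* gates only at zero */
                    puts("clock gated off");
    }

    int main(void)
    {
            struct clk ahb = { 0 };

            clk_ref_on(&ahb);       /* probe: pin the clock for OOB use */
            clk_ref_on(&ahb);       /* a runtime user enables it too */
            clk_ref_off(&ahb);      /* runtime user done: clock stays on */
            clk_ref_off(&ahb);      /* remove: last reference, gates off */
            return 0;
    }
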
@@ -2178,6 +2304,11 @@
         struct sdma_engine *sdma = platform_get_drvdata(pdev);
         int i;
 
+        if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+                clk_disable(sdma->clk_ahb);
+                clk_disable(sdma->clk_ipg);
+        }
+
         devm_free_irq(&pdev->dev, sdma->irq, sdma);
         dma_async_device_unregister(&sdma->dma_device);
         kfree(sdma->script_addrs);