@@ -335,7 +335,7 @@
  * @sdma: pointer to the SDMA engine for this channel
  * @channel: the channel number, matches dmaengine chan_id + 1
  * @direction: transfer type. Needed for setting SDMA script
- * @slave_config Slave configuration
+ * @slave_config: Slave configuration
  * @peripheral_type: Peripheral type. Needed for setting SDMA script
  * @event_id0: aka dma request line
  * @event_id1: for channels that use 2 events
@@ -354,8 +354,10 @@
  * @shp_addr: value for gReg[6]
  * @per_addr: value for gReg[2]
  * @status: status of dma channel
+ * @context_loaded: ensure context is only loaded once
  * @data: specific sdma interface structure
  * @bd_pool: dma_pool for bd
+ * @terminate_worker: used to call back into terminate work function
  */
 struct sdma_channel {
 	struct virt_dma_chan		vc;
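The @slave_config fix is more than cosmetic: kernel-doc only associates a description with a struct member when the line has the form "@name:", so the missing colon left the member undocumented and produces a warning from scripts/kernel-doc. A minimal well-formed block for illustration (struct and member names invented):

	/**
	 * struct foo - kernel-doc member syntax example
	 * @bar: the colon after "@bar" is what kernel-doc keys on
	 */
	struct foo {
		int bar;
	};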
@@ -418,11 +420,11 @@
 	int chnenbl0;
 	int num_events;
 	struct sdma_script_start_addrs	*script_addrs;
+	bool check_ratio;
 };
 
 struct sdma_engine {
 	struct device			*dev;
-	struct device_dma_parameters	dma_parms;
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
@@ -440,6 +442,8 @@
 	unsigned int			irq;
 	dma_addr_t			bd0_phys;
 	struct sdma_buffer_descriptor	*bd0;
+	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
+	bool				clk_ratio;
 };
 
 static int sdma_config_write(struct dma_chan *chan,
@@ -554,6 +558,13 @@
 	.script_addrs = &sdma_script_imx7d,
 };
 
+static struct sdma_driver_data sdma_imx8mq = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx7d,
+	.check_ratio = 1,
+};
+
 static const struct platform_device_id sdma_devtypes[] = {
 	{
 		.name = "imx25-sdma",
@@ -577,6 +588,9 @@
 		.name = "imx7d-sdma",
 		.driver_data = (unsigned long)&sdma_imx7d,
 	}, {
+		.name = "imx8mq-sdma",
+		.driver_data = (unsigned long)&sdma_imx8mq,
+	}, {
 		/* sentinel */
 	}
 };
@@ -590,6 +604,7 @@
 	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -662,8 +677,11 @@
 		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 
 	/* Set bits of CONFIG register with dynamic context switching */
-	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
-		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+	reg = readl(sdma->regs + SDMA_H_CONFIG);
+	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
+		reg |= SDMA_H_CONFIG_CSM;
+		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
+	}
 
 	return ret;
 }
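Why the read-modify-write above matters: once the init path can set the ACR bit in SDMA_H_CONFIG (see the clock-ratio hunks further down), the register is no longer all-zero when this code runs, so the old "write CSM only if CONFIG reads 0" test would silently skip enabling dynamic context switching. Masking and setting only the CSM field works regardless of what else CONFIG carries.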
@@ -677,9 +695,7 @@
 	int ret;
 	unsigned long flags;
 
-	buf_virt = dma_alloc_coherent(NULL,
-			size,
-			&buf_phys, GFP_KERNEL);
+	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
 	if (!buf_virt) {
 		return -ENOMEM;
 	}
@@ -698,7 +714,7 @@
 
 	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-	dma_free_coherent(NULL, size, buf_virt, buf_phys);
+	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
 
 	return ret;
 }
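All the dma_alloc_coherent()/dma_free_coherent() conversions in this patch follow the same rule: the DMA API needs a real struct device so the engine's DMA mask and any bus address offsets are applied, and the NULL-device fallback is no longer supported by the core. A condensed sketch of the pairing, with a hypothetical helper name:

	/* Sketch only: allocate and free against the same device. */
	static int sdma_buf_example(struct sdma_engine *sdma, size_t size)
	{
		dma_addr_t phys;
		void *buf;

		buf = dma_alloc_coherent(sdma->dev, size, &phys, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* hand "phys" to the engine; the CPU uses "buf" */
		dma_free_coherent(sdma->dev, size, buf, phys);
		return 0;
	}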
@@ -744,12 +760,8 @@
 		return;
 	}
 	sdmac->desc = desc = to_sdma_desc(&vd->tx);
-	/*
-	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
-	 * the desc allocated will never be freed in vchan_dma_desc_free_list
-	 */
-	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
-		list_del(&vd->node);
+
+	list_del(&vd->node);
 
 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1050,19 +1062,26 @@
 
 	spin_lock_irqsave(&sdmac->vc.lock, flags);
 	vchan_get_all_descriptors(&sdmac->vc, &head);
-	sdmac->desc = NULL;
 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 	vchan_dma_desc_free_list(&sdmac->vc, &head);
 }
 
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
 
 	sdma_disable_channel(chan);
 
-	if (sdmac->desc)
+	if (sdmac->desc) {
+		vchan_terminate_vdesc(&sdmac->desc->vd);
+		sdmac->desc = NULL;
 		schedule_work(&sdmac->terminate_worker);
+	}
+
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
 	return 0;
 }
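The rename makes the dmaengine role explicit: this is the device_terminate_all hook, which may be called from atomic context. vchan_terminate_vdesc() parks the in-flight descriptor on the virt-dma terminated list instead of freeing it here, and clearing sdmac->desc now happens under vc.lock in this function rather than in the worker, closing a race with the interrupt handler. The earlier hunk that drops the cyclic special case around list_del() depends on this: terminated descriptors are reclaimed through the vchan core once the client synchronizes. From the client side the contract looks like:

	/* "chan" is a previously requested dma_chan (illustrative) */
	dmaengine_terminate_async(chan);  /* invokes sdma_terminate_all() */
	/* ... safe in atomic context; descriptors not yet reclaimed ... */
	dmaengine_synchronize(chan);      /* waits out terminate_worker */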
@@ -1119,7 +1138,6 @@
 static int sdma_config_channel(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	int ret;
 
 	sdma_disable_channel(chan);
 
@@ -1159,9 +1177,7 @@
 		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 	}
 
-	ret = sdma_load_context(sdmac);
-
-	return ret;
+	return 0;
 }
 
 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
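With device_config no longer touching the hardware, the channel context is loaded when a transfer is actually prepared, and the @context_loaded flag documented in the first hunk keeps it from being reloaded for every descriptor. The guard inside sdma_load_context() is not visible in these hunks; presumably it amounts to:

	if (sdmac->context_loaded)
		return 0;
	/* ... program the channel context through BD0 ... */
	sdmac->context_loaded = true;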
@@ -1184,7 +1200,7 @@
 {
 	int ret = -EBUSY;
 
-	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
 					GFP_NOWAIT);
 	if (!sdma->bd0) {
 		ret = -ENOMEM;
@@ -1207,8 +1223,8 @@
 	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 	int ret = 0;
 
-	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
-				       GFP_NOWAIT);
+	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
+				      &desc->bd_phys, GFP_NOWAIT);
 	if (!desc->bd) {
 		ret = -ENOMEM;
 		goto out;
@@ -1221,7 +1237,8 @@
 {
 	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 
-	dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+	dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
+			  desc->bd_phys);
 }
 
 static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1301,12 +1318,11 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
-	sdma_disable_channel_async(chan);
+	sdma_terminate_all(chan);
 
 	sdma_channel_synchronize(chan);
 
-	if (sdmac->event_id0)
-		sdma_event_disable(sdmac, sdmac->event_id0);
+	sdma_event_disable(sdmac, sdmac->event_id0);
 	if (sdmac->event_id1)
 		sdma_event_disable(sdmac, sdmac->event_id1);
 
@@ -1347,10 +1363,12 @@
 	sdma_config_ownership(sdmac, false, true, false);
 
 	if (sdma_load_context(sdmac))
-		goto err_desc_out;
+		goto err_bd_out;
 
 	return desc;
 
+err_bd_out:
+	sdma_free_bd(desc);
 err_desc_out:
 	kfree(desc);
 err_out:
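This fixes a leak in the error path: a sdma_load_context() failure used to jump to err_desc_out, freeing the descriptor but not the buffer-descriptor ring allocated just before it. The function now follows the usual staged-unwind idiom, condensed here as a sketch of the surrounding allocation path:

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		goto err_out;
	if (sdma_alloc_bd(desc))
		goto err_desc_out;	/* only "desc" to undo */
	if (sdma_load_context(sdmac))
		goto err_bd_out;	/* the bd ring exists too */
	return desc;

	err_bd_out:
		sdma_free_bd(desc);
	err_desc_out:
		kfree(desc);
	err_out:
		return NULL;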
@@ -1605,11 +1623,9 @@
 	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
 
 	/* Set ENBLn earlier to make sure dma request triggered after that */
-	if (sdmac->event_id0) {
-		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
-			return -EINVAL;
-		sdma_event_enable(sdmac, sdmac->event_id0);
-	}
+	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+		return -EINVAL;
+	sdma_event_enable(sdmac, sdmac->event_id0);
 
 	if (sdmac->event_id1) {
 		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
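Together with the matching hunk in sdma_free_chan_resources() above, this stops treating event_id0 == 0 as "no event": request line 0 is, at least on some i.MX parts, a valid SDMA event, and the old guard skipped both the range check and the ENBLn setup for a peripheral wired to it. The unconditional form still rejects out-of-range ids via num_events.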
@@ -1625,7 +1641,7 @@
 			      struct dma_tx_state *txstate)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct sdma_desc *desc;
+	struct sdma_desc *desc = NULL;
 	u32 residue;
 	struct virt_dma_desc *vd;
 	enum dma_status ret;
@@ -1636,19 +1652,23 @@
 		return ret;
 
 	spin_lock_irqsave(&sdmac->vc.lock, flags);
+
 	vd = vchan_find_desc(&sdmac->vc, cookie);
-	if (vd) {
+	if (vd)
 		desc = to_sdma_desc(&vd->tx);
+	else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+		desc = sdmac->desc;
+
+	if (desc) {
 		if (sdmac->flags & IMX_DMA_SG_LOOP)
 			residue = (desc->num_bd - desc->buf_ptail) *
 				   desc->period_len - desc->chn_real_count;
 		else
 			residue = desc->chn_count - desc->chn_real_count;
-	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
-		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
 	} else {
 		residue = 0;
 	}
+
 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
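Initializing desc to NULL (previous hunk) lets both lookup paths share one residue computation, so a still-running cyclic transfer found through sdmac->desc now gets the cyclic formula rather than the scatter-gather one. Reading the formula above with illustrative numbers:

	/* 4 periods of 1024 bytes, buf_ptail = 1 period already consumed,
	 * chn_real_count = 256 bytes counted in the current period:
	 *
	 *	residue = (4 - 1) * 1024 - 256 = 2816 bytes outstanding
	 */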
@@ -1771,7 +1791,7 @@
 	u32 reg, val, shift, num_map, i;
 	int ret = 0;
 
-	if (IS_ERR(np) || IS_ERR(gpr_np))
+	if (IS_ERR(np) || !gpr_np)
 		goto out;
 
 	event_remap = of_find_property(np, propname, NULL);
@@ -1819,7 +1839,7 @@
 	}
 
 out:
-	if (!IS_ERR(gpr_np))
+	if (gpr_np)
 		of_node_put(gpr_np);
 
 	return ret;
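Both gpr_np hunks follow from of_parse_phandle() reporting failure as NULL, never as an ERR_PTR(), so IS_ERR(gpr_np) could never trip. The entry check is the functional fix, keeping a NULL node out of the syscon lookup; the out: change is for consistency, since of_node_put(NULL) is already a harmless no-op. The corrected pattern:

	gpr_np = of_parse_phandle(np, "gpr", 0);
	if (!gpr_np)
		goto out;	/* no remap data; nothing to put */
	/* ... resolve the regmap and apply the event remaps ... */
	of_node_put(gpr_np);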
@@ -1849,10 +1869,14 @@
 	if (ret)
 		goto disable_clk_ipg;
 
+	if (sdma->drvdata->check_ratio &&
+	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
+		sdma->clk_ratio = 1;
+
 	/* Be sure SDMA has not started yet */
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
 
-	sdma->channel_control = dma_alloc_coherent(NULL,
+	sdma->channel_control = dma_alloc_coherent(sdma->dev,
 			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
 			sizeof(struct sdma_context_data),
 			&ccb_phys, GFP_KERNEL);
@@ -1866,10 +1890,6 @@
 			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
 	sdma->context_phys = ccb_phys +
 			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
-
-	/* Zero-out the CCB structures array just allocated */
-	memset(sdma->channel_control, 0,
-			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
 
 	/* disable all channels */
 	for (i = 0; i < sdma->drvdata->num_events; i++)
@@ -1889,8 +1909,10 @@
 	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
 
 	/* Set bits of CONFIG register but with static context switching */
-	/* FIXME: Check whether to set ACR bit depending on clock ratios */
-	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
+	if (sdma->clk_ratio)
+		writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
+	else
+		writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
 
 	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
 
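This retires the FIXME: ACR in SDMA_H_CONFIG tells the engine which AHB-to-core clock ratio it is running at, set for 1:1 operation and clear for the default 2:1. clk_ratio was derived earlier in init by comparing clk_get_rate() on the AHB and IPG clocks, and only on parts whose driver data sets check_ratio (i.MX8MQ in this patch).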
@@ -1946,7 +1968,8 @@
 	 */
 	data.dma_request2 = 0;
 
-	return dma_request_channel(mask, sdma_filter_fn, &data);
+	return __dma_request_channel(&mask, sdma_filter_fn, &data,
+				     ofdma->of_node);
 }
 
 static int sdma_probe(struct platform_device *pdev)
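Passing ofdma->of_node restricts the search to channels belonging to this SDMA instance instead of any registered dmaengine. The generic helper is the same call with a NULL node; in kernels that have the node parameter, the wrapper in <linux/dmaengine.h> reads:

	#define dma_request_channel(mask, x, y) \
		__dma_request_channel(&(mask), x, y, NULL)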
@@ -2029,7 +2052,7 @@
 
 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
-	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
 		saddr_arr[i] = -EINVAL;
 
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
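SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 counts only the original v1 script slots, so slots appended for newer firmware script sets were left uninitialized instead of being poisoned with -EINVAL. Since struct sdma_script_start_addrs consists solely of s32 script addresses, dividing its size by sizeof(s32) always yields the full slot count; a compile-time guard one could add (not part of this patch) would be:

	BUILD_BUG_ON(sizeof(struct sdma_script_start_addrs) % sizeof(s32));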
@@ -2077,7 +2100,7 @@
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_config = sdma_config;
-	sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+	sdma->dma_device.device_terminate_all = sdma_terminate_all;
 	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
 	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
@@ -2085,7 +2108,7 @@
 	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
-	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	sdma->dma_device.copy_align = 2;
 	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
 
 	platform_set_drvdata(pdev, sdma);
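Two independent cleanups meet here. copy_align is expressed as log2 of the required byte alignment, so 2 advertises 4-byte-aligned memcpy support, equivalently:

	sdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;	/* == 2 */

And the driver-local device_dma_parameters (dropped from struct sdma_engine in an earlier hunk) is no longer needed because the core now supplies dev->dma_parms for platform devices, so dma_set_max_seg_size() keeps working without the manual hookup.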
@@ -2191,7 +2214,7 @@
 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
 #endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
 #endif
 MODULE_LICENSE("GPL");