| .. | .. |
|---|
| 335 | 335 | * @sdma: pointer to the SDMA engine for this channel |
|---|
| 336 | 336 | * @channel: the channel number, matches dmaengine chan_id + 1 |
|---|
| 337 | 337 | * @direction: transfer type. Needed for setting SDMA script |
|---|
| 338 | | - * @slave_config Slave configuration |
|---|
| 338 | + * @slave_config: Slave configuration |
|---|
| 339 | 339 | * @peripheral_type: Peripheral type. Needed for setting SDMA script |
|---|
| 340 | 340 | * @event_id0: aka dma request line |
|---|
| 341 | 341 | * @event_id1: for channels that use 2 events |
|---|
| .. | .. |
|---|
| 354 | 354 | * @shp_addr: value for gReg[6] |
|---|
| 355 | 355 | * @per_addr: value for gReg[2] |
|---|
| 356 | 356 | * @status: status of dma channel |
|---|
| 357 | + * @context_loaded: ensure context is only loaded once |
|---|
| 357 | 358 | * @data: specific sdma interface structure |
|---|
| 358 | 359 | * @bd_pool: dma_pool for bd |
|---|
| 360 | + * @terminate_worker: used to call back into terminate work function |
|---|
| 359 | 361 | */ |
|---|
| 360 | 362 | struct sdma_channel { |
|---|
| 361 | 363 | struct virt_dma_chan vc; |
|---|
| .. | .. |
|---|
| 418 | 420 | int chnenbl0; |
|---|
| 419 | 421 | int num_events; |
|---|
| 420 | 422 | struct sdma_script_start_addrs *script_addrs; |
|---|
| 423 | + bool check_ratio; |
|---|
| 421 | 424 | }; |
|---|
| 422 | 425 | |
|---|
| 423 | 426 | struct sdma_engine { |
|---|
| 424 | 427 | struct device *dev; |
|---|
| 425 | | - struct device_dma_parameters dma_parms; |
|---|
| 426 | 428 | struct sdma_channel channel[MAX_DMA_CHANNELS]; |
|---|
| 427 | 429 | struct sdma_channel_control *channel_control; |
|---|
| 428 | 430 | void __iomem *regs; |
|---|
| .. | .. |
|---|
| 440 | 442 | unsigned int irq; |
|---|
| 441 | 443 | dma_addr_t bd0_phys; |
|---|
| 442 | 444 | struct sdma_buffer_descriptor *bd0; |
|---|
| 445 | + /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */ |
|---|
| 446 | + bool clk_ratio; |
|---|
| 443 | 447 | }; |
|---|
| 444 | 448 | |
|---|
| 445 | 449 | static int sdma_config_write(struct dma_chan *chan, |
|---|
| .. | .. |
|---|
| 554 | 558 | .script_addrs = &sdma_script_imx7d, |
|---|
| 555 | 559 | }; |
|---|
| 556 | 560 | |
|---|
| 561 | +static struct sdma_driver_data sdma_imx8mq = { |
|---|
| 562 | + .chnenbl0 = SDMA_CHNENBL0_IMX35, |
|---|
| 563 | + .num_events = 48, |
|---|
| 564 | + .script_addrs = &sdma_script_imx7d, |
|---|
| 565 | + .check_ratio = 1, |
|---|
| 566 | +}; |
|---|
| 567 | + |
|---|
| 557 | 568 | static const struct platform_device_id sdma_devtypes[] = { |
|---|
| 558 | 569 | { |
|---|
| 559 | 570 | .name = "imx25-sdma", |
|---|
| .. | .. |
|---|
| 577 | 588 | .name = "imx7d-sdma", |
|---|
| 578 | 589 | .driver_data = (unsigned long)&sdma_imx7d, |
|---|
| 579 | 590 | }, { |
|---|
| 591 | + .name = "imx8mq-sdma", |
|---|
| 592 | + .driver_data = (unsigned long)&sdma_imx8mq, |
|---|
| 593 | + }, { |
|---|
| 580 | 594 | /* sentinel */ |
|---|
| 581 | 595 | } |
|---|
| 582 | 596 | }; |
|---|
| .. | .. |
|---|
| 590 | 604 | { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, |
|---|
| 591 | 605 | { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, |
|---|
| 592 | 606 | { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, }, |
|---|
| 607 | + { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, }, |
|---|
| 593 | 608 | { /* sentinel */ } |
|---|
| 594 | 609 | }; |
|---|
| 595 | 610 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); |
|---|
| .. | .. |
|---|
| 662 | 677 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); |
|---|
| 663 | 678 | |
|---|
| 664 | 679 | /* Set bits of CONFIG register with dynamic context switching */ |
|---|
| 665 | | - if (readl(sdma->regs + SDMA_H_CONFIG) == 0) |
|---|
| 666 | | - writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); |
|---|
| 680 | + reg = readl(sdma->regs + SDMA_H_CONFIG); |
|---|
| 681 | + if ((reg & SDMA_H_CONFIG_CSM) == 0) { |
|---|
| 682 | + reg |= SDMA_H_CONFIG_CSM; |
|---|
| 683 | + writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG); |
|---|
| 684 | + } |
|---|
| 667 | 685 | |
|---|
| 668 | 686 | return ret; |
|---|
| 669 | 687 | } |
|---|
| .. | .. |
|---|
| 677 | 695 | int ret; |
|---|
| 678 | 696 | unsigned long flags; |
|---|
| 679 | 697 | |
|---|
| 680 | | - buf_virt = dma_alloc_coherent(NULL, |
|---|
| 681 | | - size, |
|---|
| 682 | | - &buf_phys, GFP_KERNEL); |
|---|
| 698 | + buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL); |
|---|
| 683 | 699 | if (!buf_virt) { |
|---|
| 684 | 700 | return -ENOMEM; |
|---|
| 685 | 701 | } |
|---|
| .. | .. |
|---|
| 698 | 714 | |
|---|
| 699 | 715 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); |
|---|
| 700 | 716 | |
|---|
| 701 | | - dma_free_coherent(NULL, size, buf_virt, buf_phys); |
|---|
| 717 | + dma_free_coherent(sdma->dev, size, buf_virt, buf_phys); |
|---|
| 702 | 718 | |
|---|
| 703 | 719 | return ret; |
|---|
| 704 | 720 | } |
|---|
| .. | .. |
|---|
| 744 | 760 | return; |
|---|
| 745 | 761 | } |
|---|
| 746 | 762 | sdmac->desc = desc = to_sdma_desc(&vd->tx); |
|---|
| 747 | | - /* |
|---|
| 748 | | - * Do not delete the node in desc_issued list in cyclic mode, otherwise |
|---|
| 749 | | - * the desc allocated will never be freed in vchan_dma_desc_free_list |
|---|
| 750 | | - */ |
|---|
| 751 | | - if (!(sdmac->flags & IMX_DMA_SG_LOOP)) |
|---|
| 752 | | - list_del(&vd->node); |
|---|
| 763 | + |
|---|
| 764 | + list_del(&vd->node); |
|---|
| 753 | 765 | |
|---|
| 754 | 766 | sdma->channel_control[channel].base_bd_ptr = desc->bd_phys; |
|---|
| 755 | 767 | sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; |
|---|
| .. | .. |
|---|
| 1050 | 1062 | |
|---|
| 1051 | 1063 | spin_lock_irqsave(&sdmac->vc.lock, flags); |
|---|
| 1052 | 1064 | vchan_get_all_descriptors(&sdmac->vc, &head); |
|---|
| 1053 | | - sdmac->desc = NULL; |
|---|
| 1054 | 1065 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); |
|---|
| 1055 | 1066 | vchan_dma_desc_free_list(&sdmac->vc, &head); |
|---|
| 1056 | 1067 | } |
|---|
| 1057 | 1068 | |
|---|
| 1058 | | -static int sdma_disable_channel_async(struct dma_chan *chan) |
|---|
| 1069 | +static int sdma_terminate_all(struct dma_chan *chan) |
|---|
| 1059 | 1070 | { |
|---|
| 1060 | 1071 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
|---|
| 1072 | + unsigned long flags; |
|---|
| 1073 | + |
|---|
| 1074 | + spin_lock_irqsave(&sdmac->vc.lock, flags); |
|---|
| 1061 | 1075 | |
|---|
| 1062 | 1076 | sdma_disable_channel(chan); |
|---|
| 1063 | 1077 | |
|---|
| 1064 | | - if (sdmac->desc) |
|---|
| 1078 | + if (sdmac->desc) { |
|---|
| 1079 | + vchan_terminate_vdesc(&sdmac->desc->vd); |
|---|
| 1080 | + sdmac->desc = NULL; |
|---|
| 1065 | 1081 | schedule_work(&sdmac->terminate_worker); |
|---|
| 1082 | + } |
|---|
| 1083 | + |
|---|
| 1084 | + spin_unlock_irqrestore(&sdmac->vc.lock, flags); |
|---|
| 1066 | 1085 | |
|---|
| 1067 | 1086 | return 0; |
|---|
| 1068 | 1087 | } |
|---|
| .. | .. |
|---|
| 1119 | 1138 | static int sdma_config_channel(struct dma_chan *chan) |
|---|
| 1120 | 1139 | { |
|---|
| 1121 | 1140 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
|---|
| 1122 | | - int ret; |
|---|
| 1123 | 1141 | |
|---|
| 1124 | 1142 | sdma_disable_channel(chan); |
|---|
| 1125 | 1143 | |
|---|
| .. | .. |
|---|
| 1159 | 1177 | sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ |
|---|
| 1160 | 1178 | } |
|---|
| 1161 | 1179 | |
|---|
| 1162 | | - ret = sdma_load_context(sdmac); |
|---|
| 1163 | | - |
|---|
| 1164 | | - return ret; |
|---|
| 1180 | + return 0; |
|---|
| 1165 | 1181 | } |
|---|
| 1166 | 1182 | |
|---|
| 1167 | 1183 | static int sdma_set_channel_priority(struct sdma_channel *sdmac, |
|---|
| .. | .. |
|---|
| 1184 | 1200 | { |
|---|
| 1185 | 1201 | int ret = -EBUSY; |
|---|
| 1186 | 1202 | |
|---|
| 1187 | | - sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, |
|---|
| 1203 | + sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, |
|---|
| 1188 | 1204 | GFP_NOWAIT); |
|---|
| 1189 | 1205 | if (!sdma->bd0) { |
|---|
| 1190 | 1206 | ret = -ENOMEM; |
|---|
| .. | .. |
|---|
| 1207 | 1223 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
|---|
| 1208 | 1224 | int ret = 0; |
|---|
| 1209 | 1225 | |
|---|
| 1210 | | - desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, |
|---|
| 1211 | | - GFP_NOWAIT); |
|---|
| 1226 | + desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, |
|---|
| 1227 | + &desc->bd_phys, GFP_NOWAIT); |
|---|
| 1212 | 1228 | if (!desc->bd) { |
|---|
| 1213 | 1229 | ret = -ENOMEM; |
|---|
| 1214 | 1230 | goto out; |
|---|
| .. | .. |
|---|
| 1221 | 1237 | { |
|---|
| 1222 | 1238 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
|---|
| 1223 | 1239 | |
|---|
| 1224 | | - dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys); |
|---|
| 1240 | + dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, |
|---|
| 1241 | + desc->bd_phys); |
|---|
| 1225 | 1242 | } |
|---|
| 1226 | 1243 | |
|---|
| 1227 | 1244 | static void sdma_desc_free(struct virt_dma_desc *vd) |
|---|
| .. | .. |
|---|
| 1301 | 1318 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
|---|
| 1302 | 1319 | struct sdma_engine *sdma = sdmac->sdma; |
|---|
| 1303 | 1320 | |
|---|
| 1304 | | - sdma_disable_channel_async(chan); |
|---|
| 1321 | + sdma_terminate_all(chan); |
|---|
| 1305 | 1322 | |
|---|
| 1306 | 1323 | sdma_channel_synchronize(chan); |
|---|
| 1307 | 1324 | |
|---|
| 1308 | | - if (sdmac->event_id0) |
|---|
| 1309 | | - sdma_event_disable(sdmac, sdmac->event_id0); |
|---|
| 1325 | + sdma_event_disable(sdmac, sdmac->event_id0); |
|---|
| 1310 | 1326 | if (sdmac->event_id1) |
|---|
| 1311 | 1327 | sdma_event_disable(sdmac, sdmac->event_id1); |
|---|
| 1312 | 1328 | |
|---|
| .. | .. |
|---|
| 1605 | 1621 | memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); |
|---|
| 1606 | 1622 | |
|---|
| 1607 | 1623 | /* Set ENBLn earlier to make sure dma request triggered after that */ |
|---|
| 1608 | | - if (sdmac->event_id0) { |
|---|
| 1609 | | - if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) |
|---|
| 1610 | | - return -EINVAL; |
|---|
| 1611 | | - sdma_event_enable(sdmac, sdmac->event_id0); |
|---|
| 1612 | | - } |
|---|
| 1624 | + if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) |
|---|
| 1625 | + return -EINVAL; |
|---|
| 1626 | + sdma_event_enable(sdmac, sdmac->event_id0); |
|---|
| 1613 | 1627 | |
|---|
| 1614 | 1628 | if (sdmac->event_id1) { |
|---|
| 1615 | 1629 | if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) |
|---|
| .. | .. |
|---|
| 1625 | 1639 | struct dma_tx_state *txstate) |
|---|
| 1626 | 1640 | { |
|---|
| 1627 | 1641 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
|---|
| 1628 | | - struct sdma_desc *desc; |
|---|
| 1642 | + struct sdma_desc *desc = NULL; |
|---|
| 1629 | 1643 | u32 residue; |
|---|
| 1630 | 1644 | struct virt_dma_desc *vd; |
|---|
| 1631 | 1645 | enum dma_status ret; |
|---|
| .. | .. |
|---|
| 1636 | 1650 | return ret; |
|---|
| 1637 | 1651 | |
|---|
| 1638 | 1652 | spin_lock_irqsave(&sdmac->vc.lock, flags); |
|---|
| 1653 | + |
|---|
| 1639 | 1654 | vd = vchan_find_desc(&sdmac->vc, cookie); |
|---|
| 1640 | | - if (vd) { |
|---|
| 1655 | + if (vd) |
|---|
| 1641 | 1656 | desc = to_sdma_desc(&vd->tx); |
|---|
| 1657 | + else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) |
|---|
| 1658 | + desc = sdmac->desc; |
|---|
| 1659 | + |
|---|
| 1660 | + if (desc) { |
|---|
| 1642 | 1661 | if (sdmac->flags & IMX_DMA_SG_LOOP) |
|---|
| 1643 | 1662 | residue = (desc->num_bd - desc->buf_ptail) * |
|---|
| 1644 | 1663 | desc->period_len - desc->chn_real_count; |
|---|
| 1645 | 1664 | else |
|---|
| 1646 | 1665 | residue = desc->chn_count - desc->chn_real_count; |
|---|
| 1647 | | - } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { |
|---|
| 1648 | | - residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; |
|---|
| 1649 | 1666 | } else { |
|---|
| 1650 | 1667 | residue = 0; |
|---|
| 1651 | 1668 | } |
|---|
| 1669 | + |
|---|
| 1652 | 1670 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); |
|---|
| 1653 | 1671 | |
|---|
| 1654 | 1672 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, |
|---|
| .. | .. |
|---|
| 1771 | 1789 | u32 reg, val, shift, num_map, i; |
|---|
| 1772 | 1790 | int ret = 0; |
|---|
| 1773 | 1791 | |
|---|
| 1774 | | - if (IS_ERR(np) || IS_ERR(gpr_np)) |
|---|
| 1792 | + if (IS_ERR(np) || !gpr_np) |
|---|
| 1775 | 1793 | goto out; |
|---|
| 1776 | 1794 | |
|---|
| 1777 | 1795 | event_remap = of_find_property(np, propname, NULL); |
|---|
| .. | .. |
|---|
| 1819 | 1837 | } |
|---|
| 1820 | 1838 | |
|---|
| 1821 | 1839 | out: |
|---|
| 1822 | | - if (!IS_ERR(gpr_np)) |
|---|
| 1840 | + if (gpr_np) |
|---|
| 1823 | 1841 | of_node_put(gpr_np); |
|---|
| 1824 | 1842 | |
|---|
| 1825 | 1843 | return ret; |
|---|
| .. | .. |
|---|
| 1849 | 1867 | if (ret) |
|---|
| 1850 | 1868 | goto disable_clk_ipg; |
|---|
| 1851 | 1869 | |
|---|
| 1870 | + if (sdma->drvdata->check_ratio && |
|---|
| 1871 | + (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))) |
|---|
| 1872 | + sdma->clk_ratio = 1; |
|---|
| 1873 | + |
|---|
| 1852 | 1874 | /* Be sure SDMA has not started yet */ |
|---|
| 1853 | 1875 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); |
|---|
| 1854 | 1876 | |
|---|
| 1855 | | - sdma->channel_control = dma_alloc_coherent(NULL, |
|---|
| 1877 | + sdma->channel_control = dma_alloc_coherent(sdma->dev, |
|---|
| 1856 | 1878 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + |
|---|
| 1857 | 1879 | sizeof(struct sdma_context_data), |
|---|
| 1858 | 1880 | &ccb_phys, GFP_KERNEL); |
|---|
| .. | .. |
|---|
| 1866 | 1888 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); |
|---|
| 1867 | 1889 | sdma->context_phys = ccb_phys + |
|---|
| 1868 | 1890 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); |
|---|
| 1869 | | - |
|---|
| 1870 | | - /* Zero-out the CCB structures array just allocated */ |
|---|
| 1871 | | - memset(sdma->channel_control, 0, |
|---|
| 1872 | | - MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); |
|---|
| 1873 | 1891 | |
|---|
| 1874 | 1892 | /* disable all channels */ |
|---|
| 1875 | 1893 | for (i = 0; i < sdma->drvdata->num_events; i++) |
|---|
| .. | .. |
|---|
| 1889 | 1907 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); |
|---|
| 1890 | 1908 | |
|---|
| 1891 | 1909 | /* Set bits of CONFIG register but with static context switching */ |
|---|
| 1892 | | - /* FIXME: Check whether to set ACR bit depending on clock ratios */ |
|---|
| 1893 | | - writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); |
|---|
| 1910 | + if (sdma->clk_ratio) |
|---|
| 1911 | + writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG); |
|---|
| 1912 | + else |
|---|
| 1913 | + writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); |
|---|
| 1894 | 1914 | |
|---|
| 1895 | 1915 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
|---|
| 1896 | 1916 | |
|---|
| .. | .. |
|---|
| 1946 | 1966 | */ |
|---|
| 1947 | 1967 | data.dma_request2 = 0; |
|---|
| 1948 | 1968 | |
|---|
| 1949 | | - return dma_request_channel(mask, sdma_filter_fn, &data); |
|---|
| 1969 | + return __dma_request_channel(&mask, sdma_filter_fn, &data, |
|---|
| 1970 | + ofdma->of_node); |
|---|
| 1950 | 1971 | } |
|---|
| 1951 | 1972 | |
|---|
| 1952 | 1973 | static int sdma_probe(struct platform_device *pdev) |
|---|
| .. | .. |
|---|
| 2029 | 2050 | |
|---|
| 2030 | 2051 | /* initially no scripts available */ |
|---|
| 2031 | 2052 | saddr_arr = (s32 *)sdma->script_addrs; |
|---|
| 2032 | | - for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) |
|---|
| 2053 | + for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++) |
|---|
| 2033 | 2054 | saddr_arr[i] = -EINVAL; |
|---|
| 2034 | 2055 | |
|---|
| 2035 | 2056 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); |
|---|
| .. | .. |
|---|
| 2077 | 2098 | sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; |
|---|
| 2078 | 2099 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; |
|---|
| 2079 | 2100 | sdma->dma_device.device_config = sdma_config; |
|---|
| 2080 | | - sdma->dma_device.device_terminate_all = sdma_disable_channel_async; |
|---|
| 2101 | + sdma->dma_device.device_terminate_all = sdma_terminate_all; |
|---|
| 2081 | 2102 | sdma->dma_device.device_synchronize = sdma_channel_synchronize; |
|---|
| 2082 | 2103 | sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; |
|---|
| 2083 | 2104 | sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; |
|---|
| .. | .. |
|---|
| 2085 | 2106 | sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; |
|---|
| 2086 | 2107 | sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; |
|---|
| 2087 | 2108 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
|---|
| 2088 | | - sdma->dma_device.dev->dma_parms = &sdma->dma_parms; |
|---|
| 2109 | + sdma->dma_device.copy_align = 2; |
|---|
| 2089 | 2110 | dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); |
|---|
| 2090 | 2111 | |
|---|
| 2091 | 2112 | platform_set_drvdata(pdev, sdma); |
|---|
| .. | .. |
|---|
| 2191 | 2212 | #if IS_ENABLED(CONFIG_SOC_IMX6Q) |
|---|
| 2192 | 2213 | MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); |
|---|
| 2193 | 2214 | #endif |
|---|
| 2194 | | -#if IS_ENABLED(CONFIG_SOC_IMX7D) |
|---|
| 2215 | +#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M) |
|---|
| 2195 | 2216 | MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); |
|---|
| 2196 | 2217 | #endif |
|---|
| 2197 | 2218 | MODULE_LICENSE("GPL"); |
|---|