2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/dma/fsl-edma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
 * drivers/dma/fsl-edma.c
 *
@@ -6,678 +7,40 @@
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
 */

-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_dma.h>

-#include "virt-dma.h"
+#include "fsl-edma-common.h"

-#define EDMA_CR			0x00
-#define EDMA_ES			0x04
-#define EDMA_ERQ		0x0C
-#define EDMA_EEI		0x14
-#define EDMA_SERQ		0x1B
-#define EDMA_CERQ		0x1A
-#define EDMA_SEEI		0x19
-#define EDMA_CEEI		0x18
-#define EDMA_CINT		0x1F
-#define EDMA_CERR		0x1E
-#define EDMA_SSRT		0x1D
-#define EDMA_CDNE		0x1C
-#define EDMA_INTR		0x24
-#define EDMA_ERR		0x2C
-
-#define EDMA_TCD_SADDR(x)	(0x1000 + 32 * (x))
-#define EDMA_TCD_SOFF(x)	(0x1004 + 32 * (x))
-#define EDMA_TCD_ATTR(x)	(0x1006 + 32 * (x))
-#define EDMA_TCD_NBYTES(x)	(0x1008 + 32 * (x))
-#define EDMA_TCD_SLAST(x)	(0x100C + 32 * (x))
-#define EDMA_TCD_DADDR(x)	(0x1010 + 32 * (x))
-#define EDMA_TCD_DOFF(x)	(0x1014 + 32 * (x))
-#define EDMA_TCD_CITER_ELINK(x)	(0x1016 + 32 * (x))
-#define EDMA_TCD_CITER(x)	(0x1016 + 32 * (x))
-#define EDMA_TCD_DLAST_SGA(x)	(0x1018 + 32 * (x))
-#define EDMA_TCD_CSR(x)		(0x101C + 32 * (x))
-#define EDMA_TCD_BITER_ELINK(x)	(0x101E + 32 * (x))
-#define EDMA_TCD_BITER(x)	(0x101E + 32 * (x))
-
-#define EDMA_CR_EDBG		BIT(1)
-#define EDMA_CR_ERCA		BIT(2)
-#define EDMA_CR_ERGA		BIT(3)
-#define EDMA_CR_HOE		BIT(4)
-#define EDMA_CR_HALT		BIT(5)
-#define EDMA_CR_CLM		BIT(6)
-#define EDMA_CR_EMLM		BIT(7)
-#define EDMA_CR_ECX		BIT(16)
-#define EDMA_CR_CX		BIT(17)
-
-#define EDMA_SEEI_SEEI(x)	((x) & 0x1F)
-#define EDMA_CEEI_CEEI(x)	((x) & 0x1F)
-#define EDMA_CINT_CINT(x)	((x) & 0x1F)
-#define EDMA_CERR_CERR(x)	((x) & 0x1F)
-
-#define EDMA_TCD_ATTR_DSIZE(x)		(((x) & 0x0007))
-#define EDMA_TCD_ATTR_DMOD(x)		(((x) & 0x001F) << 3)
-#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & 0x0007) << 8)
-#define EDMA_TCD_ATTR_SMOD(x)		(((x) & 0x001F) << 11)
-#define EDMA_TCD_ATTR_SSIZE_8BIT	(0x0000)
-#define EDMA_TCD_ATTR_SSIZE_16BIT	(0x0100)
-#define EDMA_TCD_ATTR_SSIZE_32BIT	(0x0200)
-#define EDMA_TCD_ATTR_SSIZE_64BIT	(0x0300)
-#define EDMA_TCD_ATTR_SSIZE_32BYTE	(0x0500)
-#define EDMA_TCD_ATTR_DSIZE_8BIT	(0x0000)
-#define EDMA_TCD_ATTR_DSIZE_16BIT	(0x0001)
-#define EDMA_TCD_ATTR_DSIZE_32BIT	(0x0002)
-#define EDMA_TCD_ATTR_DSIZE_64BIT	(0x0003)
-#define EDMA_TCD_ATTR_DSIZE_32BYTE	(0x0005)
-
-#define EDMA_TCD_SOFF_SOFF(x)		(x)
-#define EDMA_TCD_NBYTES_NBYTES(x)	(x)
-#define EDMA_TCD_SLAST_SLAST(x)		(x)
-#define EDMA_TCD_DADDR_DADDR(x)		(x)
-#define EDMA_TCD_CITER_CITER(x)		((x) & 0x7FFF)
-#define EDMA_TCD_DOFF_DOFF(x)		(x)
-#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x)	(x)
-#define EDMA_TCD_BITER_BITER(x)		((x) & 0x7FFF)
-
-#define EDMA_TCD_CSR_START		BIT(0)
-#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
-#define EDMA_TCD_CSR_INT_HALF		BIT(2)
-#define EDMA_TCD_CSR_D_REQ		BIT(3)
-#define EDMA_TCD_CSR_E_SG		BIT(4)
-#define EDMA_TCD_CSR_E_LINK		BIT(5)
-#define EDMA_TCD_CSR_ACTIVE		BIT(6)
-#define EDMA_TCD_CSR_DONE		BIT(7)
-
-#define EDMAMUX_CHCFG_DIS		0x0
-#define EDMAMUX_CHCFG_ENBL		0x80
-#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)
-
-#define DMAMUX_NR	2
-
-#define FSL_EDMA_BUSWIDTHS	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
-				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
-				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
-				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
-enum fsl_edma_pm_state {
-	RUNNING = 0,
-	SUSPENDED,
-};
-
-struct fsl_edma_hw_tcd {
-	__le32	saddr;
-	__le16	soff;
-	__le16	attr;
-	__le32	nbytes;
-	__le32	slast;
-	__le32	daddr;
-	__le16	doff;
-	__le16	citer;
-	__le32	dlast_sga;
-	__le16	csr;
-	__le16	biter;
-};
-
-struct fsl_edma_sw_tcd {
-	dma_addr_t			ptcd;
-	struct fsl_edma_hw_tcd		*vtcd;
-};
-
-struct fsl_edma_slave_config {
-	enum dma_transfer_direction	dir;
-	enum dma_slave_buswidth		addr_width;
-	u32				dev_addr;
-	u32				burst;
-	u32				attr;
-};
-
-struct fsl_edma_chan {
-	struct virt_dma_chan		vchan;
-	enum dma_status			status;
-	enum fsl_edma_pm_state		pm_state;
-	bool				idle;
-	u32				slave_id;
-	struct fsl_edma_engine		*edma;
-	struct fsl_edma_desc		*edesc;
-	struct fsl_edma_slave_config	fsc;
-	struct dma_pool			*tcd_pool;
-};
-
-struct fsl_edma_desc {
-	struct virt_dma_desc		vdesc;
-	struct fsl_edma_chan		*echan;
-	bool				iscyclic;
-	unsigned int			n_tcds;
-	struct fsl_edma_sw_tcd		tcd[];
-};
-
-struct fsl_edma_engine {
-	struct dma_device	dma_dev;
-	void __iomem		*membase;
-	void __iomem		*muxbase[DMAMUX_NR];
-	struct clk		*muxclk[DMAMUX_NR];
-	struct mutex		fsl_edma_mutex;
-	u32			n_chans;
-	int			txirq;
-	int			errirq;
-	bool			big_endian;
-	struct fsl_edma_chan	chans[];
-};
-
-/*
- * R/W functions for big- or little-endian registers:
- * The eDMA controller's endian is independent of the CPU core's endian.
- * For the big-endian IP module, the offset for 8-bit or 16-bit registers
- * should also be swapped opposite to that in little-endian IP.
- */
-
-static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
-{
-	if (edma->big_endian)
-		return ioread32be(addr);
-	else
-		return ioread32(addr);
-}
-
-static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
-{
-	/* swap the reg offset for these in big-endian mode */
-	if (edma->big_endian)
-		iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
-	else
-		iowrite8(val, addr);
-}
-
-static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
-{
-	/* swap the reg offset for these in big-endian mode */
-	if (edma->big_endian)
-		iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
-	else
-		iowrite16(val, addr);
-}
-
-static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
-{
-	if (edma->big_endian)
-		iowrite32be(val, addr);
-	else
-		iowrite32(val, addr);
-}
-
-static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct fsl_edma_chan, vchan.chan);
-}
-
-static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
-{
-	return container_of(vd, struct fsl_edma_desc, vdesc);
-}
-
-static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
-{
-	void __iomem *addr = fsl_chan->edma->membase;
-	u32 ch = fsl_chan->vchan.chan.chan_id;
-
-	edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
-	edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
-}
-
-static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
-{
-	void __iomem *addr = fsl_chan->edma->membase;
-	u32 ch = fsl_chan->vchan.chan.chan_id;
-
-	edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
-	edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
-}
-
-static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
-			unsigned int slot, bool enable)
-{
-	u32 ch = fsl_chan->vchan.chan.chan_id;
-	void __iomem *muxaddr;
-	unsigned chans_per_mux, ch_off;
-
-	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
-	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
-	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
-	slot = EDMAMUX_CHCFG_SOURCE(slot);
-
-	if (enable)
-		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
-	else
-		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
-}
-
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
-{
-	switch (addr_width) {
-	case 1:
-		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
-	case 2:
-		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
-	case 4:
-		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
-	case 8:
-		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
-	default:
-		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
-	}
-}
-
-static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
-{
-	struct fsl_edma_desc *fsl_desc;
-	int i;
-
-	fsl_desc = to_fsl_edma_desc(vdesc);
-	for (i = 0; i < fsl_desc->n_tcds; i++)
-		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
-			      fsl_desc->tcd[i].ptcd);
-	kfree(fsl_desc);
-}
-
-static int fsl_edma_terminate_all(struct dma_chan *chan)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-	fsl_edma_disable_request(fsl_chan);
-	fsl_chan->edesc = NULL;
-	fsl_chan->idle = true;
-	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-	return 0;
-}
-
-static int fsl_edma_pause(struct dma_chan *chan)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	unsigned long flags;
-
-	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-	if (fsl_chan->edesc) {
-		fsl_edma_disable_request(fsl_chan);
-		fsl_chan->status = DMA_PAUSED;
-		fsl_chan->idle = true;
-	}
-	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-	return 0;
-}
-
-static int fsl_edma_resume(struct dma_chan *chan)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	unsigned long flags;
-
-	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-	if (fsl_chan->edesc) {
-		fsl_edma_enable_request(fsl_chan);
-		fsl_chan->status = DMA_IN_PROGRESS;
-		fsl_chan->idle = false;
-	}
-	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-	return 0;
-}
-
-static int fsl_edma_slave_config(struct dma_chan *chan,
-				 struct dma_slave_config *cfg)
+static void fsl_edma_synchronize(struct dma_chan *chan)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

-	fsl_chan->fsc.dir = cfg->direction;
-	if (cfg->direction == DMA_DEV_TO_MEM) {
-		fsl_chan->fsc.dev_addr = cfg->src_addr;
-		fsl_chan->fsc.addr_width = cfg->src_addr_width;
-		fsl_chan->fsc.burst = cfg->src_maxburst;
-		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
-	} else if (cfg->direction == DMA_MEM_TO_DEV) {
-		fsl_chan->fsc.dev_addr = cfg->dst_addr;
-		fsl_chan->fsc.addr_width = cfg->dst_addr_width;
-		fsl_chan->fsc.burst = cfg->dst_maxburst;
-		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
-	} else {
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
-		struct virt_dma_desc *vdesc, bool in_progress)
-{
-	struct fsl_edma_desc *edesc = fsl_chan->edesc;
-	void __iomem *addr = fsl_chan->edma->membase;
-	u32 ch = fsl_chan->vchan.chan.chan_id;
-	enum dma_transfer_direction dir = fsl_chan->fsc.dir;
-	dma_addr_t cur_addr, dma_addr;
-	size_t len, size;
-	int i;
-
-	/* calculate the total size in this desc */
-	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
-		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
-			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
-
-	if (!in_progress)
-		return len;
-
-	if (dir == DMA_MEM_TO_DEV)
-		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
-	else
-		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
-
-	/* figure out the finished and calculate the residue */
-	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
-		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
-			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
-		if (dir == DMA_MEM_TO_DEV)
-			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
-		else
-			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
-
-		len -= size;
-		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
-			len += dma_addr + size - cur_addr;
-			break;
-		}
-	}
-
-	return len;
-}
-
-static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
-		dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct virt_dma_desc *vdesc;
-	enum dma_status status;
-	unsigned long flags;
-
-	status = dma_cookie_status(chan, cookie, txstate);
-	if (status == DMA_COMPLETE)
-		return status;
-
-	if (!txstate)
-		return fsl_chan->status;
-
-	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
-	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
-		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
-	else if (vdesc)
-		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
-	else
-		txstate->residue = 0;
-
-	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
-	return fsl_chan->status;
-}
-
-static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
-				  struct fsl_edma_hw_tcd *tcd)
-{
-	struct fsl_edma_engine *edma = fsl_chan->edma;
-	void __iomem *addr = fsl_chan->edma->membase;
-	u32 ch = fsl_chan->vchan.chan.chan_id;
-
-	/*
-	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
-	 * endian format. However, we need to load the TCD registers in
-	 * big- or little-endian obeying the eDMA engine model endian.
-	 */
-	edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
-	edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
-	edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
-
-	edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
-	edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
-
-	edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
-	edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
-
-	edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
-	edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
-	edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
-
-	edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
-
-	edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
-}
-
-static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
-		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
-		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
-		       bool disable_req, bool enable_sg)
-{
-	u16 csr = 0;
-
-	/*
-	 * eDMA hardware SGs require the TCDs to be stored in little
-	 * endian format irrespective of the register endian model.
-	 * So we put the value in little endian in memory, waiting
-	 * for fsl_edma_set_tcd_regs doing the swap.
-	 */
-	tcd->saddr = cpu_to_le32(src);
-	tcd->daddr = cpu_to_le32(dst);
-
-	tcd->attr = cpu_to_le16(attr);
-
-	tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
-
-	tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
-	tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
-
-	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
-	tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
-
-	tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
-
-	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
-	if (major_int)
-		csr |= EDMA_TCD_CSR_INT_MAJOR;
-
-	if (disable_req)
-		csr |= EDMA_TCD_CSR_D_REQ;
-
-	if (enable_sg)
-		csr |= EDMA_TCD_CSR_E_SG;
-
-	tcd->csr = cpu_to_le16(csr);
-}
-
-static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
-		int sg_len)
-{
-	struct fsl_edma_desc *fsl_desc;
-	int i;
-
-	fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
-			   GFP_NOWAIT);
-	if (!fsl_desc)
-		return NULL;
-
-	fsl_desc->echan = fsl_chan;
-	fsl_desc->n_tcds = sg_len;
-	for (i = 0; i < sg_len; i++) {
-		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
-					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
-		if (!fsl_desc->tcd[i].vtcd)
-			goto err;
-	}
-	return fsl_desc;
-
-err:
-	while (--i >= 0)
-		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
-			      fsl_desc->tcd[i].ptcd);
-	kfree(fsl_desc);
-	return NULL;
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
-		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct fsl_edma_desc *fsl_desc;
-	dma_addr_t dma_buf_next;
-	int sg_len, i;
-	u32 src_addr, dst_addr, last_sg, nbytes;
-	u16 soff, doff, iter;
-
-	if (!is_slave_direction(fsl_chan->fsc.dir))
-		return NULL;
-
-	sg_len = buf_len / period_len;
-	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
-	if (!fsl_desc)
-		return NULL;
-	fsl_desc->iscyclic = true;
-
-	dma_buf_next = dma_addr;
-	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
-	iter = period_len / nbytes;
-
-	for (i = 0; i < sg_len; i++) {
-		if (dma_buf_next >= dma_addr + buf_len)
-			dma_buf_next = dma_addr;
-
-		/* get next sg's physical address */
-		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
-		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
-			src_addr = dma_buf_next;
-			dst_addr = fsl_chan->fsc.dev_addr;
-			soff = fsl_chan->fsc.addr_width;
-			doff = 0;
-		} else {
-			src_addr = fsl_chan->fsc.dev_addr;
-			dst_addr = dma_buf_next;
-			soff = 0;
-			doff = fsl_chan->fsc.addr_width;
-		}
-
-		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
-				  fsl_chan->fsc.attr, soff, nbytes, 0, iter,
-				  iter, doff, last_sg, true, false, true);
-		dma_buf_next += period_len;
-	}
-
-	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
-		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct fsl_edma_desc *fsl_desc;
-	struct scatterlist *sg;
-	u32 src_addr, dst_addr, last_sg, nbytes;
-	u16 soff, doff, iter;
-	int i;
-
-	if (!is_slave_direction(fsl_chan->fsc.dir))
-		return NULL;
-
-	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
-	if (!fsl_desc)
-		return NULL;
-	fsl_desc->iscyclic = false;
-
-	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
-	for_each_sg(sgl, sg, sg_len, i) {
-		/* get next sg's physical address */
-		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
-		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
-			src_addr = sg_dma_address(sg);
-			dst_addr = fsl_chan->fsc.dev_addr;
-			soff = fsl_chan->fsc.addr_width;
-			doff = 0;
-		} else {
-			src_addr = fsl_chan->fsc.dev_addr;
-			dst_addr = sg_dma_address(sg);
-			soff = 0;
-			doff = fsl_chan->fsc.addr_width;
-		}
-
-		iter = sg_dma_len(sg) / nbytes;
-		if (i < sg_len - 1) {
-			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
-			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
-					  dst_addr, fsl_chan->fsc.attr, soff,
-					  nbytes, 0, iter, iter, doff, last_sg,
-					  false, false, true);
-		} else {
-			last_sg = 0;
-			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
-					  dst_addr, fsl_chan->fsc.attr, soff,
-					  nbytes, 0, iter, iter, doff, last_sg,
-					  true, true, false);
-		}
-	}
-
-	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
-{
-	struct virt_dma_desc *vdesc;
-
-	vdesc = vchan_next_desc(&fsl_chan->vchan);
-	if (!vdesc)
-		return;
-	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
-	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
-	fsl_edma_enable_request(fsl_chan);
-	fsl_chan->status = DMA_IN_PROGRESS;
-	fsl_chan->idle = false;
+	vchan_synchronize(&fsl_chan->vchan);
 }

 static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
 {
 	struct fsl_edma_engine *fsl_edma = dev_id;
 	unsigned int intr, ch;
-	void __iomem *base_addr;
+	struct edma_regs *regs = &fsl_edma->regs;
 	struct fsl_edma_chan *fsl_chan;

-	base_addr = fsl_edma->membase;
-
-	intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+	intr = edma_readl(fsl_edma, regs->intl);
 	if (!intr)
 		return IRQ_NONE;

 	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
 		if (intr & (0x1 << ch)) {
-			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
-				base_addr + EDMA_CINT);
+			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

 			fsl_chan = &fsl_edma->chans[ch];

@@ -712,16 +75,16 @@
 {
 	struct fsl_edma_engine *fsl_edma = dev_id;
 	unsigned int err, ch;
+	struct edma_regs *regs = &fsl_edma->regs;

-	err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+	err = edma_readl(fsl_edma, regs->errl);
 	if (!err)
 		return IRQ_NONE;

 	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
 		if (err & (0x1 << ch)) {
 			fsl_edma_disable_request(&fsl_edma->chans[ch]);
-			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
-				fsl_edma->membase + EDMA_CERR);
+			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
 			fsl_edma->chans[ch].status = DMA_ERROR;
 			fsl_edma->chans[ch].idle = true;
 		}
@@ -737,32 +100,14 @@
 	return fsl_edma_err_handler(irq, dev_id);
 }

-static void fsl_edma_issue_pending(struct dma_chan *chan)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	unsigned long flags;
-
-	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-
-	if (unlikely(fsl_chan->pm_state != RUNNING)) {
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		/* cannot submit due to suspend */
-		return;
-	}
-
-	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
-		fsl_edma_xfer_desc(fsl_chan);
-
-	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-}
-
 static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
 		struct of_dma *ofdma)
 {
 	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
 	struct dma_chan *chan, *_chan;
 	struct fsl_edma_chan *fsl_chan;
-	unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
+	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
+	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;

 	if (dma_spec->args_count != 2)
 		return NULL;
@@ -788,72 +133,83 @@
 	return NULL;
 }

-static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
-	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
-				sizeof(struct fsl_edma_hw_tcd),
-				32, 0);
-	return 0;
-}
-
-static void fsl_edma_free_chan_resources(struct dma_chan *chan)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-	fsl_edma_disable_request(fsl_chan);
-	fsl_edma_chan_mux(fsl_chan, 0, false);
-	fsl_chan->edesc = NULL;
-	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
-	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-	dma_pool_destroy(fsl_chan->tcd_pool);
-	fsl_chan->tcd_pool = NULL;
-}
-
 static int
 fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
 	int ret;

 	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
-	if (fsl_edma->txirq < 0) {
-		dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
+	if (fsl_edma->txirq < 0)
 		return fsl_edma->txirq;
-	}

 	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
-	if (fsl_edma->errirq < 0) {
-		dev_err(&pdev->dev, "Can't get edma-err irq.\n");
+	if (fsl_edma->errirq < 0)
 		return fsl_edma->errirq;
-	}

 	if (fsl_edma->txirq == fsl_edma->errirq) {
 		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
 				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
 		if (ret) {
 			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
-			return  ret;
+			return ret;
 		}
 	} else {
 		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
 				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
 		if (ret) {
 			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
-			return  ret;
+			return ret;
 		}

 		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
 				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
 		if (ret) {
 			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
-			return  ret;
+			return ret;
 		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_edma2_irq_init(struct platform_device *pdev,
+		   struct fsl_edma_engine *fsl_edma)
+{
+	int i, ret, irq;
+	int count;
+
+	count = platform_irq_count(pdev);
+	dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
+	if (count <= 2) {
+		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
+		return -EINVAL;
+	}
+	/*
+	 * 16 channel independent interrupts + 1 error interrupt on i.mx7ulp.
+	 * 2 channel share one interrupt, for example, ch0/ch16, ch1/ch17...
+	 * For now, just simply request irq without IRQF_SHARED flag, since 16
+	 * channels are enough on i.mx7ulp whose M4 domain own some peripherals.
+	 */
+	for (i = 0; i < count; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			return -ENXIO;
+
+		sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
+
+		/* The last IRQ is for eDMA err */
+		if (i == count - 1)
+			ret = devm_request_irq(&pdev->dev, irq,
+						fsl_edma_err_handler,
+						0, "eDMA2-ERR", fsl_edma);
+		else
+			ret = devm_request_irq(&pdev->dev, irq,
+						fsl_edma_tx_handler, 0,
+						fsl_edma->chans[i].chan_name,
+						fsl_edma);
+		if (ret)
+			return ret;
 	}

 	return 0;
@@ -878,14 +234,53 @@
 		clk_disable_unprepare(fsl_edma->muxclk[i]);
 }

+static struct fsl_edma_drvdata vf610_data = {
+	.version = v1,
+	.dmamuxs = DMAMUX_NR,
+	.setup_irq = fsl_edma_irq_init,
+};
+
+static struct fsl_edma_drvdata ls1028a_data = {
+	.version = v1,
+	.dmamuxs = DMAMUX_NR,
+	.mux_swap = true,
+	.setup_irq = fsl_edma_irq_init,
+};
+
+static struct fsl_edma_drvdata imx7ulp_data = {
+	.version = v3,
+	.dmamuxs = 1,
+	.has_dmaclk = true,
+	.setup_irq = fsl_edma2_irq_init,
+};
+
+static const struct of_device_id fsl_edma_dt_ids[] = {
+	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+	{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
+	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+
 static int fsl_edma_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id =
+			of_match_device(fsl_edma_dt_ids, &pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
 	struct fsl_edma_engine *fsl_edma;
+	const struct fsl_edma_drvdata *drvdata = NULL;
 	struct fsl_edma_chan *fsl_chan;
+	struct edma_regs *regs;
 	struct resource *res;
 	int len, chans;
 	int ret, i;
+
+	if (of_id)
+		drvdata = of_id->data;
+	if (!drvdata) {
+		dev_err(&pdev->dev, "unable to find driver data\n");
+		return -EINVAL;
+	}

 	ret = of_property_read_u32(np, "dma-channels", &chans);
 	if (ret) {
@@ -898,6 +293,7 @@
 	if (!fsl_edma)
 		return -ENOMEM;

+	fsl_edma->drvdata = drvdata;
 	fsl_edma->n_chans = chans;
 	mutex_init(&fsl_edma->fsl_edma_mutex);

@@ -906,7 +302,24 @@
 	if (IS_ERR(fsl_edma->membase))
 		return PTR_ERR(fsl_edma->membase);

-	for (i = 0; i < DMAMUX_NR; i++) {
+	fsl_edma_setup_regs(fsl_edma);
+	regs = &fsl_edma->regs;
+
+	if (drvdata->has_dmaclk) {
+		fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
+		if (IS_ERR(fsl_edma->dmaclk)) {
+			dev_err(&pdev->dev, "Missing DMA block clock.\n");
+			return PTR_ERR(fsl_edma->dmaclk);
+		}
+
+		ret = clk_prepare_enable(fsl_edma->dmaclk);
+		if (ret) {
+			dev_err(&pdev->dev, "DMA clk block failed.\n");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
 		char clkname[32];

 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
@@ -943,15 +356,16 @@
 		fsl_chan->pm_state = RUNNING;
 		fsl_chan->slave_id = 0;
 		fsl_chan->idle = true;
+		fsl_chan->dma_dir = DMA_NONE;
 		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
 		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

-		edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
 		fsl_edma_chan_mux(fsl_chan, 0, false);
 	}

-	edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR);
-	ret = fsl_edma_irq_init(pdev, fsl_edma);
+	edma_writel(fsl_edma, ~0, regs->intl);
+	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
 	if (ret)
 		return ret;

@@ -971,6 +385,7 @@
 	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
 	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
 	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
+	fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
 	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

 	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
@@ -983,7 +398,7 @@
 	if (ret) {
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA engine. (%d)\n", ret);
-		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
 		return ret;
 	}

@@ -992,25 +407,14 @@
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
 		dma_async_device_unregister(&fsl_edma->dma_dev);
-		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
 		return ret;
 	}

 	/* enable round robin arbitration */
-	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

 	return 0;
-}
-
-static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
-{
-	struct fsl_edma_chan *chan, *_chan;
-
-	list_for_each_entry_safe(chan, _chan,
-			&dmadev->channels, vchan.chan.device_node) {
-		list_del(&chan->vchan.chan.device_node);
-		tasklet_kill(&chan->vchan.task);
-	}
 }

 static int fsl_edma_remove(struct platform_device *pdev)
@@ -1022,7 +426,7 @@
 	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
 	of_dma_controller_free(np);
 	dma_async_device_unregister(&fsl_edma->dma_dev);
-	fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);

 	return 0;
 }
@@ -1055,18 +459,18 @@
 {
 	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
 	struct fsl_edma_chan *fsl_chan;
+	struct edma_regs *regs = &fsl_edma->regs;
 	int i;

 	for (i = 0; i < fsl_edma->n_chans; i++) {
 		fsl_chan = &fsl_edma->chans[i];
 		fsl_chan->pm_state = RUNNING;
-		edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
 		if (fsl_chan->slave_id != 0)
 			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
 	}

-	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
-			fsl_edma->membase + EDMA_CR);
+	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

 	return 0;
 }
@@ -1080,12 +484,6 @@
 	.suspend_late = fsl_edma_suspend_late,
 	.resume_early = fsl_edma_resume_early,
 };
-
-static const struct of_device_id fsl_edma_dt_ids[] = {
-	{ .compatible = "fsl,vf610-edma", },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

 static struct platform_driver fsl_edma_driver = {
 	.driver = {