2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/dma/dma-axi-dmac.c
@@ -1,12 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for the Analog Devices AXI-DMAC core
  *
- * Copyright 2013-2015 Analog Devices Inc.
+ * Copyright 2013-2019 Analog Devices Inc.
  * Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
  */
 
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
@@ -19,7 +19,9 @@
 #include <linux/of.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/fpga/adi-axi-common.h>
 
 #include <dt-bindings/dma/axi-dmac.h>
 
@@ -44,6 +46,16 @@
  * there is no address than can or needs to be configured for the device side.
  */
 
+#define AXI_DMAC_REG_INTERFACE_DESC	0x10
+#define   AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
+#define   AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
+#define   AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
+#define   AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
+#define   AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
+#define   AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
+#define   AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
+#define   AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
+
 #define AXI_DMAC_REG_IRQ_MASK		0x80
 #define AXI_DMAC_REG_IRQ_PENDING	0x84
 #define AXI_DMAC_REG_IRQ_SOURCE		0x88
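
The four GENMASK/FIELD_GET pairs above carve the source/destination bus type and (log2) bus width out of the new 0x10 interface-description register. A standalone sketch of the decode, with the kernel macros re-created locally and a made-up register value:

    /* Userspace re-creation of the decode; the register value is hypothetical. */
    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
    #define FIELD_GET(msk, x)	(((x) & (msk)) >> __builtin_ctz(msk))

    int main(void)
    {
    	uint32_t desc = 0x00001233;	/* made-up INTERFACE_DESC readback */

    	printf("src type  = %u\n", FIELD_GET(GENMASK(13, 12), desc));		/* 1 */
    	printf("src width = %u bytes\n", 1u << FIELD_GET(GENMASK(11, 8), desc));	/* 4 */
    	printf("dst type  = %u\n", FIELD_GET(GENMASK(5, 4), desc));		/* 3 */
    	printf("dst width = %u bytes\n", 1u << FIELD_GET(GENMASK(3, 0), desc));	/* 8 */
    	return 0;
    }

The `1u << width` step matches the driver's own `chan->src_width = 1 << val`, since the hardware stores widths as log2 of the byte count.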
@@ -63,6 +75,8 @@
 #define AXI_DMAC_REG_STATUS		0x430
 #define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
 #define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
+#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
+#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450
 
 #define AXI_DMAC_CTRL_ENABLE		BIT(0)
 #define AXI_DMAC_CTRL_PAUSE		BIT(1)
@@ -71,6 +85,10 @@
 #define AXI_DMAC_IRQ_EOT		BIT(1)
 
 #define AXI_DMAC_FLAG_CYCLIC		BIT(0)
+#define AXI_DMAC_FLAG_LAST		BIT(1)
+#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)
+
+#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)
 
 /* The maximum ID allocated by the hardware is 31 */
 #define AXI_DMAC_SG_UNUSED 32U
@@ -83,12 +101,14 @@
 	unsigned int dest_stride;
 	unsigned int src_stride;
 	unsigned int id;
+	unsigned int partial_len;
 	bool schedule_when_free;
 };
 
 struct axi_dmac_desc {
 	struct virt_dma_desc vdesc;
 	bool cyclic;
+	bool have_partial_xfer;
 
 	unsigned int num_submitted;
 	unsigned int num_completed;
@@ -109,8 +129,10 @@
 	unsigned int dest_type;
 
 	unsigned int max_length;
-	unsigned int align_mask;
+	unsigned int address_align_mask;
+	unsigned int length_align_mask;
 
+	bool hw_partial_xfer;
 	bool hw_cyclic;
 	bool hw_2d;
 };
@@ -123,8 +145,6 @@
 
 	struct dma_device dma_dev;
 	struct axi_dmac_chan chan;
-
-	struct device_dma_parameters dma_parms;
 };
 
 static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
@@ -166,16 +186,16 @@
 
 static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
 {
-	if (len == 0 || len > chan->max_length)
+	if (len == 0)
 		return false;
-	if ((len & chan->align_mask) != 0) /* Not aligned */
+	if ((len & chan->length_align_mask) != 0) /* Not aligned */
 		return false;
 	return true;
 }
 
 static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
 {
-	if ((addr & chan->align_mask) != 0) /* Not aligned */
+	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
 		return false;
 	return true;
 }
@@ -211,11 +231,13 @@
 	}
 
 	desc->num_submitted++;
-	if (desc->num_submitted == desc->num_sgs) {
+	if (desc->num_submitted == desc->num_sgs ||
+	    desc->have_partial_xfer) {
 		if (desc->cyclic)
 			desc->num_submitted = 0; /* Start again */
 		else
 			chan->next_desc = NULL;
+		flags |= AXI_DMAC_FLAG_LAST;
 	} else {
 		chan->next_desc = desc;
 	}
@@ -241,6 +263,9 @@
 	    desc->num_sgs == 1)
 		flags |= AXI_DMAC_FLAG_CYCLIC;
 
+	if (chan->hw_partial_xfer)
+		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
+
 	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
 	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
 	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
@@ -253,6 +278,83 @@
 		struct axi_dmac_desc, vdesc.node);
 }
 
+static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
+	struct axi_dmac_sg *sg)
+{
+	if (chan->hw_2d)
+		return sg->x_len * sg->y_len;
+	else
+		return sg->x_len;
+}
+
+static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
+{
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *sg;
+	u32 xfer_done, len, id, i;
+	bool found_sg;
+
+	do {
+		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
+		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);
+
+		found_sg = false;
+		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
+			for (i = 0; i < desc->num_sgs; i++) {
+				sg = &desc->sg[i];
+				if (sg->id == AXI_DMAC_SG_UNUSED)
+					continue;
+				if (sg->id == id) {
+					desc->have_partial_xfer = true;
+					sg->partial_len = len;
+					found_sg = true;
+					break;
+				}
+			}
+			if (found_sg)
+				break;
+		}
+
+		if (found_sg) {
+			dev_dbg(dmac->dma_dev.dev,
+				"Found partial segment id=%u, len=%u\n",
+				id, len);
+		} else {
+			dev_warn(dmac->dma_dev.dev,
+				 "Not found partial segment id=%u, len=%u\n",
+				 id, len);
+		}
+
+		/* Check if we have any more partial transfers */
+		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
+		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);
+
+	} while (!xfer_done);
+}
+
+static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
+	struct axi_dmac_desc *active)
+{
+	struct dmaengine_result *rslt = &active->vdesc.tx_result;
+	unsigned int start = active->num_completed - 1;
+	struct axi_dmac_sg *sg;
+	unsigned int i, total;
+
+	rslt->result = DMA_TRANS_NOERROR;
+	rslt->residue = 0;
+
+	/*
+	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
+	 */
+	for (i = start; i < active->num_sgs; i++) {
+		sg = &active->sg[i];
+		total = axi_dmac_total_sg_bytes(chan, sg);
+		rslt->residue += (total - sg->partial_len);
+	}
+}
+
 static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	unsigned int completed_transfers)
 {
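
axi_dmac_compute_residue() above reports, through the dmaengine tx_result, how many bytes never made it: the partially completed segment contributes total minus partial_len, and every later segment counts in full. A standalone sketch of that arithmetic with made-up segment sizes:

    /* Made-up numbers; mirrors the residue loop, not the driver itself. */
    #include <stdio.h>

    struct seg { unsigned int total, partial_len; };

    int main(void)
    {
    	/* The first remaining segment stopped after 100 of 4096 bytes. */
    	struct seg sg[] = { { 4096, 100 }, { 4096, 0 }, { 4096, 0 } };
    	unsigned int i, residue = 0;

    	for (i = 0; i < 3; i++)
    		residue += sg[i].total - sg[i].partial_len;

    	printf("residue = %u\n", residue);	/* 3996 + 4096 + 4096 = 12188 */
    	return 0;
    }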
@@ -263,6 +365,10 @@
 	active = axi_dmac_active_desc(chan);
 	if (!active)
 		return false;
+
+	if (chan->hw_partial_xfer &&
+	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
+		axi_dmac_dequeue_partial_xfers(chan);
 
 	do {
 		sg = &active->sg[active->num_completed];
@@ -277,10 +383,14 @@
 			start_next = true;
 		}
 
+		if (sg->partial_len)
+			axi_dmac_compute_residue(chan, active);
+
 		if (active->cyclic)
 			vchan_cyclic_callback(&active->vdesc);
 
-		if (active->num_completed == active->num_sgs) {
+		if (active->num_completed == active->num_sgs ||
+		    sg->partial_len) {
 			if (active->cyclic) {
 				active->num_completed = 0; /* wrap around */
 			} else {
@@ -367,8 +477,7 @@
 	struct axi_dmac_desc *desc;
 	unsigned int i;
 
-	desc = kzalloc(sizeof(struct axi_dmac_desc) +
-		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
 
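
struct_size(desc, sg, num_sgs) replaces the open-coded sizeof arithmetic: it computes sizeof(*desc) plus num_sgs trailing sg elements, and the kernel implementation additionally saturates to SIZE_MAX on overflow so the allocation fails rather than being undersized. A minimal userspace equivalent, with demo struct names and without the overflow saturation:

    #include <stdio.h>
    #include <stdlib.h>

    struct sg_demo { unsigned int x_len, y_len; };

    struct desc_demo {
    	unsigned int num_sgs;
    	struct sg_demo sg[];		/* flexible array member */
    };

    int main(void)
    {
    	unsigned int num_sgs = 4;
    	size_t size = sizeof(struct desc_demo) + num_sgs * sizeof(struct sg_demo);
    	struct desc_demo *desc = calloc(1, size);

    	if (!desc)
    		return 1;
    	desc->num_sgs = num_sgs;
    	printf("allocated %zu bytes for %u segments\n", size, num_sgs);
    	free(desc);
    	return 0;
    }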
@@ -380,6 +489,49 @@
 	return desc;
 }
 
+static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
+	enum dma_transfer_direction direction, dma_addr_t addr,
+	unsigned int num_periods, unsigned int period_len,
+	struct axi_dmac_sg *sg)
+{
+	unsigned int num_segments, i;
+	unsigned int segment_size;
+	unsigned int len;
+
+	/* Split into multiple equally sized segments if necessary */
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+	segment_size = DIV_ROUND_UP(period_len, num_segments);
+	/* Take care of alignment */
+	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
+
+	for (i = 0; i < num_periods; i++) {
+		len = period_len;
+
+		while (len > segment_size) {
+			if (direction == DMA_DEV_TO_MEM)
+				sg->dest_addr = addr;
+			else
+				sg->src_addr = addr;
+			sg->x_len = segment_size;
+			sg->y_len = 1;
+			sg++;
+			addr += segment_size;
+			len -= segment_size;
+		}
+
+		if (direction == DMA_DEV_TO_MEM)
+			sg->dest_addr = addr;
+		else
+			sg->src_addr = addr;
+		sg->x_len = len;
+		sg->y_len = 1;
+		sg++;
+		addr += len;
+	}
+
+	return sg;
+}
+
 static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 	struct dma_chan *c, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
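
The splitting in axi_dmac_fill_linear_sg() rests on two idioms: DIV_ROUND_UP to get equally sized segments, and ((x - 1) | mask) + 1 to round a length up to the next multiple of mask + 1 (valid for power-of-two alignments). A worked standalone example, assuming a hypothetical max_length of 4096 and length_align_mask of 3:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int period_len = 10000, max_length = 4096, align_mask = 3;
    	unsigned int num_segments = DIV_ROUND_UP(period_len, max_length);	/* 3 */
    	unsigned int segment_size = DIV_ROUND_UP(period_len, num_segments);	/* 3334 */

    	/* ((x - 1) | mask) + 1 rounds x up to the next multiple of mask + 1. */
    	segment_size = ((segment_size - 1) | align_mask) + 1;	/* 3336 */

    	while (period_len > segment_size) {
    		printf("segment of %u bytes\n", segment_size);
    		period_len -= segment_size;
    	}
    	printf("final segment of %u bytes\n", period_len);	/* 3328 */
    	return 0;
    }

So a 10000-byte period becomes segments of 3336, 3336 and 3328 bytes, each aligned and under max_length.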
@@ -387,15 +539,23 @@
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *dsg;
 	struct scatterlist *sg;
+	unsigned int num_sgs;
 	unsigned int i;
 
 	if (direction != chan->direction)
 		return NULL;
 
-	desc = axi_dmac_alloc_desc(sg_len);
+	num_sgs = 0;
+	for_each_sg(sgl, sg, sg_len, i)
+		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
+
+	desc = axi_dmac_alloc_desc(num_sgs);
 	if (!desc)
 		return NULL;
+
+	dsg = desc->sg;
 
 	for_each_sg(sgl, sg, sg_len, i) {
 		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
@@ -404,12 +564,8 @@
 			return NULL;
 		}
 
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = sg_dma_address(sg);
-		else
-			desc->sg[i].src_addr = sg_dma_address(sg);
-		desc->sg[i].x_len = sg_dma_len(sg);
-		desc->sg[i].y_len = 1;
+		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
+			sg_dma_len(sg), dsg);
 	}
 
 	desc->cyclic = false;
@@ -424,7 +580,7 @@
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
-	unsigned int num_periods, i;
+	unsigned int num_periods, num_segments;
 
 	if (direction != chan->direction)
 		return NULL;
@@ -437,20 +593,14 @@
 		return NULL;
 
 	num_periods = buf_len / period_len;
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
 
-	desc = axi_dmac_alloc_desc(num_periods);
+	desc = axi_dmac_alloc_desc(num_periods * num_segments);
 	if (!desc)
 		return NULL;
 
-	for (i = 0; i < num_periods; i++) {
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = buf_addr;
-		else
-			desc->sg[i].src_addr = buf_addr;
-		desc->sg[i].x_len = period_len;
-		desc->sg[i].y_len = 1;
-		buf_addr += period_len;
-	}
+	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
+		period_len, desc->sg);
 
 	desc->cyclic = true;
 
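
Since every period is split the same way, the cyclic descriptor above needs num_periods * num_segments scatter-gather entries. With made-up numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int buf_len = 65536, period_len = 16384, max_length = 4096;
    	unsigned int num_periods = buf_len / period_len;			/* 4 */
    	unsigned int num_segments = DIV_ROUND_UP(period_len, max_length);	/* 4 */

    	printf("sg entries: %u\n", num_periods * num_segments);	/* 16 */
    	return 0;
    }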
@@ -522,6 +672,9 @@
 		desc->sg[0].y_len = 1;
 	}
 
+	if (flags & DMA_CYCLIC)
+		desc->cyclic = true;
+
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 }
 
@@ -533,6 +686,58 @@
 static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
 {
 	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+}
+
+static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case AXI_DMAC_REG_IRQ_MASK:
+	case AXI_DMAC_REG_IRQ_SOURCE:
+	case AXI_DMAC_REG_IRQ_PENDING:
+	case AXI_DMAC_REG_CTRL:
+	case AXI_DMAC_REG_TRANSFER_ID:
+	case AXI_DMAC_REG_START_TRANSFER:
+	case AXI_DMAC_REG_FLAGS:
+	case AXI_DMAC_REG_DEST_ADDRESS:
+	case AXI_DMAC_REG_SRC_ADDRESS:
+	case AXI_DMAC_REG_X_LENGTH:
+	case AXI_DMAC_REG_Y_LENGTH:
+	case AXI_DMAC_REG_DEST_STRIDE:
+	case AXI_DMAC_REG_SRC_STRIDE:
+	case AXI_DMAC_REG_TRANSFER_DONE:
+	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
+	case AXI_DMAC_REG_STATUS:
+	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
+	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
+	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
+	case AXI_DMAC_REG_PARTIAL_XFER_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config axi_dmac_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
+	.readable_reg = axi_dmac_regmap_rdwr,
+	.writeable_reg = axi_dmac_regmap_rdwr,
+};
+
+static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
+{
+	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
+
+	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_MEM;
+	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_DEV;
+	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_DEV_TO_MEM;
+	else
+		chan->direction = DMA_DEV_TO_DEV;
 }
 
 /*
@@ -578,38 +783,132 @@
 		return ret;
 	chan->dest_width = val / 8;
 
-	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
-	if (ret)
-		return ret;
+	axi_dmac_adjust_chan_params(chan);
 
-	if (val >= 32)
-		chan->max_length = UINT_MAX;
-	else
-		chan->max_length = (1ULL << val) - 1;
+	return 0;
+}
 
-	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
+static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
+{
+	struct device_node *of_channels, *of_chan;
+	int ret;
 
-	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
-		chan->direction = DMA_MEM_TO_MEM;
-	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
-		chan->direction = DMA_MEM_TO_DEV;
-	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
-		chan->direction = DMA_DEV_TO_MEM;
-	else
-		chan->direction = DMA_DEV_TO_DEV;
+	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
+	if (of_channels == NULL)
+		return -ENODEV;
 
-	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
-	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
+	for_each_child_of_node(of_channels, of_chan) {
+		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
+		if (ret) {
+			of_node_put(of_chan);
+			of_node_put(of_channels);
+			return -EINVAL;
+		}
+	}
+	of_node_put(of_channels);
+
+	return 0;
+}
+
+static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
+{
+	struct axi_dmac_chan *chan = &dmac->chan;
+	unsigned int val, desc;
+
+	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
+	if (desc == 0) {
+		dev_err(dev, "DMA interface register reads zero\n");
+		return -EFAULT;
+	}
+
+	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
+	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
+		dev_err(dev, "Invalid source bus type read: %d\n", val);
+		return -EINVAL;
+	}
+	chan->src_type = val;
+
+	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
+	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
+		dev_err(dev, "Invalid destination bus type read: %d\n", val);
+		return -EINVAL;
+	}
+	chan->dest_type = val;
+
+	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
+	if (val == 0) {
+		dev_err(dev, "Source bus width is zero\n");
+		return -EINVAL;
+	}
+	/* widths are stored in log2 */
+	chan->src_width = 1 << val;
+
+	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
+	if (val == 0) {
+		dev_err(dev, "Destination bus width is zero\n");
+		return -EINVAL;
+	}
+	chan->dest_width = 1 << val;
+
+	axi_dmac_adjust_chan_params(chan);
+
+	return 0;
+}
+
+static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
+{
+	struct axi_dmac_chan *chan = &dmac->chan;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
+		chan->hw_cyclic = true;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
+		chan->hw_2d = true;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
+	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+	if (chan->max_length != UINT_MAX)
+		chan->max_length++;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
+	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
+		dev_err(dmac->dma_dev.dev,
+			"Destination memory-mapped interface not supported.");
+		return -ENODEV;
+	}
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
+	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
+		dev_err(dmac->dma_dev.dev,
+			"Source memory-mapped interface not supported.");
+		return -ENODEV;
+	}
+
+	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
+		chan->hw_partial_xfer = true;
+
+	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
+		chan->length_align_mask =
+			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+	} else {
+		chan->length_align_mask = chan->address_align_mask;
+	}
 
 	return 0;
 }
 
 static int axi_dmac_probe(struct platform_device *pdev)
 {
-	struct device_node *of_channels, *of_chan;
 	struct dma_device *dma_dev;
 	struct axi_dmac *dmac;
 	struct resource *res;
+	struct regmap *regmap;
+	unsigned int version;
 	int ret;
 
 	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
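
axi_dmac_detect_caps() probes features by writing a pattern and inspecting what sticks: upper X_LENGTH bits the core does not implement read back as zero (bounding max_length), and on cores >= 4.1.a a write of 0 reads back the length alignment mask. A simulation of that idiom against a fake register; the bit layout (24 length bits, 4-byte length granularity) is made up:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t x_length_reg;

    /* Pretend only bits [23:0] exist and the low two bits are stuck at 1. */
    static void reg_write(uint32_t v) { x_length_reg = (v & 0x00ffffff) | 0x3; }
    static uint32_t reg_read(void) { return x_length_reg; }

    int main(void)
    {
    	reg_write(0xffffffff);
    	printf("max_length = %u\n", reg_read() + 1);	/* 0x01000000: 16 MiB */

    	reg_write(0x00);
    	printf("length_align_mask = %u\n", reg_read());	/* 3: 4-byte lengths */
    	return 0;
    }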
@@ -631,28 +930,28 @@
 	if (IS_ERR(dmac->clk))
 		return PTR_ERR(dmac->clk);
 
+	ret = clk_prepare_enable(dmac->clk);
+	if (ret < 0)
+		return ret;
+
+	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
+
+	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
+		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
+	else
+		ret = axi_dmac_parse_dt(&pdev->dev, dmac);
+
+	if (ret < 0)
+		goto err_clk_disable;
+
 	INIT_LIST_HEAD(&dmac->chan.active_descs);
 
-	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
-	if (of_channels == NULL)
-		return -ENODEV;
-
-	for_each_child_of_node(of_channels, of_chan) {
-		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
-		if (ret) {
-			of_node_put(of_chan);
-			of_node_put(of_channels);
-			return -EINVAL;
-		}
-	}
-	of_node_put(of_channels);
-
-	pdev->dev.dma_parms = &dmac->dma_parms;
-	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	dma_dev = &dmac->dma_dev;
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
 	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
 	dma_dev->device_tx_status = dma_cookie_status;
 	dma_dev->device_issue_pending = axi_dmac_issue_pending;
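
The version gate above works because ADI_AXI_PCORE_VER() packs major.minor.patch into one word, so plain integer comparison orders releases; the sketch below mirrors the macro in linux/fpga/adi-axi-common.h:

    #include <stdio.h>

    #define ADI_AXI_PCORE_VER(major, minor, patch) \
    	(((major) << 16) | ((minor) << 8) | (patch))

    int main(void)
    {
    	unsigned int v = ADI_AXI_PCORE_VER(4, 2, 'b');

    	printf("%d\n", v >= ADI_AXI_PCORE_VER(4, 2, 'a'));	/* 1: has partial xfer */
    	printf("%d\n", v >= ADI_AXI_PCORE_VER(4, 3, 'a'));	/* 0: still needs DT config */
    	return 0;
    }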
@@ -672,9 +971,11 @@
 	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
 	vchan_init(&dmac->chan.vchan, dma_dev);
 
-	ret = clk_prepare_enable(dmac->clk);
-	if (ret < 0)
-		return ret;
+	ret = axi_dmac_detect_caps(dmac, version);
+	if (ret)
+		goto err_clk_disable;
+
+	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
 
 	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
 
@@ -694,8 +995,17 @@
 
 	platform_set_drvdata(pdev, dmac);
 
+	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
+				       &axi_dmac_regmap_config);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		goto err_free_irq;
+	}
+
 	return 0;
 
+err_free_irq:
+	free_irq(dmac->irq, dmac);
 err_unregister_of:
 	of_dma_controller_free(pdev->dev.of_node);
 err_unregister_device:
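
Once devm_regmap_init_mmio() succeeds, the register map is reachable through the standard regmap API (and its debugfs view), but only for registers that axi_dmac_regmap_rdwr() admits. A kernel-context sketch of a driver-side read through it; axi_dmac_dump_status() is a hypothetical helper, not part of this patch:

    static int axi_dmac_dump_status(struct device *dev, struct regmap *regmap)
    {
    	unsigned int val;
    	int ret;

    	/* regmap rejects registers not admitted by axi_dmac_regmap_rdwr(). */
    	ret = regmap_read(regmap, AXI_DMAC_REG_STATUS, &val);
    	if (ret)
    		return ret;

    	dev_info(dev, "AXI-DMAC status: 0x%08x\n", val);
    	return 0;
    }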