2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/dma/owl-dma.c
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/slab.h>
 #include "virt-dma.h"
 
@@ -119,30 +120,39 @@
 #define BIT_FIELD(val, width, shift, newshift)	\
 		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
 
+/* Frame count value is fixed as 1 */
+#define FCNT_VAL 0x1
+
 /**
- * struct owl_dma_lli_hw - Hardware link list for dma transfer
- * @next_lli: physical address of the next link list
- * @saddr: source physical address
- * @daddr: destination physical address
- * @flen: frame length
- * @fcnt: frame count
- * @src_stride: source stride
- * @dst_stride: destination stride
- * @ctrla: dma_mode and linklist ctrl config
- * @ctrlb: interrupt config
- * @const_num: data for constant fill
+ * enum owl_dmadesc_offsets - Describe DMA descriptor, hardware link
+ * list for dma transfer
+ * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
+ * @OWL_DMADESC_SADDR: source physical address
+ * @OWL_DMADESC_DADDR: destination physical address
+ * @OWL_DMADESC_FLEN: frame length
+ * @OWL_DMADESC_SRC_STRIDE: source stride
+ * @OWL_DMADESC_DST_STRIDE: destination stride
+ * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
+ * @OWL_DMADESC_CTRLB: interrupt config
+ * @OWL_DMADESC_CONST_NUM: data for constant fill
+ * @OWL_DMADESC_SIZE: max size of this enum
  */
-struct owl_dma_lli_hw {
-	u32	next_lli;
-	u32	saddr;
-	u32	daddr;
-	u32	flen:20;
-	u32	fcnt:12;
-	u32	src_stride;
-	u32	dst_stride;
-	u32	ctrla;
-	u32	ctrlb;
-	u32	const_num;
+enum owl_dmadesc_offsets {
+	OWL_DMADESC_NEXT_LLI = 0,
+	OWL_DMADESC_SADDR,
+	OWL_DMADESC_DADDR,
+	OWL_DMADESC_FLEN,
+	OWL_DMADESC_SRC_STRIDE,
+	OWL_DMADESC_DST_STRIDE,
+	OWL_DMADESC_CTRLA,
+	OWL_DMADESC_CTRLB,
+	OWL_DMADESC_CONST_NUM,
+	OWL_DMADESC_SIZE
+};
+
+enum owl_dma_id {
+	S900_DMA,
+	S700_DMA,
 };
 
 /**
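
Aside (illustration only, not part of the patch): replacing the bitfield struct with a u32 array indexed by enum owl_dmadesc_offsets is what makes the S700 support below possible. Each descriptor word keeps a fixed index, while the split of the shared words can be chosen per SoC at runtime, which a hardcoded C bitfield layout cannot express. A minimal sketch, with invented addresses:

	u32 hw[OWL_DMADESC_SIZE];

	hw[OWL_DMADESC_SADDR] = 0x80000000;	/* invented source address */
	hw[OWL_DMADESC_DADDR] = 0x81000000;	/* invented destination address */
	/* What lives in hw[OWL_DMADESC_FLEN] and hw[OWL_DMADESC_CTRLB] beyond
	 * the length is decided at run time from od->devid; see
	 * owl_dma_cfg_lli() below. */
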
@@ -152,7 +162,7 @@
  * @node: node for txd's lli_list
  */
 struct owl_dma_lli {
-	struct owl_dma_lli_hw	hw;
+	u32			hw[OWL_DMADESC_SIZE];
 	dma_addr_t		phys;
 	struct list_head	node;
 };
@@ -161,10 +171,12 @@
  * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
  * @vd: virtual DMA descriptor
  * @lli_list: link list of lli nodes
+ * @cyclic: flag to indicate cyclic transfers
  */
 struct owl_dma_txd {
 	struct virt_dma_desc	vd;
 	struct list_head	lli_list;
+	bool			cyclic;
 };
 
 /**
@@ -184,11 +196,15 @@
  * @vc: wrappped virtual channel
  * @pchan: the physical channel utilized by this channel
  * @txd: active transaction on this channel
+ * @cfg: slave configuration for this channel
+ * @drq: physical DMA request ID for this channel
  */
 struct owl_dma_vchan {
 	struct virt_dma_chan	vc;
 	struct owl_dma_pchan	*pchan;
 	struct owl_dma_txd	*txd;
+	struct dma_slave_config cfg;
+	u8			drq;
 };
 
 /**
@@ -198,10 +214,12 @@
  * @clk: clock for the DMA controller
  * @lock: a lock to use when change DMA controller global register
  * @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt ID for the DMA controller
  * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of physical channels
 * @vchans: array of data for the physical channels
+ * @devid: device id based on OWL SoC
  */
 struct owl_dma {
 	struct dma_device	dma;
@@ -216,6 +234,7 @@
 
 	unsigned int		nr_vchans;
 	struct owl_dma_vchan	*vchans;
+	enum owl_dma_id		devid;
 };
 
 static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
@@ -305,9 +324,18 @@
 {
 	u32 ctl;
 
+	/*
+	 * Irrespective of the SoC, ctrlb value starts filling from
+	 * bit 18.
+	 */
 	ctl = BIT_FIELD(int_ctl, 7, 0, 18);
 
 	return ctl;
+}
+
+static u32 llc_hw_flen(struct owl_dma_lli *lli)
+{
+	return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0);
 }
 
 static void owl_dma_free_lli(struct owl_dma *od,
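
Aside (illustration only, not part of the patch): llc_hw_flen() masks with GENMASK(19, 0) because on the S900 the FLEN word also carries the 12-bit frame count in bits 31:20 (see owl_dma_cfg_lli() below); only the low 20 bits are the byte length. A worked example with an invented descriptor word:

	u32 flen_word = 0x00100800;	/* fcnt = 1 in bits 31:20, flen = 0x800 */

	flen_word & GENMASK(19, 0);		/* -> 0x800, the frame length */
	BIT_FIELD(flen_word, 12, 20, 0);	/* -> 1, the frame count */
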
@@ -334,13 +362,16 @@
 
 static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
 					   struct owl_dma_lli *prev,
-					   struct owl_dma_lli *next)
+					   struct owl_dma_lli *next,
+					   bool is_cyclic)
 {
-	list_add_tail(&next->node, &txd->lli_list);
+	if (!is_cyclic)
+		list_add_tail(&next->node, &txd->lli_list);
 
 	if (prev) {
-		prev->hw.next_lli = next->phys;
-		prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
+		prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;
+		prev->hw[OWL_DMADESC_CTRLA] |=
+					llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
 	}
 
 	return next;
@@ -349,10 +380,12 @@
 static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 				  struct owl_dma_lli *lli,
 				  dma_addr_t src, dma_addr_t dst,
-				  u32 len, enum dma_transfer_direction dir)
+				  u32 len, enum dma_transfer_direction dir,
+				  struct dma_slave_config *sconfig,
+				  bool is_cyclic)
 {
-	struct owl_dma_lli_hw *hw = &lli->hw;
-	u32 mode;
+	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
+	u32 mode, ctrlb;
 
 	mode = OWL_DMA_MODE_PW(0);
 
@@ -363,23 +396,70 @@
 		       OWL_DMA_MODE_DAM_INC;
 
 		break;
+	case DMA_MEM_TO_DEV:
+		mode |= OWL_DMA_MODE_TS(vchan->drq)
+			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
+			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
+
+		/*
+		 * Hardware only supports 32bit and 8bit buswidth. Since the
+		 * default is 32bit, select 8bit only when requested.
+		 */
+		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+		break;
+	case DMA_DEV_TO_MEM:
+		mode |= OWL_DMA_MODE_TS(vchan->drq)
+			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
+			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
+
+		/*
+		 * Hardware only supports 32bit and 8bit buswidth. Since the
+		 * default is 32bit, select 8bit only when requested.
+		 */
+		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+		break;
 	default:
 		return -EINVAL;
 	}
 
-	hw->next_lli = 0; /* One link list by default */
-	hw->saddr = src;
-	hw->daddr = dst;
+	lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode,
+						  OWL_DMA_LLC_SAV_LOAD_NEXT |
+						  OWL_DMA_LLC_DAV_LOAD_NEXT);
 
-	hw->fcnt = 1; /* Frame count fixed as 1 */
-	hw->flen = len; /* Max frame length is 1MB */
-	hw->src_stride = 0;
-	hw->dst_stride = 0;
-	hw->ctrla = llc_hw_ctrla(mode,
-				 OWL_DMA_LLC_SAV_LOAD_NEXT |
-				 OWL_DMA_LLC_DAV_LOAD_NEXT);
+	if (is_cyclic)
+		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
+	else
+		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
 
-	hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+	lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */
+	lli->hw[OWL_DMADESC_SADDR] = src;
+	lli->hw[OWL_DMADESC_DADDR] = dst;
+	lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
+	lli->hw[OWL_DMADESC_DST_STRIDE] = 0;
+
+	if (od->devid == S700_DMA) {
+		/* Max frame length is 1MB */
+		lli->hw[OWL_DMADESC_FLEN] = len;
+		/*
+		 * On S700, word starts from offset 0x1C is shared between
+		 * frame count and ctrlb, where first 12 bits are for frame
+		 * count and rest of 20 bits are for ctrlb.
+		 */
+		lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
+	} else {
+		/*
+		 * On S900, word starts from offset 0xC is shared between
+		 * frame length (max frame length is 1MB) and frame count,
+		 * where first 20 bits are for frame length and rest of
+		 * 12 bits are for frame count.
+		 */
+		lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
+		lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
+	}
 
 	return 0;
 }
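
Aside (illustration only, not part of the patch): the devid branch above is the heart of the S700 support. Both SoCs store a frame length, a frame count and ctrlb, but pack them into different words. With the invented value len = 0x1000, and ctrlb already positioned by llc_hw_ctrlb():

	/* S900: fcnt shares the word at offset 0xC with flen */
	lli->hw[OWL_DMADESC_FLEN]  = 0x1000 | FCNT_VAL << 20;	/* 0x00101000 */
	lli->hw[OWL_DMADESC_CTRLB] = ctrlb;

	/* S700: fcnt shares the word at offset 0x1C with ctrlb instead */
	lli->hw[OWL_DMADESC_FLEN]  = 0x1000;
	lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
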
@@ -441,6 +521,16 @@
 	spin_unlock_irqrestore(&od->lock, flags);
 }
 
+static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
+{
+	pchan_writel(pchan, 1, OWL_DMAX_PAUSE);
+}
+
+static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
+{
+	pchan_writel(pchan, 0, OWL_DMAX_PAUSE);
+}
+
 static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
 {
 	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
@@ -462,7 +552,10 @@
 	lli = list_first_entry(&txd->lli_list,
 			       struct owl_dma_lli, node);
 
-	int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+	if (txd->cyclic)
+		int_ctl = OWL_DMA_INTCTL_BLOCK;
+	else
+		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
 
 	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
 	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
@@ -528,7 +621,7 @@
 
 		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
 
-		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
+		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
 			dev_dbg(od->dma.dev,
 				"global and channel IRQ pending match err\n");
 
@@ -618,7 +711,56 @@
 	}
 
 	vchan_get_all_descriptors(&vchan->vc, &head);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
 	vchan_dma_desc_free_list(&vchan->vc, &head);
+
+	return 0;
+}
+
+static int owl_dma_config(struct dma_chan *chan,
+			  struct dma_slave_config *config)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
+
+	return 0;
+}
+
+static int owl_dma_pause(struct dma_chan *chan)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	owl_dma_pause_pchan(vchan->pchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int owl_dma_resume(struct dma_chan *chan)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	unsigned long flags;
+
+	if (!vchan->pchan && !vchan->txd)
+		return 0;
+
+	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	owl_dma_resume_pchan(vchan->pchan);
 
 	spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
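
Aside (illustration only, not part of the patch): with device_config, device_pause and device_resume wired up in probe below, clients reach these paths through the standard dmaengine wrappers. A hedged consumer-side sketch (the wrappers are real linux/dmaengine.h APIs; chan and dev_fifo_phys are placeholders):

	struct dma_slave_config cfg = {
		.dst_addr = dev_fifo_phys,	/* hypothetical device FIFO */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,	/* selects NDDBW_8BIT */
	};

	dmaengine_slave_config(chan, &cfg);	/* memcpy'd into vchan->cfg */
	dmaengine_pause(chan);			/* reaches owl_dma_pause_pchan() */
	dmaengine_resume(chan);			/* reaches owl_dma_resume_pchan() */

Worth noting: the pause/resume helpers pass their arguments as pchan_writel(pchan, 1, OWL_DMAX_PAUSE), while the other call sites in this file use the (pchan, reg, data) order; readers comparing against the register layout may want to double-check this.
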
@@ -649,7 +791,7 @@
 		/* Start from the next active node */
 		if (lli->phys == next_lli_phy) {
 			list_for_each_entry(lli, &txd->lli_list, node)
-				bytes += lli->hw.flen;
+				bytes += llc_hw_flen(lli);
 			break;
 		}
 	}
@@ -680,7 +822,7 @@
 	if (vd) {
 		txd = to_owl_txd(&vd->tx);
 		list_for_each_entry(lli, &txd->lli_list, node)
-			bytes += lli->hw.flen;
+			bytes += llc_hw_flen(lli);
 	} else {
 		bytes = owl_dma_getbytes_chan(vchan);
 	}
@@ -752,19 +894,147 @@
 		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
 
 		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
-				      bytes, DMA_MEM_TO_MEM);
+				      bytes, DMA_MEM_TO_MEM,
+				      &vchan->cfg, txd->cyclic);
 		if (ret) {
 			dev_warn(chan2dev(chan), "failed to config lli\n");
 			goto err_txd_free;
 		}
 
-		prev = owl_dma_add_lli(txd, prev, lli);
+		prev = owl_dma_add_lli(txd, prev, lli, false);
 	}
 
 	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
 
 err_txd_free:
 	owl_dma_free_txd(od, txd);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor
+		*owl_dma_prep_slave_sg(struct dma_chan *chan,
+				       struct scatterlist *sgl,
+				       unsigned int sg_len,
+				       enum dma_transfer_direction dir,
+				       unsigned long flags, void *context)
+{
+	struct owl_dma *od = to_owl_dma(chan->device);
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct owl_dma_txd *txd;
+	struct owl_dma_lli *lli, *prev = NULL;
+	struct scatterlist *sg;
+	dma_addr_t addr, src = 0, dst = 0;
+	size_t len;
+	int ret, i;
+
+	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+	if (!txd)
+		return NULL;
+
+	INIT_LIST_HEAD(&txd->lli_list);
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+
+		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
+			dev_err(od->dma.dev,
+				"frame length exceeds max supported length");
+			goto err_txd_free;
+		}
+
+		lli = owl_dma_alloc_lli(od);
+		if (!lli) {
+			dev_err(chan2dev(chan), "failed to allocate lli");
+			goto err_txd_free;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			src = addr;
+			dst = sconfig->dst_addr;
+		} else {
+			src = sconfig->src_addr;
+			dst = addr;
+		}
+
+		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
+				      txd->cyclic);
+		if (ret) {
+			dev_warn(chan2dev(chan), "failed to config lli");
+			goto err_txd_free;
+		}
+
+		prev = owl_dma_add_lli(txd, prev, lli, false);
+	}
+
+	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+	owl_dma_free_txd(od, txd);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor
+		*owl_prep_dma_cyclic(struct dma_chan *chan,
+				     dma_addr_t buf_addr, size_t buf_len,
+				     size_t period_len,
+				     enum dma_transfer_direction dir,
+				     unsigned long flags)
+{
+	struct owl_dma *od = to_owl_dma(chan->device);
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct owl_dma_txd *txd;
+	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
+	dma_addr_t src = 0, dst = 0;
+	unsigned int periods = buf_len / period_len;
+	int ret, i;
+
+	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+	if (!txd)
+		return NULL;
+
+	INIT_LIST_HEAD(&txd->lli_list);
+	txd->cyclic = true;
+
+	for (i = 0; i < periods; i++) {
+		lli = owl_dma_alloc_lli(od);
+		if (!lli) {
+			dev_warn(chan2dev(chan), "failed to allocate lli");
+			goto err_txd_free;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			src = buf_addr + (period_len * i);
+			dst = sconfig->dst_addr;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src = sconfig->src_addr;
+			dst = buf_addr + (period_len * i);
+		}
+
+		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
+				      dir, sconfig, txd->cyclic);
+		if (ret) {
+			dev_warn(chan2dev(chan), "failed to config lli");
+			goto err_txd_free;
+		}
+
+		if (!first)
+			first = lli;
+
+		prev = owl_dma_add_lli(txd, prev, lli, false);
+	}
+
+	/* close the cyclic list */
+	owl_dma_add_lli(txd, prev, first, true);
+
+	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+	owl_dma_free_txd(od, txd);
+
 	return NULL;
 }
 
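
Aside (illustration only, not part of the patch): owl_prep_dma_cyclic() builds one lli per period and then closes the ring; the final owl_dma_add_lli(txd, prev, first, true) links the last descriptor back to the first without re-adding it to lli_list. Consumer-side this is driven through the generic helper (a real dmaengine API; buffer sizes invented):

	/* e.g. an audio ring of 4 periods x 4 KiB */
	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * SZ_4K, SZ_4K,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);

Because cyclic descriptors use OWL_DMA_INTCTL_BLOCK rather than OWL_DMA_INTCTL_SUPER_BLOCK, an interrupt fires after every period instead of once for the whole transfer.
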
@@ -788,22 +1058,45 @@
 	}
 }
 
+static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
+					 struct of_dma *ofdma)
+{
+	struct owl_dma *od = ofdma->of_dma_data;
+	struct owl_dma_vchan *vchan;
+	struct dma_chan *chan;
+	u8 drq = dma_spec->args[0];
+
+	if (drq > od->nr_vchans)
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&od->dma);
+	if (!chan)
+		return NULL;
+
+	vchan = to_owl_vchan(chan);
+	vchan->drq = drq;
+
+	return chan;
+}
+
+static const struct of_device_id owl_dma_match[] = {
+	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
+	{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, owl_dma_match);
+
 static int owl_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct owl_dma *od;
-	struct resource *res;
 	int ret, i, nr_channels, nr_requests;
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
-	od->base = devm_ioremap_resource(&pdev->dev, res);
+	od->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(od->base))
 		return PTR_ERR(od->base);
 
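
Aside (illustration only, not part of the patch): owl_dma_of_xlate() maps the single device-tree cell to the channel's DRQ line. A hedged sketch of the consumer side (node names and "tx" are hypothetical; dma_request_chan() is the real API):

	/* Device tree:
	 *	uart0: serial@... {
	 *		dmas = <&dma 2>;	(args[0] == 2 becomes vchan->drq)
	 *		dma-names = "tx";
	 *	};
	 */
	struct dma_chan *chan = dma_request_chan(dev, "tx");
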
@@ -822,6 +1115,8 @@
 	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
 		 nr_channels, nr_requests);
 
+	od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
+
 	od->nr_pchans = nr_channels;
 	od->nr_vchans = nr_requests;
 
@@ -831,12 +1126,19 @@
 	spin_lock_init(&od->lock);
 
 	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
 
 	od->dma.dev = &pdev->dev;
 	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
 	od->dma.device_tx_status = owl_dma_tx_status;
 	od->dma.device_issue_pending = owl_dma_issue_pending;
 	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
+	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
+	od->dma.device_config = owl_dma_config;
+	od->dma.device_pause = owl_dma_pause;
+	od->dma.device_resume = owl_dma_resume;
 	od->dma.device_terminate_all = owl_dma_terminate_all;
 	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -908,8 +1210,18 @@
 		goto err_pool_free;
 	}
 
+	/* Device-tree DMA controller registration */
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 owl_dma_of_xlate, od);
+	if (ret) {
+		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+		goto err_dma_unregister;
+	}
+
 	return 0;
 
+err_dma_unregister:
+	dma_async_device_unregister(&od->dma);
 err_pool_free:
 	clk_disable_unprepare(od->clk);
 	dma_pool_destroy(od->lli_pool);
@@ -921,6 +1233,7 @@
 {
 	struct owl_dma *od = platform_get_drvdata(pdev);
 
+	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&od->dma);
 
 	/* Mask all interrupts for this execution environment */
@@ -936,12 +1249,6 @@
 
 	return 0;
 }
-
-static const struct of_device_id owl_dma_match[] = {
-	{ .compatible = "actions,s900-dma", },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, owl_dma_match);
 
 static struct platform_driver owl_dma_driver = {
 	.probe	= owl_dma_probe,