2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/include/linux/dmaengine.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
 */
 #ifndef LINUX_DMAENGINE_H
 #define LINUX_DMAENGINE_H
@@ -24,6 +12,7 @@
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <linux/types.h>
+#include <linux/android_kabi.h>
 #include <asm/page.h>
 
 /**
@@ -51,6 +40,7 @@
 	DMA_IN_PROGRESS,
 	DMA_PAUSED,
 	DMA_ERROR,
+	DMA_OUT_OF_ORDER,
 };
 
 /**
@@ -73,6 +63,9 @@
 	DMA_SLAVE,
 	DMA_CYCLIC,
 	DMA_INTERLEAVE,
+	DMA_COMPLETION_NO_ORDER,
+	DMA_REPEAT,
+	DMA_LOAD_EOT,
 /* last transaction type for creation of the capabilities mask */
 	DMA_TX_TYPE_END,
 };
@@ -95,9 +88,9 @@
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is collection of contiguous bytes to be transferred.
  * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  * that when repeated an integral number of times, specifies the transfer.
  * A transfer template is specification of a Frame, the number of times
@@ -152,6 +145,7 @@
  *	 Otherwise, destination is filled contiguously (icg ignored).
  *	 Ignored if dst_inc is false.
  * @numf: Number of frames in this template.
+ * @nump: Number of period frames in this template.
  * @frame_size: Number of chunks in a frame i.e, size of sgl[].
  * @sgl: Array of {chunk,icg} pairs that make up a frame.
  */
@@ -164,8 +158,11 @@
 	bool src_sgl;
 	bool dst_sgl;
 	size_t numf;
+#ifdef CONFIG_NO_GKI
+	size_t nump;
+#endif
 	size_t frame_size;
-	struct data_chunk sgl[0];
+	struct data_chunk sgl[];
 };
 
 /**
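
A minimal sketch of how a client might fill this template for a one-frame, two-chunk transfer follows; the demo_* names, channel, addresses and sizes are illustrative only and are not part of this patch (the new nump field is left untouched since it only exists on CONFIG_NO_GKI builds). Assumes <linux/dmaengine.h> and <linux/slab.h>.

/* Hedged sketch: hypothetical client code, not part of this patch. */
static struct dma_async_tx_descriptor *
demo_prep_interleaved(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(struct_size(xt, sgl, 2), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_DEV;
	xt->src_inc = true;
	xt->src_sgl = true;		/* honour per-chunk ICG on the source */
	xt->dst_inc = false;
	xt->dst_sgl = false;
	xt->numf = 1;			/* one frame ... */
	xt->frame_size = 2;		/* ... made of two {chunk,icg} pairs */
	xt->sgl[0].size = 64;
	xt->sgl[0].icg = 16;		/* 16-byte gap before the next chunk */
	xt->sgl[1].size = 64;
	xt->sgl[1].icg = 0;

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	/* clients commonly free (or stack-allocate) the template after prep */
	kfree(xt);
	return desc;
}
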
@@ -174,7 +171,7 @@
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
- *  acknowledges receipt, i.e. has has a chance to establish any dependency
+ *  acknowledges receipt, i.e. has a chance to establish any dependency
  *  chains
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
@@ -188,6 +185,16 @@
  * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
  *  data and the descriptor should be in different format from normal
  *  data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ *  repeated when it ends until a transaction is issued on the same channel
+ *  with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ *  interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ *  active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ *  repeated transaction ends. Not setting this flag when the previously queued
+ *  transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ *  to never be processed and stay in the issued queue forever. The flag is
+ *  ignored if the previous transaction is not a repeated transaction.
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -198,6 +205,8 @@
 	DMA_PREP_FENCE = (1 << 5),
 	DMA_CTRL_REUSE = (1 << 6),
 	DMA_PREP_CMD = (1 << 7),
+	DMA_PREP_REPEAT = (1 << 8),
+	DMA_PREP_LOAD_EOT = (1 << 9),
 };
 
 /**
@@ -231,6 +240,62 @@
  * @bytes_transferred: byte counter
  */
 
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ *  client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ *  helper) to the descriptor.
+ *
+ * Client drivers interested to use this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *	construct the metadata in the client's buffer
+ *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ *	descriptor
+ *   3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ *	descriptor
+ *   3. submit the transfer
+ *   4. when the transfer is completed, the metadata should be available in the
+ *	attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ *  driver. The client driver can ask for the pointer, maximum size and the
+ *  currently used size of the metadata and can directly update or read it.
+ *  dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() is
+ *  provided as helper functions.
+ *
+ *  Note: the metadata area for the descriptor is no longer valid after the
+ *  transfer has been completed (valid up to the point when the completion
+ *  callback returns if used).
+ *
+ * Client drivers interested to use this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ *	metadata area
+ *   3. update the metadata at the pointer
+ *   4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ *	of data the client has placed into the metadata buffer
+ *   5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. submit the transfer
+ *   3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ *	pointer to the engine's metadata area
+ *   4. Read out the metadata from the pointer
+ *
+ * Note: the two mode is not compatible and clients must use one mode for a
+ * descriptor.
+ */
+enum dma_desc_metadata_mode {
+	DESC_METADATA_NONE = 0,
+	DESC_METADATA_CLIENT = BIT(0),
+	DESC_METADATA_ENGINE = BIT(1),
+};
+
 struct dma_chan_percpu {
 	/* stats */
 	unsigned long memcpy_count;
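
A hedged sketch of the DESC_METADATA_CLIENT flow documented above, for the DMA_MEM_TO_DEV case; the demo_* function and its arguments are hypothetical, only the dmaengine calls are real.

/* Hedged sketch: prepare, attach a client-owned metadata buffer, submit. */
static int demo_send_with_metadata(struct dma_chan *chan, dma_addr_t buf,
				   size_t len, void *md, size_t md_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
		return -EOPNOTSUPP;

	/* 1. prepare the descriptor */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	/* 2. attach the metadata buffer (already filled by the client) */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;

	/* 3. submit the transfer */
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(chan);
	return 0;
}
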
@@ -250,10 +315,14 @@
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
  * @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
  * @cookie: last cookie value returned to client
  * @completed_cookie: last completed cookie for this channel
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
+ * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ *	dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client_count: how many clients are using this channel
@@ -264,12 +333,17 @@
  */
 struct dma_chan {
 	struct dma_device *device;
+	struct device *slave;
 	dma_cookie_t cookie;
 	dma_cookie_t completed_cookie;
 
 	/* sysfs */
 	int chan_id;
 	struct dma_chan_dev *dev;
+	const char *name;
+#ifdef CONFIG_DEBUG_FS
+	char *dbg_client_name;
+#endif
 
 	struct list_head device_node;
 	struct dma_chan_percpu __percpu *local;
@@ -288,13 +362,11 @@
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
 	struct dma_chan *chan;
 	struct device device;
 	int dev_id;
-	atomic_t *idr_ref;
 };
 
 /**
@@ -351,6 +423,9 @@
  * @slave_id: Slave requester id. Only valid for slave channels. The dma
  *  slave peripheral will have unique id as dma requester which need to be
  *  pass as slave config.
+ * @peripheral_config: peripheral configuration for programming peripheral
+ *  for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
  *
  * This struct is passed in as configuration data to a DMA engine
  * in order to set up a certain channel for DMA transport at runtime.
@@ -376,8 +451,8 @@
 	u32 dst_port_window_size;
 	bool device_fc;
 	unsigned int slave_id;
-	unsigned int src_interlace_size;
-	unsigned int dst_interlace_size;
+	void *peripheral_config;
+	size_t peripheral_size;
 };
 
 /**
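
A hedged sketch of how a client could pass a vendor-specific configuration blob through the new peripheral_config/peripheral_size fields; struct demo_periph_cfg and the demo_* names are made up, only the dma_slave_config plumbing is from this patch.

/* Hedged sketch: hypothetical peripheral-specific configuration. */
struct demo_periph_cfg {
	u32 threshold;
	u32 opcode;
};

static int demo_config_tx(struct dma_chan *chan, dma_addr_t fifo,
			  struct demo_periph_cfg *pcfg)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 8,
		.peripheral_config = pcfg,		/* consumed by the controller driver */
		.peripheral_size = sizeof(*pcfg),
	};

	return dmaengine_slave_config(chan, &cfg);
}
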
@@ -416,7 +491,11 @@
  * Since the enum dma_transfer_direction is not defined as bit flag for
  * each type, the dma controller should set BIT(<TYPE>) and same
  * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
  * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ *  DMA tansaction with no software intervention for reinitialization.
+ *  Zero value means unlimited number of entries.
  * @cmd_pause: true, if pause is supported (i.e. for reading residue or
  *  for resume later)
  * @cmd_resume: true, if resume is supported
@@ -429,7 +508,9 @@
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
 	u32 directions;
+	u32 min_burst;
 	u32 max_burst;
+	u32 max_sg_burst;
 	bool cmd_pause;
 	bool cmd_resume;
 	bool cmd_terminate;
@@ -486,7 +567,19 @@
 	struct device *dev;
 	struct kref kref;
 	size_t len;
-	dma_addr_t addr[0];
+	dma_addr_t addr[];
+};
+
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+	int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+		      size_t len);
+
+	void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+			 size_t *payload_len, size_t *max_len);
+	int (*set_len)(struct dma_async_tx_descriptor *desc,
+		       size_t payload_len);
 };
 
 /**
@@ -495,13 +588,18 @@
  * @cookie: tracking cookie for this transaction, set to -EBUSY if
  *	this tx is sitting on a dependency list
  * @flags: flags to augment operation preparation, control completion, and
- *	communicate status
+ *  communicate status
  * @phys: physical address of the descriptor
  * @chan: target channel for this operation
  * @tx_submit: accept the descriptor, assign ordered cookie and mark the
  *	descriptor pending. To be pushed on .issue_pending() call
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ *	DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ *	DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ *	DMA driver if metadata mode is supported with the descriptor
  * ---async_tx api specific fields---
  * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
@@ -518,6 +616,8 @@
 	dma_async_tx_callback_result callback_result;
 	void *callback_param;
 	struct dmaengine_unmap_data *unmap;
+	enum dma_desc_metadata_mode desc_metadata_mode;
+	struct dma_descriptor_metadata_ops *metadata_ops;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
@@ -553,10 +653,11 @@
 
 static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
 {
-	if (tx->unmap) {
-		dmaengine_unmap_put(tx->unmap);
-		tx->unmap = NULL;
-	}
+	if (!tx->unmap)
+		return;
+
+	dmaengine_unmap_put(tx->unmap);
+	tx->unmap = NULL;
 }
 
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -625,11 +726,13 @@
  * @residue: the remaining number of bytes left to transmit
  *	on the selected transfer for states DMA_IN_PROGRESS and
  *	DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
  */
 struct dma_tx_state {
 	dma_cookie_t last;
 	dma_cookie_t used;
 	u32 residue;
+	u32 in_flight_bytes;
 };
 
 /**
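
For context, clients usually read this state through dmaengine_tx_status(); a hedged sketch follows (the demo_* name is hypothetical, and in_flight_bytes is only meaningful when the controller driver reports it).

/* Hedged sketch: poll a previously submitted cookie and read the residue. */
static void demo_check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("remaining %u bytes (%u in flight)\n",
			state.residue, state.in_flight_bytes);
}
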
@@ -680,6 +783,7 @@
  * @global_node: list_head for global dma_device_list
  * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
  * @copy_align: alignment shift for memcpy operations
@@ -697,7 +801,11 @@
  * Since the enum dma_transfer_direction is not defined as bit flag for
  * each type, the dma controller should set BIT(<TYPE>) and same
  * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
  * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ *  DMA tansaction with no software intervention for reinitialization.
+ *  Zero value means unlimited number of entries.
  * @residue_granularity: granularity of the transfer residue reported
  *	by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
@@ -717,6 +825,8 @@
  *	be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ *	with per-channel specific ones
  * @device_config: Pushes a new configuration to a channel, return 0 or an error
  *	code
  * @device_pause: Pauses any transfer happening on a channel. Returns
@@ -733,15 +843,23 @@
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  * @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called sometime atfer dma_async_device_unregister() is
+ *	called and there are no further references to this structure. This
+ *	must be implemented to free resources however many existing drivers
+ *	do not and are therefore not safe to unbind while in use.
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ *	will be used when this is omitted, but custom code can show extra,
+ *	controller specific information.
  */
 struct dma_device {
-
+	struct kref ref;
 	unsigned int chancnt;
 	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
 	struct dma_filter filter;
 	dma_cap_mask_t cap_mask;
+	enum dma_desc_metadata_mode desc_metadata_modes;
 	unsigned short max_xor;
 	unsigned short max_pq;
 	enum dmaengine_alignment copy_align;
@@ -753,11 +871,15 @@
 	int dev_id;
 	struct device *dev;
 	struct module *owner;
+	struct ida chan_ida;
+	struct mutex chan_mutex;	/* to protect chan_ida */
 
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
 	u32 directions;
+	u32 min_burst;
 	u32 max_burst;
+	u32 max_sg_burst;
 	bool descriptor_reuse;
 	enum dma_residue_granularity residue_granularity;
 
@@ -805,6 +927,8 @@
 			struct dma_chan *chan, dma_addr_t dst, u64 data,
 			unsigned long flags);
 
+	void (*device_caps)(struct dma_chan *chan,
+			    struct dma_slave_caps *caps);
 	int (*device_config)(struct dma_chan *chan,
 			     struct dma_slave_config *config);
 	int (*device_pause)(struct dma_chan *chan);
@@ -816,6 +940,17 @@
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
+	void (*device_release)(struct dma_device *dev);
+	/* debugfs support */
+#ifdef CONFIG_DEBUG_FS
+	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+	struct dentry *dbg_dev_root;
+#endif
+
+	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_RESERVE(2);
+	ANDROID_KABI_RESERVE(3);
+	ANDROID_KABI_RESERVE(4);
 };
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,
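
A hedged sketch of a controller driver wiring up the optional debugfs hook added above; everything prefixed demo_ is hypothetical, and the default summary is used when the hook is left NULL.

#ifdef CONFIG_DEBUG_FS
/* Hedged sketch: custom per-device debugfs summary. */
static void demo_dbg_summary_show(struct seq_file *s, struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node)
		seq_printf(s, "%s: %d client(s)\n",
			   dma_chan_name(chan), chan->client_count);
}
#endif

/* in the driver's probe(), before dma_async_device_register():
 *	demo_dev->ddev.dbg_summary_show = demo_dbg_summary_show;
 */
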
@@ -892,6 +1027,9 @@
 {
 	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
 		return NULL;
+	if (flags & DMA_PREP_REPEAT &&
+	    !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+		return NULL;
 
 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
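
A hedged sketch of the repeat/load-EOT semantics described earlier: start a repeated interleaved transfer and later queue a replacement that takes over when the current repetition ends. Assumes the controller advertises DMA_REPEAT and DMA_LOAD_EOT; the demo_* names and the xt_a/xt_b templates are hypothetical.

/* Hedged sketch: repeated interleaved transfers, not part of this patch. */
static int demo_start_repeated(struct dma_chan *chan,
			       struct dma_interleaved_template *xt_a,
			       struct dma_interleaved_template *xt_b)
{
	struct dma_async_tx_descriptor *desc;

	/* xt_a repeats until a DMA_PREP_LOAD_EOT descriptor is issued */
	desc = dmaengine_prep_interleaved_dma(chan, xt_a,
					      DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
	if (!desc)
		return -EINVAL;		/* e.g. DMA_REPEAT not in cap_mask */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* later: LOAD_EOT makes xt_b replace xt_a once its repetition ends */
	desc = dmaengine_prep_interleaved_dma(chan, xt_b,
					      DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
	if (!desc)
		return -EINVAL;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}
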
@@ -917,6 +1055,41 @@
 	return chan->device->device_prep_dma_memcpy(chan, dest, src,
 						    len, flags);
 }
+
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+		enum dma_desc_metadata_mode mode)
+{
+	if (!chan)
+		return false;
+
+	return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+				   void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+				      size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+				    size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+		struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+	return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+		struct dma_async_tx_descriptor *desc, size_t *payload_len,
+		size_t *max_len)
+{
+	return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+		struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
 
 /**
  * dmaengine_terminate_all() - Terminate all active DMA transfers
@@ -946,7 +1119,7 @@
  * dmaengine_synchronize() needs to be called before it is safe to free
  * any memory that is accessed by previously submitted descriptors or before
  * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
@@ -968,7 +1141,7 @@
 *
 * Synchronizes to the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
@@ -1045,14 +1218,7 @@
 static inline bool dmaengine_check_align(enum dmaengine_alignment align,
 					 size_t off1, size_t off2, size_t len)
 {
-	size_t mask;
-
-	if (!align)
-		return true;
-	mask = (1 << align) - 1;
-	if (mask & (off1 | off2 | len))
-		return false;
-	return true;
+	return !(((1 << align) - 1) & (off1 | off2 | len));
 }
 
 static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
@@ -1126,9 +1292,9 @@
 {
 	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
 		return dma_dev_to_maxpq(dma);
-	else if (dmaf_p_disabled_continue(flags))
+	if (dmaf_p_disabled_continue(flags))
 		return dma_dev_to_maxpq(dma) - 1;
-	else if (dmaf_continue(flags))
+	if (dmaf_continue(flags))
 		return dma_dev_to_maxpq(dma) - 3;
 	BUG();
 }
@@ -1139,7 +1305,7 @@
 	if (inc) {
 		if (dir_icg)
 			return dir_icg;
-		else if (sgl)
+		if (sgl)
 			return icg;
 	}
 
@@ -1305,11 +1471,12 @@
 static inline void
 dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
 {
-	if (st) {
-		st->last = last;
-		st->used = used;
-		st->residue = residue;
-	}
+	if (!st)
+		return;
+
+	st->last = last;
+	st->used = used;
+	st->residue = residue;
 }
 
 #ifdef CONFIG_DMA_ENGINE
@@ -1318,8 +1485,8 @@
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-				       dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+				       dma_filter_fn fn, void *fn_param,
+				       struct device_node *np);
 
 struct dma_chan *dma_request_chan(struct device *dev, const char *name);
 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
@@ -1343,12 +1510,9 @@
 {
 }
 static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-						     dma_filter_fn fn, void *fn_param)
-{
-	return NULL;
-}
-static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
-							  const char *name)
+						     dma_filter_fn fn,
+						     void *fn_param,
+						     struct device_node *np)
 {
 	return NULL;
 }
@@ -1372,8 +1536,6 @@
 }
 #endif
 
-#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
-
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
 	struct dma_slave_caps caps;
@@ -1383,12 +1545,11 @@
 	if (ret)
 		return ret;
 
-	if (caps.descriptor_reuse) {
-		tx->flags |= DMA_CTRL_REUSE;
-		return 0;
-	} else {
+	if (!caps.descriptor_reuse)
 		return -EPERM;
-	}
+
+	tx->flags |= DMA_CTRL_REUSE;
+	return 0;
 }
 
 static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
@@ -1404,10 +1565,10 @@
 static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 {
 	/* this is supported for reusable desc, so check that */
-	if (dmaengine_desc_test_reuse(desc))
-		return desc->desc_free(desc);
-	else
+	if (!dmaengine_desc_test_reuse(desc))
 		return -EPERM;
+
+	return desc->desc_free(desc);
 }
 
 /* --- DMA device --- */
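
A hedged sketch of the descriptor re-use helpers above: mark a descriptor reusable, submit it (it may be resubmitted after completion), and release it explicitly once it is no longer needed. The demo_* name is hypothetical and error handling is trimmed.

/* Hedged sketch: descriptor re-use life cycle, not part of this patch. */
static void demo_reuse_cycle(struct dma_chan *chan,
			     struct dma_async_tx_descriptor *desc)
{
	if (dmaengine_desc_set_reuse(desc))
		return;			/* channel does not support re-use */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* ... after completion the same descriptor may be submitted again ... */

	/* once done, free it while the re-use flag is still set */
	dmaengine_desc_free(desc);
}
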
@@ -1415,15 +1576,25 @@
 int dma_async_device_register(struct dma_device *device);
 int dmaenginem_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+				      struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+					 struct dma_chan *chan);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
-	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+#define dma_request_channel(mask, x, y) \
+	__dma_request_channel(&(mask), x, y, NULL)
+
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+	struct dma_chan *ch = dma_request_chan(dev, name);
+
+	return IS_ERR(ch) ? NULL : ch;
+}
 
 static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
				    dma_filter_fn fn, void *fn_param,
				    struct device *dev, const char *name)
 {
@@ -1436,6 +1607,23 @@
 	if (!fn || !fn_param)
 		return NULL;
 
-	return __dma_request_channel(mask, fn, fn_param);
+	return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+	switch (dir) {
+	case DMA_DEV_TO_MEM:
+		return "DEV_TO_MEM";
+	case DMA_MEM_TO_DEV:
+		return "MEM_TO_DEV";
+	case DMA_MEM_TO_MEM:
+		return "MEM_TO_MEM";
+	case DMA_DEV_TO_DEV:
+		return "DEV_TO_DEV";
+	default:
+		return "invalid";
+	}
 }
 #endif /* DMAENGINE_H */
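
Since dma_request_slave_channel() is marked deprecated above and silently drops the error code, a hedged sketch of the preferred pattern with dma_request_chan(), which returns an ERR_PTR() and so lets -EPROBE_DEFER propagate; the demo_* name and the "tx" channel name are illustrative only.

/* Hedged sketch: acquire a named slave channel at probe time. */
static int demo_probe_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	*out = chan;
	return 0;
	/* release later with dma_release_channel(chan) */
}
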