2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright 2014-2015 Analog Devices Inc.
  * Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2 or later.
  */
 
 #include <linux/slab.h>
@@ -11,8 +10,10 @@
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
 #include <linux/err.h>
+#include <linux/module.h>
 
 #include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
@@ -44,7 +45,8 @@
 	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
 }
 
-static void iio_dmaengine_buffer_block_done(void *data)
+static void iio_dmaengine_buffer_block_done(void *data,
+	const struct dmaengine_result *result)
 {
 	struct iio_dma_buffer_block *block = data;
 	unsigned long flags;
@@ -52,6 +54,7 @@
 	spin_lock_irqsave(&block->queue->list_lock, flags);
 	list_del(&block->head);
 	spin_unlock_irqrestore(&block->queue->list_lock, flags);
+	block->bytes_used -= result->residue;
 	iio_dma_buffer_block_done(block);
 }
 
@@ -73,7 +76,7 @@
 	if (!desc)
 		return -ENOMEM;
 
-	desc->callback = iio_dmaengine_buffer_block_done;
+	desc->callback_result = iio_dmaengine_buffer_block_done;
 	desc->callback_param = block;
 
 	cookie = dmaengine_submit(desc);
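
Annotation: the two hunks above switch the completion hook from callback to callback_result, whose struct dmaengine_result argument carries a residue field: the number of requested bytes the controller did not transfer. Subtracting it from bytes_used means partial transfers report their true payload size to the IIO core. A minimal sketch of the same pattern in a generic dmaengine client (my_block, my_block_done() and my_submit() are illustrative names, not part of this driver):

#include <linux/dmaengine.h>
#include <linux/completion.h>

struct my_block {			/* hypothetical client bookkeeping */
	size_t bytes_used;		/* bytes requested, then bytes actually filled */
	struct completion done;
};

static void my_block_done(void *param, const struct dmaengine_result *result)
{
	struct my_block *block = param;

	/* residue counts the bytes the controller did NOT transfer */
	block->bytes_used -= result->residue;
	complete(&block->done);
}

static int my_submit(struct dma_chan *chan,
		     struct dma_async_tx_descriptor *desc,
		     struct my_block *block)
{
	desc->callback_result = my_block_done;	/* instead of desc->callback */
	desc->callback_param = block;

	if (dma_submit_error(dmaengine_submit(desc)))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
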
@@ -108,7 +111,7 @@
 }
 
 static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
-	.read_first_n = iio_dma_buffer_read,
+	.read = iio_dma_buffer_read,
 	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 	.set_length = iio_dma_buffer_set_length,
 	.request_update = iio_dma_buffer_request_update,
@@ -126,6 +129,24 @@
 	.abort = iio_dmaengine_buffer_abort,
 };
 
+static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
+
+	return sprintf(buf, "%zu\n", dmaengine_buffer->align);
+}
+
+static IIO_DEVICE_ATTR(length_align_bytes, 0444,
+		       iio_dmaengine_buffer_get_length_align, NULL, 0);
+
+static const struct attribute *iio_dmaengine_buffer_attrs[] = {
+	&iio_dev_attr_length_align_bytes.dev_attr.attr,
+	NULL,
+};
+
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
  * @dev: Parent device for the buffer
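
Annotation: the attribute block added above exposes the DMA engine's alignment requirement to userspace as a read-only (0444) buffer attribute; a reader must round its requested buffer length to a multiple of this value. A sketch of consuming it from userspace (the sysfs path assumes the device enumerated as iio:device0, which varies per system):

#include <stdio.h>

int main(void)
{
	unsigned long align;
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/buffer/length_align_bytes", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &align) == 1)
		printf("buffer length must be a multiple of %lu bytes\n", align);
	fclose(f);
	return 0;
}
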
@@ -138,7 +159,7 @@
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	const char *channel)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
@@ -151,7 +172,7 @@
 	if (!dmaengine_buffer)
 		return ERR_PTR(-ENOMEM);
 
-	chan = dma_request_slave_channel_reason(dev, channel);
+	chan = dma_request_chan(dev, channel);
 	if (IS_ERR(chan)) {
 		ret = PTR_ERR(chan);
 		goto err_free;
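
Annotation: dma_request_slave_channel_reason() has always been a thin macro alias for dma_request_chan(), so this hunk moves to the canonical name with identical behaviour: the call returns either a valid channel or an ERR_PTR(), notably -EPROBE_DEFER when the DMA controller has not probed yet. The usual caller pattern, sketched with a hypothetical my_probe():

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/err.h>

static int my_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		/* may be -EPROBE_DEFER; returning it retries probe later */
		return PTR_ERR(chan);

	/* device setup would go here */

	dma_release_channel(chan);
	return 0;
}
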
@@ -179,6 +200,8 @@
 
 	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
 		&iio_dmaengine_default_ops);
+	iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
+		iio_dmaengine_buffer_attrs);
 
 	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
@@ -188,7 +211,6 @@
 	kfree(dmaengine_buffer);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
 
 /**
  * iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -196,7 +218,7 @@
  *
  * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
  */
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 {
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(buffer);
@@ -206,4 +228,46 @@
 
 	iio_buffer_put(buffer);
 }
-EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+
+static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
+{
+	iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
+ * @dev: Parent device for the buffer
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers. The parent device will be used to request the DMA
+ * channel.
+ *
+ * The buffer will be automatically de-allocated once the device gets destroyed.
+ */
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+	const char *channel)
+{
+	struct iio_buffer **bufferp, *buffer;
+
+	bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
+			       sizeof(*bufferp), GFP_KERNEL);
+	if (!bufferp)
+		return ERR_PTR(-ENOMEM);
+
+	buffer = iio_dmaengine_buffer_alloc(dev, channel);
+	if (IS_ERR(buffer)) {
+		devres_free(bufferp);
+		return buffer;
+	}
+
+	*bufferp = buffer;
+	devres_add(dev, bufferp);
+
+	return buffer;
+}
+EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA buffer for the IIO framework");
+MODULE_LICENSE("GPL");
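
Annotation: with iio_dmaengine_buffer_alloc() and iio_dmaengine_buffer_free() now static, the devres-managed wrapper above is the module's only exported entry point; the __devm_iio_dmaengine_buffer_free() release hook frees the buffer (and with it the DMA channel) automatically on unbind. A sketch of a hypothetical consumer attaching the managed buffer during probe (the helper name and the INDIO_BUFFER_HARDWARE usage are illustrative, not taken from this patch):

#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dmaengine.h>

static int my_adc_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_dmaengine_buffer_alloc(dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
	iio_device_attach_buffer(indio_dev, buffer);

	/* no explicit free: devres releases the buffer when dev unbinds */
	return 0;
}
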