@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright 2014-2015 Analog Devices Inc.
  * Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2 or later.
  */
 
 #include <linux/slab.h>
@@ -11,8 +10,10 @@
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
 #include <linux/err.h>
+#include <linux/module.h>
 
 #include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
@@ -44,7 +45,8 @@
 	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
 }
 
-static void iio_dmaengine_buffer_block_done(void *data)
+static void iio_dmaengine_buffer_block_done(void *data,
+		const struct dmaengine_result *result)
 {
 	struct iio_dma_buffer_block *block = data;
 	unsigned long flags;
@@ -52,6 +54,7 @@
 	spin_lock_irqsave(&block->queue->list_lock, flags);
 	list_del(&block->head);
 	spin_unlock_irqrestore(&block->queue->list_lock, flags);
+	block->bytes_used -= result->residue;
 	iio_dma_buffer_block_done(block);
 }
 
@@ -73,7 +76,7 @@
 	if (!desc)
 		return -ENOMEM;
 
-	desc->callback = iio_dmaengine_buffer_block_done;
+	desc->callback_result = iio_dmaengine_buffer_block_done;
 	desc->callback_param = block;
 
 	cookie = dmaengine_submit(desc);
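The switch from `callback` to `callback_result` is what makes the `bytes_used` accounting above possible: the plain completion callback carries no transfer status, while the result variant also receives a `struct dmaengine_result`, whose `residue` field reports how many bytes were *not* transferred. A minimal sketch of that pattern, assuming hypothetical `my_xfer` names (`struct dmaengine_result` and `DMA_TRANS_NOERROR` are real dmaengine framework symbols):

```c
/* Illustrative sketch, not part of the patch: a dmaengine "result"
 * callback. my_xfer and my_xfer_done() are made-up names.
 */
#include <linux/dmaengine.h>

struct my_xfer {
	size_t requested_bytes;
	size_t valid_bytes;
};

static void my_xfer_done(void *param, const struct dmaengine_result *result)
{
	struct my_xfer *xfer = param;

	/* result->residue counts bytes that were NOT transferred;
	 * subtracting it yields the actual payload size, which is
	 * exactly what the patch does with block->bytes_used.
	 */
	if (result->result == DMA_TRANS_NOERROR)
		xfer->valid_bytes = xfer->requested_bytes - result->residue;
	else
		xfer->valid_bytes = 0;
}
```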
@@ -108,7 +111,7 @@
 }
 
 static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
-	.read_first_n = iio_dma_buffer_read,
+	.read = iio_dma_buffer_read,
 	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 	.set_length = iio_dma_buffer_set_length,
 	.request_update = iio_dma_buffer_request_update,
@@ -126,6 +129,24 @@
 	.abort = iio_dmaengine_buffer_abort,
 };
 
+static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
+
+	return sprintf(buf, "%zu\n", dmaengine_buffer->align);
+}
+
+static IIO_DEVICE_ATTR(length_align_bytes, 0444,
+	iio_dmaengine_buffer_get_length_align, NULL, 0);
+
+static const struct attribute *iio_dmaengine_buffer_attrs[] = {
+	&iio_dev_attr_length_align_bytes.dev_attr.attr,
+	NULL,
+};
+
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
  * @dev: Parent device for the buffer
@@ -138,7 +159,7 @@
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	const char *channel)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
@@ -151,7 +172,7 @@
 	if (!dmaengine_buffer)
 		return ERR_PTR(-ENOMEM);
 
-	chan = dma_request_slave_channel_reason(dev, channel);
+	chan = dma_request_chan(dev, channel);
 	if (IS_ERR(chan)) {
 		ret = PTR_ERR(chan);
 		goto err_free;
@@ -179,6 +200,8 @@
 
 	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
 		&iio_dmaengine_default_ops);
+	iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
+		iio_dmaengine_buffer_attrs);
 
 	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
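With `iio_buffer_set_attrs()` wiring the new `length_align_bytes` attribute onto the buffer, userspace can query the DMA length alignment before sizing the buffer instead of guessing. A hedged userspace sketch, assuming the device enumerates as `iio:device0` and the attribute appears in the `buffer/` directory alongside `length`:

```c
/* Userspace sketch; the sysfs path is an assumption for an
 * arbitrary iio:device0, not something this patch guarantees.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/iio/devices/iio:device0/buffer/length_align_bytes";
	FILE *f = fopen(path, "r");
	size_t align, want = 1000;

	if (!f || fscanf(f, "%zu", &align) != 1 || align == 0)
		return 1;
	fclose(f);

	/* Round the requested length down to a multiple of the alignment. */
	printf("usable length: %zu\n", (want / align) * align);
	return 0;
}
```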
@@ -188,7 +211,6 @@
 	kfree(dmaengine_buffer);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
 
 /**
  * iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -196,7 +218,7 @@
  *
  * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
  */
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 {
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(buffer);
@@ -206,4 +228,46 @@
 
 	iio_buffer_put(buffer);
 }
-EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+
+static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
+{
+	iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
+ * @dev: Parent device for the buffer
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers. The parent device will be used to request the DMA
+ * channel.
+ *
+ * The buffer will be automatically de-allocated once the device gets destroyed.
+ */
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+	const char *channel)
+{
+	struct iio_buffer **bufferp, *buffer;
+
+	bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
+			       sizeof(*bufferp), GFP_KERNEL);
+	if (!bufferp)
+		return ERR_PTR(-ENOMEM);
+
+	buffer = iio_dmaengine_buffer_alloc(dev, channel);
+	if (IS_ERR(buffer)) {
+		devres_free(bufferp);
+		return buffer;
+	}
+
+	*bufferp = buffer;
+	devres_add(dev, bufferp);
+
+	return buffer;
+}
+EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA buffer for the IIO framework");
+MODULE_LICENSE("GPL");
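Net effect of the devres conversion: the plain alloc/free pair goes static, and drivers get a single managed entry point with no matching free call. A sketch of the intended consumer, assuming a hypothetical platform driver (`my_adc_probe` and the "rx" channel name are illustrative; the IIO calls are real APIs of this era):

```c
/* Hypothetical driver sketch showing devm_iio_dmaengine_buffer_alloc()
 * usage; not taken from the patch or any in-tree driver.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dmaengine.h>

static int my_adc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct iio_buffer *buffer;

	indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
	if (!indio_dev)
		return -ENOMEM;

	/* No explicit free needed: devres releases the buffer (and its
	 * DMA channel) when the device is unbound.
	 */
	buffer = devm_iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes = INDIO_BUFFER_HARDWARE;
	iio_device_attach_buffer(indio_dev, buffer);

	return devm_iio_device_register(&pdev->dev, indio_dev);
}
```

Note the ordering inside the helper itself: `devres_alloc()` runs before `iio_dmaengine_buffer_alloc()`, so a devres allocation failure can bail out without having to unwind the buffer, and the error path only needs `devres_free()`.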