// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals that are
 * connected to a DMA controller which has a DMAengine driver implementation.
 */

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
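	/*
	 * result->residue is the number of bytes the DMA engine did not
	 * transfer; subtracting it leaves the number of bytes that were
	 * actually captured in this block.
	 */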
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);
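	/*
	 * Worked example (illustrative numbers): with block->size = 4100,
	 * max_size = 65536 and align = 32, bytes_used becomes
	 * min(4100, 65536) = 4100, rounded down to a 32-byte multiple: 4096.
	 */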

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct attribute *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes.dev_attr.attr,
	NULL,
};
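
/*
 * The attribute above appears in sysfs under the IIO device's buffer
 * directory; userspace can read it to learn the required transfer-length
 * alignment and round its own buffer lengths accordingly. A sketch (the
 * device index "0" is illustrative):
 *
 *	cat /sys/bus/iio/devices/iio:device0/buffer/length_align_bytes
 */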

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
		const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * Transfers need to be aligned to the maximum of the minimum
	 * supported source and destination bus widths. __ffs() picks the
	 * lowest set bit, i.e. the smallest width in each capability mask.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}

static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * The buffer will be automatically de-allocated once the device gets destroyed.
 */
static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
		const char *channel)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return buffer;

	ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
				       buffer);
	if (ret)
		return ERR_PTR(ret);

	return buffer;
}

/**
 * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */
int devm_iio_dmaengine_buffer_setup(struct device *dev,
				    struct iio_dev *indio_dev,
				    const char *channel)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
						 channel);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	return iio_device_attach_buffer(indio_dev, buffer);
}
EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_setup);
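
/*
 * Typical use from a peripheral driver's probe function. This is a minimal
 * sketch, not taken from a real driver: the foo_adc names are hypothetical,
 * the iio_dev setup around the call is elided, and "rx" is simply the
 * conventional channel name for device-to-memory transfers.
 *
 *	static int foo_adc_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev,
 *						  sizeof(struct foo_adc));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		... fill in indio_dev->info, channels, etc. ...
 *
 *		ret = devm_iio_dmaengine_buffer_setup(&pdev->dev, indio_dev,
 *						      "rx");
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */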

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");