/*
 * Copyright 2010 Analog Devices Inc.
 * Copyright (C) 2008 Jonathan Cameron
 *
 * Licensed under the GPL-2 or later.
 *
 * ad7887_ring.c
 */

#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>

#include "../iio.h"
#include "../ring_generic.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../sysfs.h"

#include "ad7887.h"

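/*
 * Scan elements exported for buffered capture: the two ADC inputs at scan
 * indices 0 and 1, plus a timestamp channel at scan index 2 stored as a
 * signed 64 bit value.
 */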
static IIO_SCAN_EL_C(in0, 0, 0, NULL);
static IIO_SCAN_EL_C(in1, 1, 0, NULL);
static IIO_SCAN_EL_TIMESTAMP(2);
static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);

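/*
 * The in_type attribute reports the layout of the raw channel data as
 * <sign><bits>/<storagebits>>><shift>, taken from the chip_info table.
 */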
static ssize_t ad7887_show_type(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct ad7887_state *st = indio_dev->dev_data;

	return sprintf(buf, "%c%d/%d>>%d\n", st->chip_info->sign,
		       st->chip_info->bits, st->chip_info->storagebits,
		       st->chip_info->left_shift);
}
static IIO_DEVICE_ATTR(in_type, S_IRUGO, ad7887_show_type, NULL, 0);

static struct attribute *ad7887_scan_el_attrs[] = {
	&iio_scan_el_in0.dev_attr.attr,
	&iio_const_attr_in0_index.dev_attr.attr,
	&iio_scan_el_in1.dev_attr.attr,
	&iio_const_attr_in1_index.dev_attr.attr,
	&iio_const_attr_timestamp_index.dev_attr.attr,
	&iio_scan_el_timestamp.dev_attr.attr,
	&iio_const_attr_timestamp_type.dev_attr.attr,
	&iio_dev_attr_in_type.dev_attr.attr,
	NULL,
};

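/*
 * Hide the second channel's scan element attributes unless the part is
 * configured for dual channel operation (st->en_dual).
 */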
static mode_t ad7887_scan_el_attr_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct ad7887_state *st = indio_dev->dev_data;

	mode_t mode = attr->mode;

	if ((attr == &iio_scan_el_in1.dev_attr.attr) ||
	    (attr == &iio_const_attr_in1_index.dev_attr.attr))
		if (!st->en_dual)
			mode = 0;

	return mode;
}

static struct attribute_group ad7887_scan_el_group = {
	.name = "scan_elements",
	.attrs = ad7887_scan_el_attrs,
	.is_visible = ad7887_scan_el_attr_is_visible,
};

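/*
 * ad7887_scan_from_ring() - return the most recent value captured for the
 * channel selected by @mask. When both channels are scanned, CH1 data sits
 * one u16 into the datum; otherwise the requested channel is at offset zero.
 */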
int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
{
	struct iio_ring_buffer *ring = st->indio_dev->ring;
	int count = 0, ret;
	u16 *ring_data;

	if (!(ring->scan_mask & mask)) {
		ret = -EBUSY;
		goto error_ret;
	}

	ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL);
	if (ring_data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = ring->access.read_last(ring, (u8 *) ring_data);
	if (ret)
		goto error_free_ring_data;

	/* for single channel scan the result is stored with zero offset */
	if ((ring->scan_mask == ((1 << 1) | (1 << 0))) && (mask == (1 << 1)))
		count = 1;

	ret = be16_to_cpu(ring_data[count]);

error_free_ring_data:
	kfree(ring_data);
error_ret:
	return ret;
}

/**
 * ad7887_ring_preenable() setup the parameters of the ring before enabling
 *
 * The complex nature of the setting of the number of bytes per datum is due
 * to this driver currently ensuring that the timestamp is stored at an 8
 * byte boundary.
 **/
static int ad7887_ring_preenable(struct iio_dev *indio_dev)
{
	struct ad7887_state *st = indio_dev->dev_data;
	struct iio_ring_buffer *ring = indio_dev->ring;

	st->d_size = ring->scan_count * st->chip_info->storagebits / 8;

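	/* Pad d_size so the s64 timestamp lands on an 8 byte boundary. */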
	if (ring->scan_timestamp) {
		st->d_size += sizeof(s64);

		if (st->d_size % sizeof(s64))
			st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
	}

	if (indio_dev->ring->access.set_bytes_per_datum)
		indio_dev->ring->access.set_bytes_per_datum(indio_dev->ring,
							    st->d_size);

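	/*
	 * The scan mask selects which of the pre-built spi message sequences
	 * drives the capture: CH0 only, CH1 only, or both channels in one
	 * message.
	 */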
	switch (ring->scan_mask) {
	case (1 << 0):
		st->ring_msg = &st->msg[AD7887_CH0];
		break;
	case (1 << 1):
		st->ring_msg = &st->msg[AD7887_CH1];
		/* Dummy read: push CH1 setting down to hardware */
		spi_sync(st->spi, st->ring_msg);
		break;
	case ((1 << 1) | (1 << 0)):
		st->ring_msg = &st->msg[AD7887_CH0_CH1];
		break;
	}

	return 0;
}

static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
{
	struct ad7887_state *st = indio_dev->dev_data;

	/* dummy read: restore default CH0 setting */
	return spi_sync(st->spi, &st->msg[AD7887_CH0]);
}

/**
 * ad7887_poll_func_th() th of trigger launched polling to ring buffer
 *
 * As sampling only occurs when the spi comms take place, leave timestamping
 * until then. Some triggers will generate their own time stamp. Currently
 * there is no way of notifying them when no one cares.
 **/
static void ad7887_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
	struct ad7887_state *st = indio_dev->dev_data;

	schedule_work(&st->poll_work);
}

/**
 * ad7887_poll_bh_to_ring() bh of trigger launched polling to ring buffer
 * @work_s: the work struct through which this was scheduled
 *
 * Currently there is no option in this driver to disable the saving of
 * timestamps within the ring.
 * The one-copy-of-this-at-a-time restriction is thought to avoid problems if
 * the trigger rate is set far too high and the reads then lock up the machine.
 **/
static void ad7887_poll_bh_to_ring(struct work_struct *work_s)
{
	struct ad7887_state *st = container_of(work_s, struct ad7887_state,
					       poll_work);
	struct iio_dev *indio_dev = st->indio_dev;
	struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
	struct iio_ring_buffer *ring = indio_dev->ring;
	s64 time_ns;
	__u8 *buf;
	int b_sent;
	unsigned int bytes = ring->scan_count * st->chip_info->storagebits / 8;

	/* Ensure only one copy of this function running at a time */
	if (atomic_inc_return(&st->protect_ring) > 1) {
		atomic_dec(&st->protect_ring);
		return;
	}

	buf = kzalloc(st->d_size, GFP_KERNEL);
	if (buf == NULL)
		goto done;

	b_sent = spi_sync(st->spi, st->ring_msg);
	if (b_sent)
		goto done;

	time_ns = iio_get_time_ns();

	memcpy(buf, st->data, bytes);
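	/* Append the capture time at the 8 byte aligned end of the datum. */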
	if (ring->scan_timestamp)
		memcpy(buf + st->d_size - sizeof(s64),
		       &time_ns, sizeof(time_ns));

	indio_dev->ring->access.store_to(&sw_ring->buf, buf, time_ns);
done:
	kfree(buf);
	atomic_dec(&st->protect_ring);
}

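/*
 * ad7887_register_ring_funcs_and_init() - allocate the software ring buffer,
 * hook up its access and poll functions, and wire up the pre/post
 * enable/disable callbacks used for triggered capture.
 */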
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct ad7887_state *st = indio_dev->dev_data;
	int ret;

	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->ring) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* Effectively select the ring buffer implementation */
	iio_ring_sw_register_funcs(&indio_dev->ring->access);
	ret = iio_alloc_pollfunc(indio_dev, NULL, &ad7887_poll_func_th);
	if (ret)
		goto error_deallocate_sw_rb;

	/* Ring buffer functions - here trigger setup related */
	indio_dev->ring->preenable = &ad7887_ring_preenable;
	indio_dev->ring->postenable = &iio_triggered_ring_postenable;
	indio_dev->ring->predisable = &iio_triggered_ring_predisable;
	indio_dev->ring->postdisable = &ad7887_ring_postdisable;
	indio_dev->ring->scan_el_attrs = &ad7887_scan_el_group;
	indio_dev->ring->scan_timestamp = true;

	INIT_WORK(&st->poll_work, &ad7887_poll_bh_to_ring);

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;
error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->ring);
error_ret:
	return ret;
}

void ad7887_ring_cleanup(struct iio_dev *indio_dev)
{
	/* ensure that the trigger has been detached */
	if (indio_dev->trig) {
		iio_put_trigger(indio_dev->trig);
		iio_trigger_dettach_poll_func(indio_dev->trig,
					      indio_dev->pollfunc);
	}
	kfree(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->ring);
}