1 #include <linux/interrupt.h>
2 #include <linux/gpio.h>
3 #include <linux/mutex.h>
4 #include <linux/kernel.h>
5 #include <linux/spi/spi.h>
6 #include <linux/slab.h>
9 #include "../ring_sw.h"
10 #include "../kfifo_buf.h"
11 #include "../trigger.h"
12 #include "../trigger_consumer.h"
13 #include "lis3l02dq.h"
16  * combine_8_to_16() utility function to munge two u8s into one u16
18 static inline u16 combine_8_to_16(u8 lower, u8 upper)
/* promote to u16 before shifting so the high byte is not lost */
22 return _lower | (_upper << 8);
26 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
28 irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
/* 'private' is the iio_dev this irq was registered against */
30 struct iio_dev *indio_dev = private;
31 struct lis3l02dq_state *st = iio_priv(indio_dev);
/* kick the trigger, timestamping the sample at interrupt time */
34 iio_trigger_poll(st->trig, iio_get_time_ns());
/* NOTE(review): IRQ_WAKE_THREAD defers further work to the threaded
 * half of the irq handler - confirm a thread fn is registered. */
37 return IRQ_WAKE_THREAD;
41 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
43 ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
/* channel must be in the current scan mask or there is no data for it */
50 if (!iio_scan_mask_query(ring, index))
/* only ring implementations providing read_last can service this */
53 if (!ring->access->read_last)
/* scratch buffer big enough for one complete scan (datum) */
56 data = kmalloc(ring->access->get_bytes_per_datum(ring),
/* pull the most recently captured datum out of the ring */
61 ret = ring->access->read_last(ring, (u8 *)data);
/* the channel's slot in the datum is the count of enabled channels
 * below 'index' in the scan mask */
64 *val = data[bitmap_weight(&ring->scan_mask, index)];
/* SPI transmit bytes: a read-register command followed by a dummy byte
 * for each of the six acceleration output registers (X/Y/Z, low/high),
 * so replies land in the alternate (odd) rx bytes. */
71 static const u8 read_all_tx_array[] = {
72 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
73 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
74 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
75 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
76 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
77 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
81 * lis3l02dq_read_all() Reads all channels currently selected
82 * @st: device specific state
83 * @rx_array: (dma capable) receive array, must be at least
84 * 4*number of channels
86 static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
88 struct iio_ring_buffer *ring = indio_dev->ring;
89 struct lis3l02dq_state *st = iio_priv(indio_dev);
90 struct spi_transfer *xfers;
91 struct spi_message msg;
/* two transfers (low byte then high byte) per enabled channel */
94 xfers = kzalloc((ring->scan_count) * 2
95 * sizeof(*xfers), GFP_KERNEL);
/* buf_lock serialises use of the shared st->tx buffer */
99 mutex_lock(&st->buf_lock);
/* walk the per-axis command table, four tx bytes per axis */
101 for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
102 if (ring->scan_mask & (1 << i)) {
/* low-byte read for this axis */
104 xfers[j].tx_buf = st->tx + 2*j;
105 st->tx[2*j] = read_all_tx_array[i*4];
108 xfers[j].rx_buf = rx_array + j*2;
109 xfers[j].bits_per_word = 8;
/* toggle chip select between transfers; presumably needed by the
 * device to latch each register read - confirm against datasheet */
111 xfers[j].cs_change = 1;
/* high-byte read for the same axis */
115 xfers[j].tx_buf = st->tx + 2*j;
116 st->tx[2*j] = read_all_tx_array[i*4 + 2];
119 xfers[j].rx_buf = rx_array + j*2;
120 xfers[j].bits_per_word = 8;
122 xfers[j].cs_change = 1;
126 /* After these are transmitted, the rx_buff should have
127 * values in alternate bytes
129 spi_message_init(&msg);
130 for (j = 0; j < ring->scan_count * 2; j++)
131 spi_message_add_tail(&xfers[j], &msg);
/* fire the whole message synchronously while holding buf_lock */
133 ret = spi_sync(st->us, &msg);
134 mutex_unlock(&st->buf_lock);
/* Read one scan's worth of data from the device and pack it into buf
 * as consecutive 16-bit samples; returns the number of bytes written. */
140 static int lis3l02dq_get_ring_element(struct iio_dev *indio_dev,
145 s16 *data = (s16 *)buf;
/* 4 rx bytes per channel - replies arrive in alternate bytes
 * (see lis3l02dq_read_all) */
147 rx_array = kzalloc(4 * (indio_dev->ring->scan_count), GFP_KERNEL);
148 if (rx_array == NULL)
150 ret = lis3l02dq_read_all(indio_dev, rx_array);
/* stitch each channel's low/high reply bytes into one s16 sample */
153 for (i = 0; i < indio_dev->ring->scan_count; i++)
154 data[i] = combine_8_to_16(rx_array[i*4+1],
/* bytes actually stored in buf */
158 return i*sizeof(data[0]);
/* Trigger bottom half: capture one scan from the device and push it,
 * plus an optional timestamp, into the ring buffer. */
161 static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
163 struct iio_poll_func *pf = p;
164 struct iio_dev *indio_dev = pf->indio_dev;
165 struct iio_ring_buffer *ring = indio_dev->ring;
/* one datum = samples for all enabled channels (+ timestamp space) */
167 size_t datasize = ring->access->get_bytes_per_datum(ring);
168 char *data = kmalloc(datasize, GFP_KERNEL);
171 dev_err(indio_dev->dev.parent,
172 "memory alloc failed in ring bh");
/* only touch the hardware if at least one channel is enabled */
176 if (ring->scan_count)
177 len = lis3l02dq_get_ring_element(indio_dev, data);
179 /* Guaranteed to be aligned with 8 byte boundary */
/* round up past the sample bytes to an 8-byte boundary for the s64
 * timestamp; NOTE(review): phys_addr_t cast used for pointer
 * arithmetic - uintptr_t would be conventional, confirm intent */
180 if (ring->scan_timestamp)
181 *(s64 *)(((phys_addr_t)data + len
182 + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
184 ring->access->store_to(ring, (u8 *)data, pf->timestamp);
/* let the trigger core know this consumer is finished */
186 iio_trigger_notify_done(indio_dev->trig);
191 /* Caller responsible for locking as necessary. */
/* Enable or disable data-ready interrupt generation in CTRL_2,
 * mirroring the result in st->trigger_on. No-op if already in the
 * requested state. */
193 __lis3l02dq_write_data_ready_config(struct device *dev, bool state)
198 struct iio_dev *indio_dev = dev_get_drvdata(dev);
199 struct lis3l02dq_state *st = iio_priv(indio_dev);
201 /* Get the current event mask register */
202 ret = lis3l02dq_spi_read_reg_8(indio_dev,
203 LIS3L02DQ_REG_CTRL_2_ADDR,
207 /* Find out if data ready is already on */
209 = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
211 /* Disable requested */
212 if (!state && currentlyset) {
213 /* disable the data ready signal */
214 valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
216 /* The double write is reportedly to overcome a hardware bug - unconfirmed */
217 ret = lis3l02dq_spi_write_reg_8(indio_dev,
218 LIS3L02DQ_REG_CTRL_2_ADDR,
222 ret = lis3l02dq_spi_write_reg_8(indio_dev,
223 LIS3L02DQ_REG_CTRL_2_ADDR,
/* record software-side view of the trigger state */
227 st->trigger_on = false;
228 /* Enable requested */
229 } else if (state && !currentlyset) {
230 /* if not set, enable requested */
231 /* first disable all events */
232 ret = lis3l02dq_disable_all_events(indio_dev);
237 LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
239 st->trigger_on = true;
240 ret = lis3l02dq_spi_write_reg_8(indio_dev,
241 LIS3L02DQ_REG_CTRL_2_ADDR,
253 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
255 * If disabling the interrupt also does a final read to ensure it is clear.
256 * This is only important in some cases where the scan enable elements are
257 * switched before the ring is reenabled.
259 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
262 struct iio_dev *indio_dev = trig->private_data;
266 __lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
267 if (state == false) {
269 * A possible quirk with the handler is currently worked around
270 * by ensuring outstanding read events are cleared.
/* NOTE(review): rx_array is NULL here - confirm lis3l02dq_read_all
 * tolerates a NULL receive buffer in this drain-only path */
272 ret = lis3l02dq_read_all(indio_dev, NULL);
/* read WAKE_UP_SRC to clear any latched interrupt source */
274 lis3l02dq_spi_read_reg_8(indio_dev,
275 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
281 * lis3l02dq_trig_try_reen() try re-enabling irq for data rdy trigger
282 * @trig: the datardy trigger
284 static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
286 struct iio_dev *indio_dev = trig->private_data;
287 struct lis3l02dq_state *st = iio_priv(indio_dev);
290 /* If gpio still high (or high again) */
291 /* In theory possible we will need to do this several times */
/* drain pending data (up to 5 attempts) until the irq line drops */
292 for (i = 0; i < 5; i++)
293 if (gpio_get_value(irq_to_gpio(st->us->irq)))
294 lis3l02dq_read_all(indio_dev, NULL)
299 "Failed to clear the interrupt for lis3l02dq\n");
301 /* irq reenabled so success! */
/* Trigger callbacks hooked up for the data-ready trigger. */
305 static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
306 .owner = THIS_MODULE,
307 .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
308 .try_reenable = &lis3l02dq_trig_try_reen,
/* Allocate and register the data-ready trigger for this device.
 * On registration failure the trigger is freed before returning. */
311 int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
314 struct lis3l02dq_state *st = iio_priv(indio_dev);
316 st->trig = iio_allocate_trigger("lis3l02dq-dev%d", indio_dev->id);
322 st->trig->dev.parent = &st->us->dev;
323 st->trig->ops = &lis3l02dq_trigger_ops;
/* back-pointer so trigger callbacks can reach the iio_dev */
324 st->trig->private_data = indio_dev;
325 ret = iio_trigger_register(st->trig);
327 goto error_free_trig;
332 iio_free_trigger(st->trig);
/* Tear down the data-ready trigger: unregister then free. */
337 void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
339 struct lis3l02dq_state *st = iio_priv(indio_dev);
341 iio_trigger_unregister(st->trig);
342 iio_free_trigger(st->trig);
/* Release the pollfunc and ring buffer set up by
 * lis3l02dq_configure_ring(). */
345 void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
347 iio_dealloc_pollfunc(indio_dev->pollfunc);
348 lis3l02dq_free_buf(indio_dev->ring);
/* Restrict the hardware axis enables to the channels in the scan mask,
 * then chain to the generic triggered-ring postenable. */
351 static int lis3l02dq_ring_postenable(struct iio_dev *indio_dev)
353 /* Disable unwanted channels otherwise the interrupt will not clear */
356 bool oneenabled = false;
/* read-modify-write CTRL_1 so unrelated bits are preserved */
358 ret = lis3l02dq_spi_read_reg_8(indio_dev,
359 LIS3L02DQ_REG_CTRL_1_ADDR,
/* channels 0/1/2 map to the X/Y/Z axis enable bits */
364 if (iio_scan_mask_query(indio_dev->ring, 0)) {
365 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
368 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
369 if (iio_scan_mask_query(indio_dev->ring, 1)) {
370 t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
373 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
374 if (iio_scan_mask_query(indio_dev->ring, 2)) {
375 t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
378 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
380 if (!oneenabled) /* what happens in this case is unknown */
382 ret = lis3l02dq_spi_write_reg_8(indio_dev,
383 LIS3L02DQ_REG_CTRL_1_ADDR,
388 return iio_triggered_ring_postenable(indio_dev);
393 /* Turn all channels on again */
394 static int lis3l02dq_ring_predisable(struct iio_dev *indio_dev)
/* run the generic triggered-ring predisable first */
399 ret = iio_triggered_ring_predisable(indio_dev);
/* read-modify-write CTRL_1, re-enabling every axis */
403 ret = lis3l02dq_spi_read_reg_8(indio_dev,
404 LIS3L02DQ_REG_CTRL_1_ADDR,
408 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
409 LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
410 LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
412 ret = lis3l02dq_spi_write_reg_8(indio_dev,
413 LIS3L02DQ_REG_CTRL_1_ADDR,
/* Ring enable/disable hooks: generic preenable, device-specific
 * axis handling around enable/disable. */
420 static const struct iio_ring_setup_ops lis3l02dq_ring_setup_ops = {
421 .preenable = &iio_sw_ring_preenable,
422 .postenable = &lis3l02dq_ring_postenable,
423 .predisable = &lis3l02dq_ring_predisable,
/* Allocate the ring buffer and pollfunc and wire them into indio_dev;
 * frees the ring again if pollfunc allocation fails. */
426 int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
429 struct iio_ring_buffer *ring;
431 ring = lis3l02dq_alloc_buf(indio_dev);
435 indio_dev->ring = ring;
436 /* Effectively select the ring buffer implementation */
437 indio_dev->ring->access = &lis3l02dq_access_funcs;
/* capture timestamps alongside sample data */
440 ring->scan_timestamp = true;
441 ring->setup_ops = &lis3l02dq_ring_setup_ops;
442 ring->owner = THIS_MODULE;
444 /* Set default scan mode */
/* all three axes (channels 0-2) enabled by default */
445 iio_scan_mask_set(ring, 0);
446 iio_scan_mask_set(ring, 1);
447 iio_scan_mask_set(ring, 2);
449 /* Functions are NULL as we set handler below */
450 indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
451 &lis3l02dq_trigger_handler,
454 "lis3l02dq_consumer%d",
457 if (indio_dev->pollfunc == NULL) {
459 goto error_iio_sw_rb_free;
/* advertise triggered-ring capability to the IIO core */
462 indio_dev->modes |= INDIO_RING_TRIGGERED;
465 error_iio_sw_rb_free:
466 lis3l02dq_free_buf(indio_dev->ring);