drivers/staging/iio/accel/lis3l02dq_ring.c
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "../iio.h"
#include "../ring_sw.h"
#include "../kfifo_buf.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
#include "lis3l02dq.h"

/**
 * combine_8_to_16() utility function to munge two u8s into a u16
 **/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
	u16 _lower = lower;
	u16 _upper = upper;
	return _lower | (_upper << 8);
}
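
/*
 * Illustrative sketch (not part of the driver): the device exposes the low
 * and high byte of each 16-bit sample in separate registers, so
 * combine_8_to_16(0x34, 0x12) yields 0x1234.  A trivial self-check along
 * these lines can be handy when debugging the demux logic; the function
 * name below is hypothetical and unused.
 */
static inline bool __maybe_unused lis3l02dq_combine_selftest(void)
{
	/* lower byte 0x34, upper byte 0x12 -> 0x1234 */
	return combine_8_to_16(0x34, 0x12) == 0x1234;
}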

/**
 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
 **/
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	if (st->trigger_on) {
		iio_trigger_poll(st->trig, iio_get_time_ns());
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}

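/*
 * A sketch of the threaded IRQ setup this handler assumes.  The actual
 * request lives in lis3l02dq_core.c and the thread function name here is an
 * assumption; the point is that returning IRQ_WAKE_THREAD above defers to
 * the threaded half whenever the data ready trigger is not in use.
 *
 *	ret = request_threaded_irq(st->us->irq,
 *				   &lis3l02dq_data_rdy_trig_poll,
 *				   &lis3l02dq_event_handler,
 *				   IRQF_TRIGGER_RISING,
 *				   "lis3l02dq", indio_dev);
 */
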
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};

/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @indio_dev:	the IIO device
 * @rx_array:	(dma capable) receive array, must be at least
 *		4*number of channels
 **/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2,
			sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	mutex_lock(&st->buf_lock);

	for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
		if (test_bit(i, indio_dev->active_scan_mask)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;

			/* upper byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4 + 2];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}

	/* After these are transmitted, rx_array should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);

	return ret;
}

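/*
 * A minimal sketch of the transfer pattern built above, reduced to reading
 * just the two X-axis output registers.  Each register read is a two byte
 * transfer (command byte, then a dummy byte that clocks the data back) and
 * cs_change keeps the accesses distinct.  This helper is illustrative only
 * and is not called anywhere in the driver.
 */
static int __maybe_unused lis3l02dq_read_x_pair(struct iio_dev *indio_dev,
						u8 *rx)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_message msg;
	struct spi_transfer xfers[2] = {
		{
			.tx_buf = st->tx,
			.rx_buf = rx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
		}, {
			.tx_buf = st->tx + 2,
			.rx_buf = rx + 2,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
		},
	};
	int ret;

	mutex_lock(&st->buf_lock);
	st->tx[0] = LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR);
	st->tx[1] = 0;
	st->tx[2] = LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR);
	st->tx[3] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);

	return ret;
}
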
static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
				u8 *buf)
{
	int ret, i;
	u8 *rx_array;
	s16 *data = (s16 *)buf;
	int scan_count = bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);

	rx_array = kzalloc(4 * scan_count, GFP_KERNEL);
	if (rx_array == NULL)
		return -ENOMEM;
	ret = lis3l02dq_read_all(indio_dev, rx_array);
	if (ret < 0) {
		kfree(rx_array);
		return ret;
	}
	for (i = 0; i < scan_count; i++)
		data[i] = combine_8_to_16(rx_array[i*4+1],
					rx_array[i*4+3]);
	kfree(rx_array);

	return i*sizeof(data[0]);
}

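/*
 * Receive buffer layout sketch: every enabled channel contributes two
 * two-byte transfers, so rx_array looks like
 *
 *	[junk][XL][junk][XH][junk][YL][junk][YH]...
 *
 * with the real data in the odd bytes.  The hypothetical accessor below just
 * restates the indexing used by lis3l02dq_get_buffer_element().
 */
static inline s16 __maybe_unused lis3l02dq_nth_sample(const u8 *rx_array,
						      int n)
{
	return (s16)combine_8_to_16(rx_array[n*4 + 1], rx_array[n*4 + 3]);
}
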
static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct iio_buffer *buffer = indio_dev->buffer;
	int len = 0;
	size_t datasize = buffer->access->get_bytes_per_datum(buffer);
	char *data = kmalloc(datasize, GFP_KERNEL);

	if (data == NULL) {
		dev_err(indio_dev->dev.parent,
			"memory alloc failed in buffer bh\n");
		goto done;
	}

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		len = lis3l02dq_get_buffer_element(indio_dev, data);

	/* The timestamp is guaranteed to be aligned to an 8 byte boundary */
	if (buffer->scan_timestamp)
		*(s64 *)(((uintptr_t)data + len
				+ sizeof(s64) - 1) & ~(sizeof(s64) - 1))
			= pf->timestamp;
	buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);

done:
	iio_trigger_notify_done(indio_dev->trig);
	kfree(data);
	return IRQ_HANDLED;
}

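/*
 * The timestamp store above rounds the destination up to the next 8 byte
 * boundary by hand.  A sketch of the same arithmetic using the kernel's
 * ALIGN() helper, for a hypothetical buffer "buf" holding "len" bytes of
 * sample data:
 *
 *	s64 *ts = (s64 *)ALIGN((uintptr_t)buf + len, sizeof(s64));
 *	*ts = pf->timestamp;
 *
 * With all three axes enabled len is 6, so the timestamp lands at offset 8,
 * leaving two bytes of padding after the 16-bit samples.
 */
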
/* Caller responsible for locking as necessary. */
static int
__lis3l02dq_write_data_ready_config(struct device *dev, bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {
		/* Disable the data ready signal */
		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* The double write is to overcome a hardware bug? */
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		st->trigger_on = false;
	/* Enable requested */
	} else if (state && !currentlyset) {
		/* First disable all events */
		ret = lis3l02dq_disable_all_events(indio_dev);
		if (ret < 0)
			goto error_ret;

		valold = ret |
			LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		st->trigger_on = true;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}

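/*
 * The function above is a read-modify-write of CTRL_2.  A minimal sketch of
 * the same pattern for an arbitrary bit mask, using the register helpers the
 * driver already provides (this helper itself is hypothetical and unused):
 */
static int __maybe_unused lis3l02dq_update_ctrl2(struct iio_dev *indio_dev,
						 u8 mask, bool set)
{
	u8 val;
	int ret;

	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR, &val);
	if (ret)
		return ret;

	if (set)
		val |= mask;
	else
		val &= ~mask;

	return lis3l02dq_spi_write_reg_8(indio_dev,
					 LIS3L02DQ_REG_CTRL_2_ADDR, val);
}
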
/**
 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
 *
 * When disabling the interrupt, also do a final read to ensure it is clear.
 * This is only important in some cases where the scan enable elements are
 * switched before the buffer is re-enabled.
 **/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;
	int ret = 0;
	u8 t;

	__lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
	if (!state) {
		/*
		 * A possible quirk with the handler is currently worked
		 * around by ensuring outstanding read events are cleared.
		 */
		ret = lis3l02dq_read_all(indio_dev, NULL);
	}
	lis3l02dq_spi_read_reg_8(indio_dev,
				 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
				 &t);
	return ret;
}

/**
 * lis3l02dq_trig_try_reen() try re-enabling the irq for the data rdy trigger
 * @trig:	the datardy trigger
 */
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
	struct iio_dev *indio_dev = trig->private_data;
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int i;

	/*
	 * If the gpio is still high (or high again), keep draining data.
	 * In theory we may need to do this several times.
	 */
	for (i = 0; i < 5; i++) {
		if (gpio_get_value(irq_to_gpio(st->us->irq)))
			lis3l02dq_read_all(indio_dev, NULL);
		else
			break;
	}
	if (i == 5)
		printk(KERN_INFO
		       "Failed to clear the interrupt for lis3l02dq\n");

	/* irq re-enabled so success! */
	return 0;
}

static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
	.try_reenable = &lis3l02dq_trig_try_reen,
};

int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("lis3l02dq-dev%d", indio_dev->id);
	if (!st->trig) {
		ret = -ENOMEM;
		goto error_ret;
	}

	st->trig->dev.parent = &st->us->dev;
	st->trig->ops = &lis3l02dq_trigger_ops;
	st->trig->private_data = indio_dev;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_trig;

	return 0;

error_free_trig:
	iio_free_trigger(st->trig);
error_ret:
	return ret;
}

void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	iio_free_trigger(st->trig);
}

void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	lis3l02dq_free_buf(indio_dev->buffer);
}

static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
{
	/* Disable unwanted channels otherwise the interrupt will not clear */
	u8 t;
	int ret;
	bool oneenabled = false;

	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;

	if (test_bit(0, indio_dev->active_scan_mask)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
		oneenabled = true;
	} else {
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
	}
	if (test_bit(1, indio_dev->active_scan_mask)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
		oneenabled = true;
	} else {
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
	}
	if (test_bit(2, indio_dev->active_scan_mask)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
		oneenabled = true;
	} else {
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
	}

	if (!oneenabled) /* what happens in this case is unknown */
		return -EINVAL;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);
	if (ret)
		goto error_ret;

	return iio_triggered_buffer_postenable(indio_dev);
error_ret:
	return ret;
}

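/*
 * The three per-axis branches above map scan index 0/1/2 directly onto the
 * X/Y/Z enable bits, so the same update can be written as a table-driven
 * loop.  This alternative is purely an illustrative sketch and is not used
 * by the driver.
 */
static int __maybe_unused
lis3l02dq_axes_from_scan_mask(struct iio_dev *indio_dev, u8 *ctrl)
{
	static const u8 enable_bits[] = {
		LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE,
		LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE,
		LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE,
	};
	bool oneenabled = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(enable_bits); i++) {
		if (test_bit(i, indio_dev->active_scan_mask)) {
			*ctrl |= enable_bits[i];
			oneenabled = true;
		} else {
			*ctrl &= ~enable_bits[i];
		}
	}

	return oneenabled ? 0 : -EINVAL;
}
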
/* Turn all channels on again */
static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
{
	u8 t;
	int ret;

	ret = iio_triggered_buffer_predisable(indio_dev);
	if (ret)
		goto error_ret;

	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;
	t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
		LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
		LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);

error_ret:
	return ret;
}

static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &lis3l02dq_buffer_postenable,
	.predisable = &lis3l02dq_buffer_predisable,
};

int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	buffer = lis3l02dq_alloc_buf(indio_dev);
	if (!buffer)
		return -ENOMEM;

	indio_dev->buffer = buffer;
	/* Effectively select the buffer implementation */
	indio_dev->buffer->access = &lis3l02dq_access_funcs;

	buffer->scan_timestamp = true;
	indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;

	/*
	 * Set up the poll function: iio_pollfunc_store_time grabs the
	 * timestamp in the top half and lis3l02dq_trigger_handler runs as
	 * the threaded bottom half.
	 */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &lis3l02dq_trigger_handler,
						 0,
						 indio_dev,
						 "lis3l02dq_consumer%d",
						 indio_dev->id);

	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	lis3l02dq_free_buf(indio_dev->buffer);
	return ret;
}
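
/*
 * A rough sketch of how the probe path in lis3l02dq_core.c is expected to
 * use the helpers above (the surrounding probe code is an assumption here;
 * see the core driver for the authoritative sequence).  The buffer is set up
 * first, then the trigger, and the unwind happens in reverse order:
 *
 *	ret = lis3l02dq_configure_buffer(indio_dev);
 *	if (ret)
 *		goto error_out;
 *	ret = lis3l02dq_probe_trigger(indio_dev);
 *	if (ret)
 *		goto error_unconfigure_buffer;
 *	...
 * error_unconfigure_buffer:
 *	lis3l02dq_unconfigure_buffer(indio_dev);
 */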