1 #include <linux/interrupt.h>
3 #include <linux/gpio.h>
4 #include <linux/workqueue.h>
5 #include <linux/mutex.h>
6 #include <linux/device.h>
7 #include <linux/kernel.h>
8 #include <linux/spi/spi.h>
9 #include <linux/sysfs.h>
10 #include <linux/list.h>
11 #include <linux/slab.h>
15 #include "../ring_sw.h"
17 #include "../trigger.h"
18 #include "lis3l02dq.h"
21 * combine_8_to_16() utility function to munge to u8s into u16
23 static inline u16
combine_8_to_16(u8 lower
, u8 upper
)
27 return _lower
| (_upper
<< 8);
31 * lis3l02dq_scan_el_set_state() set whether a scan contains a given channel
32 * @scan_el: associtate iio scan element attribute
33 * @indio_dev: the device structure
34 * @bool: desired state
36 * mlock already held when this is called.
38 static int lis3l02dq_scan_el_set_state(struct iio_scan_el
*scan_el
,
39 struct iio_dev
*indio_dev
,
45 ret
= lis3l02dq_spi_read_reg_8(&indio_dev
->dev
,
46 LIS3L02DQ_REG_CTRL_1_ADDR
,
50 switch (scan_el
->label
) {
51 case LIS3L02DQ_REG_OUT_X_L_ADDR
:
52 mask
= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE
;
54 case LIS3L02DQ_REG_OUT_Y_L_ADDR
:
55 mask
= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE
;
57 case LIS3L02DQ_REG_OUT_Z_L_ADDR
:
58 mask
= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE
;
65 if (!(mask
& t
) == state
) {
70 ret
= lis3l02dq_spi_write_reg_8(&indio_dev
->dev
,
71 LIS3L02DQ_REG_CTRL_1_ADDR
,
78 static IIO_SCAN_EL_C(accel_x
, LIS3L02DQ_SCAN_ACC_X
, IIO_SIGNED(16),
79 LIS3L02DQ_REG_OUT_X_L_ADDR
,
80 &lis3l02dq_scan_el_set_state
);
81 static IIO_SCAN_EL_C(accel_y
, LIS3L02DQ_SCAN_ACC_Y
, IIO_SIGNED(16),
82 LIS3L02DQ_REG_OUT_Y_L_ADDR
,
83 &lis3l02dq_scan_el_set_state
);
84 static IIO_SCAN_EL_C(accel_z
, LIS3L02DQ_SCAN_ACC_Z
, IIO_SIGNED(16),
85 LIS3L02DQ_REG_OUT_Z_L_ADDR
,
86 &lis3l02dq_scan_el_set_state
);
87 static IIO_SCAN_EL_TIMESTAMP
;
89 static struct attribute
*lis3l02dq_scan_el_attrs
[] = {
90 &iio_scan_el_accel_x
.dev_attr
.attr
,
91 &iio_scan_el_accel_y
.dev_attr
.attr
,
92 &iio_scan_el_accel_z
.dev_attr
.attr
,
93 &iio_scan_el_timestamp
.dev_attr
.attr
,
97 static struct attribute_group lis3l02dq_scan_el_group
= {
98 .attrs
= lis3l02dq_scan_el_attrs
,
99 .name
= "scan_elements",
103 * lis3l02dq_poll_func_th() top half interrupt handler called by trigger
104 * @private_data: iio_dev
106 static void lis3l02dq_poll_func_th(struct iio_dev
*indio_dev
)
108 struct lis3l02dq_state
*st
= iio_dev_get_devdata(indio_dev
);
109 st
->last_timestamp
= indio_dev
->trig
->timestamp
;
110 schedule_work(&st
->work_trigger_to_ring
);
111 /* Indicate that this interrupt is being handled */
113 /* Technically this is trigger related, but without this
114 * handler running there is currently now way for the interrupt
121 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
123 static int lis3l02dq_data_rdy_trig_poll(struct iio_dev
*dev_info
,
128 struct lis3l02dq_state
*st
= iio_dev_get_devdata(dev_info
);
129 struct iio_trigger
*trig
= st
->trig
;
131 trig
->timestamp
= timestamp
;
132 iio_trigger_poll(trig
);
137 /* This is an event as it is a response to a physical interrupt */
138 IIO_EVENT_SH(data_rdy_trig
, &lis3l02dq_data_rdy_trig_poll
);
141 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
143 ssize_t
lis3l02dq_read_accel_from_ring(struct device
*dev
,
144 struct device_attribute
*attr
,
147 struct iio_scan_el
*el
= NULL
;
148 int ret
, len
= 0, i
= 0;
149 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
150 struct iio_dev
*dev_info
= dev_get_drvdata(dev
);
153 while (dev_info
->scan_el_attrs
->attrs
[i
]) {
154 el
= to_iio_scan_el((struct device_attribute
*)
155 (dev_info
->scan_el_attrs
->attrs
[i
]));
156 /* label is in fact the address */
157 if (el
->label
== this_attr
->address
)
161 if (!dev_info
->scan_el_attrs
->attrs
[i
]) {
165 /* If this element is in the scan mask */
166 ret
= iio_scan_mask_query(dev_info
, el
->number
);
170 data
= kmalloc(dev_info
->ring
->access
.get_bpd(dev_info
->ring
),
174 ret
= dev_info
->ring
->access
.read_last(dev_info
->ring
,
177 goto error_free_data
;
182 len
= iio_scan_mask_count_to_right(dev_info
, el
->number
);
185 goto error_free_data
;
187 len
= sprintf(buf
, "ring %d\n", data
[len
]);
191 return ret
? ret
: len
;
195 static const u8 read_all_tx_array
[] =
197 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR
), 0,
198 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR
), 0,
199 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR
), 0,
200 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR
), 0,
201 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR
), 0,
202 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR
), 0,
206 * lis3l02dq_read_all() Reads all channels currently selected
207 * @st: device specific state
208 * @rx_array: (dma capable) recieve array, must be at least
209 * 4*number of channels
211 int lis3l02dq_read_all(struct lis3l02dq_state
*st
, u8
*rx_array
)
213 struct spi_transfer
*xfers
;
214 struct spi_message msg
;
217 xfers
= kzalloc((st
->indio_dev
->scan_count
) * 2
218 * sizeof(*xfers
), GFP_KERNEL
);
222 mutex_lock(&st
->buf_lock
);
224 for (i
= 0; i
< ARRAY_SIZE(read_all_tx_array
)/4; i
++) {
225 if (st
->indio_dev
->scan_mask
& (1 << i
)) {
227 xfers
[j
].tx_buf
= st
->tx
+ 2*j
;
228 st
->tx
[2*j
] = read_all_tx_array
[i
*4];
231 xfers
[j
].rx_buf
= rx_array
+ j
*2;
232 xfers
[j
].bits_per_word
= 8;
234 xfers
[j
].cs_change
= 1;
238 xfers
[j
].tx_buf
= st
->tx
+ 2*j
;
239 st
->tx
[2*j
] = read_all_tx_array
[i
*4 + 2];
242 xfers
[j
].rx_buf
= rx_array
+ j
*2;
243 xfers
[j
].bits_per_word
= 8;
245 xfers
[j
].cs_change
= 1;
249 /* After these are transmitted, the rx_buff should have
250 * values in alternate bytes
252 spi_message_init(&msg
);
253 for (j
= 0; j
< st
->indio_dev
->scan_count
* 2; j
++)
254 spi_message_add_tail(&xfers
[j
], &msg
);
256 ret
= spi_sync(st
->us
, &msg
);
257 mutex_unlock(&st
->buf_lock
);
264 /* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device
265 * specific to be rolled into the core.
267 static void lis3l02dq_trigger_bh_to_ring(struct work_struct
*work_s
)
269 struct lis3l02dq_state
*st
270 = container_of(work_s
, struct lis3l02dq_state
,
271 work_trigger_to_ring
);
276 size_t datasize
= st
->indio_dev
277 ->ring
->access
.get_bpd(st
->indio_dev
->ring
);
279 data
= kmalloc(datasize
, GFP_KERNEL
);
281 dev_err(&st
->us
->dev
, "memory alloc failed in ring bh");
284 /* Due to interleaved nature of transmission this buffer must be
285 * twice the number of bytes, or 4 times the number of channels
287 rx_array
= kmalloc(4 * (st
->indio_dev
->scan_count
), GFP_KERNEL
);
288 if (rx_array
== NULL
) {
289 dev_err(&st
->us
->dev
, "memory alloc failed in ring bh");
294 /* whilst trigger specific, if this read does nto occur the data
295 ready interrupt will not be cleared. Need to add a mechanism
296 to provide a dummy read function if this is not triggering on
297 the data ready function but something else is.
301 if (st
->indio_dev
->scan_count
)
302 if (lis3l02dq_read_all(st
, rx_array
) >= 0)
303 for (; i
< st
->indio_dev
->scan_count
; i
++)
304 data
[i
] = combine_8_to_16(rx_array
[i
*4+1],
306 /* Guaranteed to be aligned with 8 byte boundary */
307 if (st
->indio_dev
->scan_timestamp
)
308 *((s64
*)(data
+ ((i
+ 3)/4)*4)) = st
->last_timestamp
;
310 st
->indio_dev
->ring
->access
.store_to(st
->indio_dev
->ring
,
314 iio_trigger_notify_done(st
->indio_dev
->trig
);
320 /* in these circumstances is it better to go with unaligned packing and
321 * deal with the cost?*/
322 static int lis3l02dq_data_rdy_ring_preenable(struct iio_dev
*indio_dev
)
325 /* Check if there are any scan elements enabled, if not fail*/
326 if (!(indio_dev
->scan_count
|| indio_dev
->scan_timestamp
))
329 if (indio_dev
->ring
->access
.set_bpd
) {
330 if (indio_dev
->scan_timestamp
)
331 if (indio_dev
->scan_count
) /* Timestamp and data */
332 size
= 2*sizeof(s64
);
333 else /* Timestamp only */
336 size
= indio_dev
->scan_count
*sizeof(s16
);
337 indio_dev
->ring
->access
.set_bpd(indio_dev
->ring
, size
);
343 static int lis3l02dq_data_rdy_ring_postenable(struct iio_dev
*indio_dev
)
345 return indio_dev
->trig
346 ? iio_trigger_attach_poll_func(indio_dev
->trig
,
351 static int lis3l02dq_data_rdy_ring_predisable(struct iio_dev
*indio_dev
)
353 return indio_dev
->trig
354 ? iio_trigger_dettach_poll_func(indio_dev
->trig
,
360 /* Caller responsible for locking as necessary. */
361 static int __lis3l02dq_write_data_ready_config(struct device
*dev
,
363 iio_event_handler_list
*list
,
369 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
371 /* Get the current event mask register */
372 ret
= lis3l02dq_spi_read_reg_8(dev
,
373 LIS3L02DQ_REG_CTRL_2_ADDR
,
377 /* Find out if data ready is already on */
379 = valold
& LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION
;
381 /* Disable requested */
382 if (!state
&& currentlyset
) {
384 valold
&= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION
;
385 /* The double write is to overcome a hardware bug?*/
386 ret
= lis3l02dq_spi_write_reg_8(dev
,
387 LIS3L02DQ_REG_CTRL_2_ADDR
,
391 ret
= lis3l02dq_spi_write_reg_8(dev
,
392 LIS3L02DQ_REG_CTRL_2_ADDR
,
397 iio_remove_event_from_list(list
,
398 &indio_dev
->interrupts
[0]
401 /* Enable requested */
402 } else if (state
&& !currentlyset
) {
403 /* if not set, enable requested */
404 valold
|= LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION
;
405 iio_add_event_to_list(list
, &indio_dev
->interrupts
[0]->ev_list
);
406 ret
= lis3l02dq_spi_write_reg_8(dev
,
407 LIS3L02DQ_REG_CTRL_2_ADDR
,
419 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
421 * If disabling the interrupt also does a final read to ensure it is clear.
422 * This is only important in some cases where the scan enable elements are
423 * switched before the ring is reenabled.
425 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger
*trig
,
428 struct lis3l02dq_state
*st
= trig
->private_data
;
431 __lis3l02dq_write_data_ready_config(&st
->indio_dev
->dev
,
432 &iio_event_data_rdy_trig
,
434 if (state
== false) {
435 /* possible quirk with handler currently worked around
436 by ensuring the work queue is empty */
437 flush_scheduled_work();
438 /* Clear any outstanding ready events */
439 ret
= lis3l02dq_read_all(st
, NULL
);
441 lis3l02dq_spi_read_reg_8(&st
->indio_dev
->dev
,
442 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR
,
446 static DEVICE_ATTR(name
, S_IRUGO
, iio_trigger_read_name
, NULL
);
448 static struct attribute
*lis3l02dq_trigger_attrs
[] = {
453 static const struct attribute_group lis3l02dq_trigger_attr_group
= {
454 .attrs
= lis3l02dq_trigger_attrs
,
458 * lis3l02dq_trig_try_reen() try renabling irq for data rdy trigger
459 * @trig: the datardy trigger
461 * As the trigger may occur on any data element being updated it is
462 * really rather likely to occur during the read from the previous
463 * trigger event. The only way to discover if this has occured on
464 * boards not supporting level interrupts is to take a look at the line.
465 * If it is indicating another interrupt and we don't seem to have a
466 * handler looking at it, then we need to notify the core that we need
467 * to tell the triggering core to try reading all these again.
469 static int lis3l02dq_trig_try_reen(struct iio_trigger
*trig
)
471 struct lis3l02dq_state
*st
= trig
->private_data
;
472 enable_irq(st
->us
->irq
);
473 /* If gpio still high (or high again) */
474 if (gpio_get_value(irq_to_gpio(st
->us
->irq
)))
475 if (st
->inter
== 0) {
476 /* already interrupt handler dealing with it */
477 disable_irq_nosync(st
->us
->irq
);
478 if (st
->inter
== 1) {
479 /* interrupt handler snuck in between test
481 enable_irq(st
->us
->irq
);
486 /* irq reenabled so success! */
490 int lis3l02dq_probe_trigger(struct iio_dev
*indio_dev
)
493 struct lis3l02dq_state
*state
= indio_dev
->dev_data
;
495 state
->trig
= iio_allocate_trigger();
496 state
->trig
->name
= kmalloc(IIO_TRIGGER_NAME_LENGTH
, GFP_KERNEL
);
497 if (!state
->trig
->name
) {
499 goto error_free_trig
;
501 snprintf((char *)state
->trig
->name
,
502 IIO_TRIGGER_NAME_LENGTH
,
503 "lis3l02dq-dev%d", indio_dev
->id
);
504 state
->trig
->dev
.parent
= &state
->us
->dev
;
505 state
->trig
->owner
= THIS_MODULE
;
506 state
->trig
->private_data
= state
;
507 state
->trig
->set_trigger_state
= &lis3l02dq_data_rdy_trigger_set_state
;
508 state
->trig
->try_reenable
= &lis3l02dq_trig_try_reen
;
509 state
->trig
->control_attrs
= &lis3l02dq_trigger_attr_group
;
510 ret
= iio_trigger_register(state
->trig
);
512 goto error_free_trig_name
;
516 error_free_trig_name
:
517 kfree(state
->trig
->name
);
519 iio_free_trigger(state
->trig
);
524 void lis3l02dq_remove_trigger(struct iio_dev
*indio_dev
)
526 struct lis3l02dq_state
*state
= indio_dev
->dev_data
;
528 iio_trigger_unregister(state
->trig
);
529 kfree(state
->trig
->name
);
530 iio_free_trigger(state
->trig
);
533 void lis3l02dq_unconfigure_ring(struct iio_dev
*indio_dev
)
535 kfree(indio_dev
->pollfunc
);
536 iio_sw_rb_free(indio_dev
->ring
);
539 int lis3l02dq_configure_ring(struct iio_dev
*indio_dev
)
542 struct lis3l02dq_state
*st
= indio_dev
->dev_data
;
543 struct iio_ring_buffer
*ring
;
544 INIT_WORK(&st
->work_trigger_to_ring
, lis3l02dq_trigger_bh_to_ring
);
545 /* Set default scan mode */
547 iio_scan_mask_set(indio_dev
, iio_scan_el_accel_x
.number
);
548 iio_scan_mask_set(indio_dev
, iio_scan_el_accel_y
.number
);
549 iio_scan_mask_set(indio_dev
, iio_scan_el_accel_z
.number
);
550 indio_dev
->scan_timestamp
= true;
552 indio_dev
->scan_el_attrs
= &lis3l02dq_scan_el_group
;
554 ring
= iio_sw_rb_allocate(indio_dev
);
559 indio_dev
->ring
= ring
;
560 /* Effectively select the ring buffer implementation */
561 iio_ring_sw_register_funcs(&ring
->access
);
562 ring
->preenable
= &lis3l02dq_data_rdy_ring_preenable
;
563 ring
->postenable
= &lis3l02dq_data_rdy_ring_postenable
;
564 ring
->predisable
= &lis3l02dq_data_rdy_ring_predisable
;
565 ring
->owner
= THIS_MODULE
;
567 indio_dev
->pollfunc
= kzalloc(sizeof(*indio_dev
->pollfunc
), GFP_KERNEL
);
568 if (indio_dev
->pollfunc
== NULL
) {
570 goto error_iio_sw_rb_free
;;
572 indio_dev
->pollfunc
->poll_func_main
= &lis3l02dq_poll_func_th
;
573 indio_dev
->pollfunc
->private_data
= indio_dev
;
574 indio_dev
->modes
|= INDIO_RING_TRIGGERED
;
577 error_iio_sw_rb_free
:
578 iio_sw_rb_free(indio_dev
->ring
);
/* Register the ring buffer with the IIO core. */
int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
{
	return iio_ring_buffer_register(ring);
}
/* Unregister the ring buffer from the IIO core. */
void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
{
	iio_ring_buffer_unregister(ring);
}
593 int lis3l02dq_set_ring_length(struct iio_dev
*indio_dev
, int length
)
595 /* Set sensible defaults for the ring buffer */
596 if (indio_dev
->ring
->access
.set_length
)
597 return indio_dev
->ring
->access
.set_length(indio_dev
->ring
, 500);