/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>

#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
#include "sysfs.h"
31 /* IDA to assign each registered device a unique id*/
32 static DEFINE_IDA(iio_ida
);
34 static dev_t iio_devt
;
36 #define IIO_DEV_MAX 256
37 struct bus_type iio_bus_type
= {
40 EXPORT_SYMBOL(iio_bus_type
);
42 static const char * const iio_data_type_name
[] = {
44 [IIO_PROCESSED
] = "input",
/* Attribute prefix per channel direction, indexed by chan->output */
static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};
52 static const char * const iio_chan_type_name_spec
[] = {
53 [IIO_VOLTAGE
] = "voltage",
54 [IIO_CURRENT
] = "current",
55 [IIO_POWER
] = "power",
56 [IIO_ACCEL
] = "accel",
57 [IIO_ANGL_VEL
] = "anglvel",
59 [IIO_LIGHT
] = "illuminance",
60 [IIO_INTENSITY
] = "intensity",
61 [IIO_PROXIMITY
] = "proximity",
63 [IIO_INCLI
] = "incli",
66 [IIO_TIMESTAMP
] = "timestamp",
67 [IIO_CAPACITANCE
] = "capacitance",
70 static const char * const iio_modifier_names
[] = {
74 [IIO_MOD_LIGHT_BOTH
] = "both",
75 [IIO_MOD_LIGHT_IR
] = "ir",
78 /* relies on pairs of these shared then separate */
79 static const char * const iio_chan_info_postfix
[] = {
80 [IIO_CHAN_INFO_SCALE
] = "scale",
81 [IIO_CHAN_INFO_OFFSET
] = "offset",
82 [IIO_CHAN_INFO_CALIBSCALE
] = "calibscale",
83 [IIO_CHAN_INFO_CALIBBIAS
] = "calibbias",
84 [IIO_CHAN_INFO_PEAK
] = "peak_raw",
85 [IIO_CHAN_INFO_PEAK_SCALE
] = "peak_scale",
86 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW
] = "quadrature_correction_raw",
87 [IIO_CHAN_INFO_AVERAGE_RAW
] = "mean_raw",
88 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY
]
89 = "filter_low_pass_3db_frequency",
92 const struct iio_chan_spec
93 *iio_find_channel_from_si(struct iio_dev
*indio_dev
, int si
)
97 for (i
= 0; i
< indio_dev
->num_channels
; i
++)
98 if (indio_dev
->channels
[i
].scan_index
== si
)
99 return &indio_dev
->channels
[i
];
104 * struct iio_detected_event_list - list element for events that have occurred
105 * @list: linked list header
106 * @ev: the event itself
108 struct iio_detected_event_list
{
109 struct list_head list
;
110 struct iio_event_data ev
;
114 * struct iio_event_interface - chrdev interface for an event line
115 * @dev: device assocated with event interface
116 * @wait: wait queue to allow blocking reads of events
117 * @event_list_lock: mutex to protect the list of detected events
118 * @det_events: list of detected events
119 * @max_events: maximum number of events before new ones are dropped
120 * @current_events: number of events in detected list
121 * @flags: file operations related flags including busy flag.
123 struct iio_event_interface
{
124 wait_queue_head_t wait
;
125 struct mutex event_list_lock
;
126 struct list_head det_events
;
129 struct list_head dev_attr_list
;
131 struct attribute_group group
;
134 int iio_push_event(struct iio_dev
*indio_dev
, u64 ev_code
, s64 timestamp
)
136 struct iio_event_interface
*ev_int
= indio_dev
->event_interface
;
137 struct iio_detected_event_list
*ev
;
140 /* Does anyone care? */
141 mutex_lock(&ev_int
->event_list_lock
);
142 if (test_bit(IIO_BUSY_BIT_POS
, &ev_int
->flags
)) {
143 if (ev_int
->current_events
== ev_int
->max_events
) {
144 mutex_unlock(&ev_int
->event_list_lock
);
147 ev
= kmalloc(sizeof(*ev
), GFP_KERNEL
);
150 mutex_unlock(&ev_int
->event_list_lock
);
154 ev
->ev
.timestamp
= timestamp
;
156 list_add_tail(&ev
->list
, &ev_int
->det_events
);
157 ev_int
->current_events
++;
158 mutex_unlock(&ev_int
->event_list_lock
);
159 wake_up_interruptible(&ev_int
->wait
);
161 mutex_unlock(&ev_int
->event_list_lock
);
166 EXPORT_SYMBOL(iio_push_event
);
168 /* This turns up an awful lot */
169 ssize_t
iio_read_const_attr(struct device
*dev
,
170 struct device_attribute
*attr
,
173 return sprintf(buf
, "%s\n", to_iio_const_attr(attr
)->string
);
175 EXPORT_SYMBOL(iio_read_const_attr
);
177 static ssize_t
iio_event_chrdev_read(struct file
*filep
,
182 struct iio_event_interface
*ev_int
= filep
->private_data
;
183 struct iio_detected_event_list
*el
;
184 size_t len
= sizeof(el
->ev
);
190 mutex_lock(&ev_int
->event_list_lock
);
191 if (list_empty(&ev_int
->det_events
)) {
192 if (filep
->f_flags
& O_NONBLOCK
) {
194 goto error_mutex_unlock
;
196 mutex_unlock(&ev_int
->event_list_lock
);
197 /* Blocking on device; waiting for something to be there */
198 ret
= wait_event_interruptible(ev_int
->wait
,
203 /* Single access device so no one else can get the data */
204 mutex_lock(&ev_int
->event_list_lock
);
207 el
= list_first_entry(&ev_int
->det_events
,
208 struct iio_detected_event_list
,
210 if (copy_to_user(buf
, &(el
->ev
), len
)) {
212 goto error_mutex_unlock
;
215 ev_int
->current_events
--;
216 mutex_unlock(&ev_int
->event_list_lock
);
222 mutex_unlock(&ev_int
->event_list_lock
);
228 static int iio_event_chrdev_release(struct inode
*inode
, struct file
*filep
)
230 struct iio_event_interface
*ev_int
= filep
->private_data
;
231 struct iio_detected_event_list
*el
, *t
;
233 mutex_lock(&ev_int
->event_list_lock
);
234 clear_bit(IIO_BUSY_BIT_POS
, &ev_int
->flags
);
236 * In order to maintain a clean state for reopening,
237 * clear out any awaiting events. The mask will prevent
238 * any new __iio_push_event calls running.
240 list_for_each_entry_safe(el
, t
, &ev_int
->det_events
, list
) {
244 ev_int
->current_events
= 0;
245 mutex_unlock(&ev_int
->event_list_lock
);
250 static const struct file_operations iio_event_chrdev_fileops
= {
251 .read
= iio_event_chrdev_read
,
252 .release
= iio_event_chrdev_release
,
253 .owner
= THIS_MODULE
,
254 .llseek
= noop_llseek
,
257 static int iio_event_getfd(struct iio_dev
*indio_dev
)
259 struct iio_event_interface
*ev_int
= indio_dev
->event_interface
;
265 mutex_lock(&ev_int
->event_list_lock
);
266 if (test_and_set_bit(IIO_BUSY_BIT_POS
, &ev_int
->flags
)) {
267 mutex_unlock(&ev_int
->event_list_lock
);
270 mutex_unlock(&ev_int
->event_list_lock
);
271 fd
= anon_inode_getfd("iio:event",
272 &iio_event_chrdev_fileops
, ev_int
, O_RDONLY
);
274 mutex_lock(&ev_int
->event_list_lock
);
275 clear_bit(IIO_BUSY_BIT_POS
, &ev_int
->flags
);
276 mutex_unlock(&ev_int
->event_list_lock
);
281 static int __init
iio_init(void)
285 /* Register sysfs bus */
286 ret
= bus_register(&iio_bus_type
);
289 "%s could not register bus type\n",
294 ret
= alloc_chrdev_region(&iio_devt
, 0, IIO_DEV_MAX
, "iio");
296 printk(KERN_ERR
"%s: failed to allocate char dev region\n",
298 goto error_unregister_bus_type
;
303 error_unregister_bus_type
:
304 bus_unregister(&iio_bus_type
);
309 static void __exit
iio_exit(void)
312 unregister_chrdev_region(iio_devt
, IIO_DEV_MAX
);
313 bus_unregister(&iio_bus_type
);
316 static ssize_t
iio_read_channel_info(struct device
*dev
,
317 struct device_attribute
*attr
,
320 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
321 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
323 int ret
= indio_dev
->info
->read_raw(indio_dev
, this_attr
->c
,
324 &val
, &val2
, this_attr
->address
);
329 if (ret
== IIO_VAL_INT
)
330 return sprintf(buf
, "%d\n", val
);
331 else if (ret
== IIO_VAL_INT_PLUS_MICRO
) {
333 return sprintf(buf
, "-%d.%06u\n", val
, -val2
);
335 return sprintf(buf
, "%d.%06u\n", val
, val2
);
336 } else if (ret
== IIO_VAL_INT_PLUS_NANO
) {
338 return sprintf(buf
, "-%d.%09u\n", val
, -val2
);
340 return sprintf(buf
, "%d.%09u\n", val
, val2
);
345 static ssize_t
iio_write_channel_info(struct device
*dev
,
346 struct device_attribute
*attr
,
350 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
351 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
352 int ret
, integer
= 0, fract
= 0, fract_mult
= 100000;
353 bool integer_part
= true, negative
= false;
355 /* Assumes decimal - precision based on number of digits */
356 if (!indio_dev
->info
->write_raw
)
359 if (indio_dev
->info
->write_raw_get_fmt
)
360 switch (indio_dev
->info
->write_raw_get_fmt(indio_dev
,
361 this_attr
->c
, this_attr
->address
)) {
362 case IIO_VAL_INT_PLUS_MICRO
:
365 case IIO_VAL_INT_PLUS_NANO
:
366 fract_mult
= 100000000;
378 if ('0' <= *buf
&& *buf
<= '9') {
380 integer
= integer
*10 + *buf
- '0';
382 fract
+= fract_mult
*(*buf
- '0');
387 } else if (*buf
== '\n') {
388 if (*(buf
+ 1) == '\0')
392 } else if (*buf
== '.') {
393 integer_part
= false;
406 ret
= indio_dev
->info
->write_raw(indio_dev
, this_attr
->c
,
407 integer
, fract
, this_attr
->address
);
415 int __iio_device_attr_init(struct device_attribute
*dev_attr
,
417 struct iio_chan_spec
const *chan
,
418 ssize_t (*readfunc
)(struct device
*dev
,
419 struct device_attribute
*attr
,
421 ssize_t (*writefunc
)(struct device
*dev
,
422 struct device_attribute
*attr
,
428 char *name_format
, *full_postfix
;
429 sysfs_attr_init(&dev_attr
->attr
);
431 /* Build up postfix of <extend_name>_<modifier>_postfix */
432 if (chan
->modified
&& !generic
) {
433 if (chan
->extend_name
)
434 full_postfix
= kasprintf(GFP_KERNEL
, "%s_%s_%s",
435 iio_modifier_names
[chan
440 full_postfix
= kasprintf(GFP_KERNEL
, "%s_%s",
441 iio_modifier_names
[chan
445 if (chan
->extend_name
== NULL
)
446 full_postfix
= kstrdup(postfix
, GFP_KERNEL
);
448 full_postfix
= kasprintf(GFP_KERNEL
,
453 if (full_postfix
== NULL
) {
458 if (chan
->differential
) { /* Differential can not have modifier */
461 = kasprintf(GFP_KERNEL
, "%s_%s-%s_%s",
462 iio_direction
[chan
->output
],
463 iio_chan_type_name_spec
[chan
->type
],
464 iio_chan_type_name_spec
[chan
->type
],
466 else if (chan
->indexed
)
468 = kasprintf(GFP_KERNEL
, "%s_%s%d-%s%d_%s",
469 iio_direction
[chan
->output
],
470 iio_chan_type_name_spec
[chan
->type
],
472 iio_chan_type_name_spec
[chan
->type
],
476 WARN_ON("Differential channels must be indexed\n");
478 goto error_free_full_postfix
;
480 } else { /* Single ended */
483 = kasprintf(GFP_KERNEL
, "%s_%s_%s",
484 iio_direction
[chan
->output
],
485 iio_chan_type_name_spec
[chan
->type
],
487 else if (chan
->indexed
)
489 = kasprintf(GFP_KERNEL
, "%s_%s%d_%s",
490 iio_direction
[chan
->output
],
491 iio_chan_type_name_spec
[chan
->type
],
496 = kasprintf(GFP_KERNEL
, "%s_%s_%s",
497 iio_direction
[chan
->output
],
498 iio_chan_type_name_spec
[chan
->type
],
501 if (name_format
== NULL
) {
503 goto error_free_full_postfix
;
505 dev_attr
->attr
.name
= kasprintf(GFP_KERNEL
,
509 if (dev_attr
->attr
.name
== NULL
) {
511 goto error_free_name_format
;
515 dev_attr
->attr
.mode
|= S_IRUGO
;
516 dev_attr
->show
= readfunc
;
520 dev_attr
->attr
.mode
|= S_IWUSR
;
521 dev_attr
->store
= writefunc
;
528 error_free_name_format
:
530 error_free_full_postfix
:
536 static void __iio_device_attr_deinit(struct device_attribute
*dev_attr
)
538 kfree(dev_attr
->attr
.name
);
541 int __iio_add_chan_devattr(const char *postfix
,
542 struct iio_chan_spec
const *chan
,
543 ssize_t (*readfunc
)(struct device
*dev
,
544 struct device_attribute
*attr
,
546 ssize_t (*writefunc
)(struct device
*dev
,
547 struct device_attribute
*attr
,
553 struct list_head
*attr_list
)
556 struct iio_dev_attr
*iio_attr
, *t
;
558 iio_attr
= kzalloc(sizeof *iio_attr
, GFP_KERNEL
);
559 if (iio_attr
== NULL
) {
563 ret
= __iio_device_attr_init(&iio_attr
->dev_attr
,
565 readfunc
, writefunc
, generic
);
567 goto error_iio_dev_attr_free
;
569 iio_attr
->address
= mask
;
570 list_for_each_entry(t
, attr_list
, l
)
571 if (strcmp(t
->dev_attr
.attr
.name
,
572 iio_attr
->dev_attr
.attr
.name
) == 0) {
574 dev_err(dev
, "tried to double register : %s\n",
575 t
->dev_attr
.attr
.name
);
577 goto error_device_attr_deinit
;
579 list_add(&iio_attr
->l
, attr_list
);
583 error_device_attr_deinit
:
584 __iio_device_attr_deinit(&iio_attr
->dev_attr
);
585 error_iio_dev_attr_free
:
591 static int iio_device_add_channel_sysfs(struct iio_dev
*indio_dev
,
592 struct iio_chan_spec
const *chan
)
594 int ret
, i
, attrcount
= 0;
596 if (chan
->channel
< 0)
599 ret
= __iio_add_chan_devattr(iio_data_type_name
[chan
->processed_val
],
601 &iio_read_channel_info
,
603 &iio_write_channel_info
: NULL
),
607 &indio_dev
->channel_attr_list
);
612 for_each_set_bit(i
, &chan
->info_mask
, sizeof(long)*8) {
613 ret
= __iio_add_chan_devattr(iio_chan_info_postfix
[i
/2],
615 &iio_read_channel_info
,
616 &iio_write_channel_info
,
620 &indio_dev
->channel_attr_list
);
621 if (ret
== -EBUSY
&& (i
%2 == 0)) {
634 static void iio_device_remove_and_free_read_attr(struct iio_dev
*indio_dev
,
635 struct iio_dev_attr
*p
)
637 kfree(p
->dev_attr
.attr
.name
);
641 static ssize_t
iio_show_dev_name(struct device
*dev
,
642 struct device_attribute
*attr
,
645 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
646 return sprintf(buf
, "%s\n", indio_dev
->name
);
649 static DEVICE_ATTR(name
, S_IRUGO
, iio_show_dev_name
, NULL
);
651 static int iio_device_register_sysfs(struct iio_dev
*indio_dev
)
653 int i
, ret
= 0, attrcount
, attrn
, attrcount_orig
= 0;
654 struct iio_dev_attr
*p
, *n
;
655 struct attribute
**attr
;
657 /* First count elements in any existing group */
658 if (indio_dev
->info
->attrs
) {
659 attr
= indio_dev
->info
->attrs
->attrs
;
660 while (*attr
++ != NULL
)
663 attrcount
= attrcount_orig
;
665 * New channel registration method - relies on the fact a group does
666 * not need to be initialized if it is name is NULL.
668 INIT_LIST_HEAD(&indio_dev
->channel_attr_list
);
669 if (indio_dev
->channels
)
670 for (i
= 0; i
< indio_dev
->num_channels
; i
++) {
671 ret
= iio_device_add_channel_sysfs(indio_dev
,
675 goto error_clear_attrs
;
682 indio_dev
->chan_attr_group
.attrs
= kcalloc(attrcount
+ 1,
683 sizeof(indio_dev
->chan_attr_group
.attrs
[0]),
685 if (indio_dev
->chan_attr_group
.attrs
== NULL
) {
687 goto error_clear_attrs
;
689 /* Copy across original attributes */
690 if (indio_dev
->info
->attrs
)
691 memcpy(indio_dev
->chan_attr_group
.attrs
,
692 indio_dev
->info
->attrs
->attrs
,
693 sizeof(indio_dev
->chan_attr_group
.attrs
[0])
695 attrn
= attrcount_orig
;
696 /* Add all elements from the list. */
697 list_for_each_entry(p
, &indio_dev
->channel_attr_list
, l
)
698 indio_dev
->chan_attr_group
.attrs
[attrn
++] = &p
->dev_attr
.attr
;
700 indio_dev
->chan_attr_group
.attrs
[attrn
++] = &dev_attr_name
.attr
;
702 indio_dev
->groups
[indio_dev
->groupcounter
++] =
703 &indio_dev
->chan_attr_group
;
708 list_for_each_entry_safe(p
, n
,
709 &indio_dev
->channel_attr_list
, l
) {
711 iio_device_remove_and_free_read_attr(indio_dev
, p
);
717 static void iio_device_unregister_sysfs(struct iio_dev
*indio_dev
)
720 struct iio_dev_attr
*p
, *n
;
722 list_for_each_entry_safe(p
, n
, &indio_dev
->channel_attr_list
, l
) {
724 iio_device_remove_and_free_read_attr(indio_dev
, p
);
726 kfree(indio_dev
->chan_attr_group
.attrs
);
729 static const char * const iio_ev_type_text
[] = {
730 [IIO_EV_TYPE_THRESH
] = "thresh",
731 [IIO_EV_TYPE_MAG
] = "mag",
732 [IIO_EV_TYPE_ROC
] = "roc",
733 [IIO_EV_TYPE_THRESH_ADAPTIVE
] = "thresh_adaptive",
734 [IIO_EV_TYPE_MAG_ADAPTIVE
] = "mag_adaptive",
737 static const char * const iio_ev_dir_text
[] = {
738 [IIO_EV_DIR_EITHER
] = "either",
739 [IIO_EV_DIR_RISING
] = "rising",
740 [IIO_EV_DIR_FALLING
] = "falling"
743 static ssize_t
iio_ev_state_store(struct device
*dev
,
744 struct device_attribute
*attr
,
748 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
749 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
753 ret
= strtobool(buf
, &val
);
757 ret
= indio_dev
->info
->write_event_config(indio_dev
,
760 return (ret
< 0) ? ret
: len
;
763 static ssize_t
iio_ev_state_show(struct device
*dev
,
764 struct device_attribute
*attr
,
767 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
768 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
769 int val
= indio_dev
->info
->read_event_config(indio_dev
,
775 return sprintf(buf
, "%d\n", val
);
778 static ssize_t
iio_ev_value_show(struct device
*dev
,
779 struct device_attribute
*attr
,
782 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
783 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
786 ret
= indio_dev
->info
->read_event_value(indio_dev
,
787 this_attr
->address
, &val
);
791 return sprintf(buf
, "%d\n", val
);
794 static ssize_t
iio_ev_value_store(struct device
*dev
,
795 struct device_attribute
*attr
,
799 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
800 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
804 if (!indio_dev
->info
->write_event_value
)
807 ret
= strict_strtoul(buf
, 10, &val
);
811 ret
= indio_dev
->info
->write_event_value(indio_dev
, this_attr
->address
,
819 static int iio_device_add_event_sysfs(struct iio_dev
*indio_dev
,
820 struct iio_chan_spec
const *chan
)
822 int ret
= 0, i
, attrcount
= 0;
825 if (!chan
->event_mask
)
828 for_each_set_bit(i
, &chan
->event_mask
, sizeof(chan
->event_mask
)*8) {
829 postfix
= kasprintf(GFP_KERNEL
, "%s_%s_en",
830 iio_ev_type_text
[i
/IIO_EV_DIR_MAX
],
831 iio_ev_dir_text
[i
%IIO_EV_DIR_MAX
]);
832 if (postfix
== NULL
) {
837 mask
= IIO_MOD_EVENT_CODE(chan
->type
, 0, chan
->channel
,
840 else if (chan
->differential
)
841 mask
= IIO_EVENT_CODE(chan
->type
,
849 mask
= IIO_UNMOD_EVENT_CODE(chan
->type
,
854 ret
= __iio_add_chan_devattr(postfix
,
861 &indio_dev
->event_interface
->
867 postfix
= kasprintf(GFP_KERNEL
, "%s_%s_value",
868 iio_ev_type_text
[i
/IIO_EV_DIR_MAX
],
869 iio_ev_dir_text
[i
%IIO_EV_DIR_MAX
]);
870 if (postfix
== NULL
) {
874 ret
= __iio_add_chan_devattr(postfix
, chan
,
880 &indio_dev
->event_interface
->
892 static inline void __iio_remove_event_config_attrs(struct iio_dev
*indio_dev
)
894 struct iio_dev_attr
*p
, *n
;
895 list_for_each_entry_safe(p
, n
,
896 &indio_dev
->event_interface
->
898 kfree(p
->dev_attr
.attr
.name
);
903 static inline int __iio_add_event_config_attrs(struct iio_dev
*indio_dev
)
905 int j
, ret
, attrcount
= 0;
907 INIT_LIST_HEAD(&indio_dev
->event_interface
->dev_attr_list
);
908 /* Dynically created from the channels array */
909 for (j
= 0; j
< indio_dev
->num_channels
; j
++) {
910 ret
= iio_device_add_event_sysfs(indio_dev
,
911 &indio_dev
->channels
[j
]);
913 goto error_clear_attrs
;
919 __iio_remove_event_config_attrs(indio_dev
);
924 static bool iio_check_for_dynamic_events(struct iio_dev
*indio_dev
)
928 for (j
= 0; j
< indio_dev
->num_channels
; j
++)
929 if (indio_dev
->channels
[j
].event_mask
!= 0)
934 static void iio_setup_ev_int(struct iio_event_interface
*ev_int
)
936 mutex_init(&ev_int
->event_list_lock
);
937 /* discussion point - make this variable? */
938 ev_int
->max_events
= 10;
939 ev_int
->current_events
= 0;
940 INIT_LIST_HEAD(&ev_int
->det_events
);
941 init_waitqueue_head(&ev_int
->wait
);
944 static const char *iio_event_group_name
= "events";
945 static int iio_device_register_eventset(struct iio_dev
*indio_dev
)
947 struct iio_dev_attr
*p
;
948 int ret
= 0, attrcount_orig
= 0, attrcount
, attrn
;
949 struct attribute
**attr
;
951 if (!(indio_dev
->info
->event_attrs
||
952 iio_check_for_dynamic_events(indio_dev
)))
955 indio_dev
->event_interface
=
956 kzalloc(sizeof(struct iio_event_interface
), GFP_KERNEL
);
957 if (indio_dev
->event_interface
== NULL
) {
962 iio_setup_ev_int(indio_dev
->event_interface
);
963 if (indio_dev
->info
->event_attrs
!= NULL
) {
964 attr
= indio_dev
->info
->event_attrs
->attrs
;
965 while (*attr
++ != NULL
)
968 attrcount
= attrcount_orig
;
969 if (indio_dev
->channels
) {
970 ret
= __iio_add_event_config_attrs(indio_dev
);
972 goto error_free_setup_event_lines
;
976 indio_dev
->event_interface
->group
.name
= iio_event_group_name
;
977 indio_dev
->event_interface
->group
.attrs
= kcalloc(attrcount
+ 1,
978 sizeof(indio_dev
->event_interface
->group
.attrs
[0]),
980 if (indio_dev
->event_interface
->group
.attrs
== NULL
) {
982 goto error_free_setup_event_lines
;
984 if (indio_dev
->info
->event_attrs
)
985 memcpy(indio_dev
->event_interface
->group
.attrs
,
986 indio_dev
->info
->event_attrs
->attrs
,
987 sizeof(indio_dev
->event_interface
->group
.attrs
[0])
989 attrn
= attrcount_orig
;
990 /* Add all elements from the list. */
991 list_for_each_entry(p
,
992 &indio_dev
->event_interface
->dev_attr_list
,
994 indio_dev
->event_interface
->group
.attrs
[attrn
++] =
996 indio_dev
->groups
[indio_dev
->groupcounter
++] =
997 &indio_dev
->event_interface
->group
;
1001 error_free_setup_event_lines
:
1002 __iio_remove_event_config_attrs(indio_dev
);
1003 kfree(indio_dev
->event_interface
);
1009 static void iio_device_unregister_eventset(struct iio_dev
*indio_dev
)
1011 if (indio_dev
->event_interface
== NULL
)
1013 __iio_remove_event_config_attrs(indio_dev
);
1014 kfree(indio_dev
->event_interface
->group
.attrs
);
1015 kfree(indio_dev
->event_interface
);
1018 static void iio_dev_release(struct device
*device
)
1020 struct iio_dev
*indio_dev
= container_of(device
, struct iio_dev
, dev
);
1021 cdev_del(&indio_dev
->chrdev
);
1022 if (indio_dev
->modes
& INDIO_BUFFER_TRIGGERED
)
1023 iio_device_unregister_trigger_consumer(indio_dev
);
1024 iio_device_unregister_eventset(indio_dev
);
1025 iio_device_unregister_sysfs(indio_dev
);
1028 static struct device_type iio_dev_type
= {
1029 .name
= "iio_device",
1030 .release
= iio_dev_release
,
1033 struct iio_dev
*iio_allocate_device(int sizeof_priv
)
1035 struct iio_dev
*dev
;
1038 alloc_size
= sizeof(struct iio_dev
);
1040 alloc_size
= ALIGN(alloc_size
, IIO_ALIGN
);
1041 alloc_size
+= sizeof_priv
;
1043 /* ensure 32-byte alignment of whole construct ? */
1044 alloc_size
+= IIO_ALIGN
- 1;
1046 dev
= kzalloc(alloc_size
, GFP_KERNEL
);
1049 dev
->dev
.groups
= dev
->groups
;
1050 dev
->dev
.type
= &iio_dev_type
;
1051 dev
->dev
.bus
= &iio_bus_type
;
1052 device_initialize(&dev
->dev
);
1053 dev_set_drvdata(&dev
->dev
, (void *)dev
);
1054 mutex_init(&dev
->mlock
);
1056 dev
->id
= ida_simple_get(&iio_ida
, 0, 0, GFP_KERNEL
);
1058 /* cannot use a dev_err as the name isn't available */
1059 printk(KERN_ERR
"Failed to get id\n");
1063 dev_set_name(&dev
->dev
, "iio:device%d", dev
->id
);
1068 EXPORT_SYMBOL(iio_allocate_device
);
1070 void iio_free_device(struct iio_dev
*dev
)
1073 ida_simple_remove(&iio_ida
, dev
->id
);
1077 EXPORT_SYMBOL(iio_free_device
);
1080 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
1082 static int iio_chrdev_open(struct inode
*inode
, struct file
*filp
)
1084 struct iio_dev
*indio_dev
= container_of(inode
->i_cdev
,
1085 struct iio_dev
, chrdev
);
1086 filp
->private_data
= indio_dev
;
1088 return iio_chrdev_buffer_open(indio_dev
);
1092 * iio_chrdev_release() - chrdev file close buffer access and ioctls
1094 static int iio_chrdev_release(struct inode
*inode
, struct file
*filp
)
1096 iio_chrdev_buffer_release(container_of(inode
->i_cdev
,
1097 struct iio_dev
, chrdev
));
1101 /* Somewhat of a cross file organization violation - ioctls here are actually
1103 static long iio_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
1105 struct iio_dev
*indio_dev
= filp
->private_data
;
1106 int __user
*ip
= (int __user
*)arg
;
1109 if (cmd
== IIO_GET_EVENT_FD_IOCTL
) {
1110 fd
= iio_event_getfd(indio_dev
);
1111 if (copy_to_user(ip
, &fd
, sizeof(fd
)))
1118 static const struct file_operations iio_buffer_fileops
= {
1119 .read
= iio_buffer_read_first_n_outer_addr
,
1120 .release
= iio_chrdev_release
,
1121 .open
= iio_chrdev_open
,
1122 .poll
= iio_buffer_poll_addr
,
1123 .owner
= THIS_MODULE
,
1124 .llseek
= noop_llseek
,
1125 .unlocked_ioctl
= iio_ioctl
,
1126 .compat_ioctl
= iio_ioctl
,
1129 int iio_device_register(struct iio_dev
*indio_dev
)
1133 /* configure elements for the chrdev */
1134 indio_dev
->dev
.devt
= MKDEV(MAJOR(iio_devt
), indio_dev
->id
);
1136 ret
= iio_device_register_sysfs(indio_dev
);
1138 dev_err(indio_dev
->dev
.parent
,
1139 "Failed to register sysfs interfaces\n");
1142 ret
= iio_device_register_eventset(indio_dev
);
1144 dev_err(indio_dev
->dev
.parent
,
1145 "Failed to register event set\n");
1146 goto error_free_sysfs
;
1148 if (indio_dev
->modes
& INDIO_BUFFER_TRIGGERED
)
1149 iio_device_register_trigger_consumer(indio_dev
);
1151 ret
= device_add(&indio_dev
->dev
);
1153 goto error_unreg_eventset
;
1154 cdev_init(&indio_dev
->chrdev
, &iio_buffer_fileops
);
1155 indio_dev
->chrdev
.owner
= indio_dev
->info
->driver_module
;
1156 ret
= cdev_add(&indio_dev
->chrdev
, indio_dev
->dev
.devt
, 1);
1158 goto error_del_device
;
1162 device_del(&indio_dev
->dev
);
1163 error_unreg_eventset
:
1164 iio_device_unregister_eventset(indio_dev
);
1166 iio_device_unregister_sysfs(indio_dev
);
1170 EXPORT_SYMBOL(iio_device_register
);
1172 void iio_device_unregister(struct iio_dev
*indio_dev
)
1174 device_unregister(&indio_dev
->dev
);
1176 EXPORT_SYMBOL(iio_device_unregister
);
1177 subsys_initcall(iio_init
);
1178 module_exit(iio_exit
);
1180 MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
1181 MODULE_DESCRIPTION("Industrial I/O core");
1182 MODULE_LICENSE("GPL");