*/
struct iio_event_interface {
wait_queue_head_t wait;
- struct mutex event_list_lock;
DECLARE_KFIFO(det_events, struct iio_event_data, 16);
struct list_head dev_attr_list;
int copied;
/* Does anyone care? */
- mutex_lock(&ev_int->event_list_lock);
+ spin_lock(&ev_int->wait.lock);
if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
ev.id = ev_code;
ev.timestamp = timestamp;
copied = kfifo_put(&ev_int->det_events, &ev);
-
- mutex_unlock(&ev_int->event_list_lock);
if (copied != 0)
- wake_up_interruptible(&ev_int->wait);
- } else
- mutex_unlock(&ev_int->event_list_lock);
+ wake_up_locked(&ev_int->wait);
+ }
+ spin_unlock(&ev_int->wait.lock);
return 0;
}
if (count < sizeof(struct iio_event_data))
return -EINVAL;
- mutex_lock(&ev_int->event_list_lock);
+ spin_lock(&ev_int->wait.lock);
if (kfifo_is_empty(&ev_int->det_events)) {
if (filep->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
- goto error_mutex_unlock;
+ goto error_unlock;
}
- mutex_unlock(&ev_int->event_list_lock);
/* Blocking on device; waiting for something to be there */
- ret = wait_event_interruptible(ev_int->wait,
+ ret = wait_event_interruptible_locked(ev_int->wait,
!kfifo_is_empty(&ev_int->det_events));
if (ret)
- goto error_ret;
+ goto error_unlock;
/* Single access device so no one else can get the data */
- mutex_lock(&ev_int->event_list_lock);
}
- mutex_unlock(&ev_int->event_list_lock);
ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
-error_mutex_unlock:
- mutex_unlock(&ev_int->event_list_lock);
-error_ret:
+error_unlock:
+ spin_unlock(&ev_int->wait.lock);
+
return ret ? ret : copied;
}
{
struct iio_event_interface *ev_int = filep->private_data;
- mutex_lock(&ev_int->event_list_lock);
+ spin_lock(&ev_int->wait.lock);
clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
/*
 * In order to maintain a clean state for reopening,
 * clear out any awaiting events. The mask will prevent
 * any new __iio_push_event calls running.
 */
kfifo_reset_out(&ev_int->det_events);
- mutex_unlock(&ev_int->event_list_lock);
+ spin_unlock(&ev_int->wait.lock);
return 0;
}
if (ev_int == NULL)
return -ENODEV;
- mutex_lock(&ev_int->event_list_lock);
+ spin_lock(&ev_int->wait.lock);
if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- mutex_unlock(&ev_int->event_list_lock);
+ spin_unlock(&ev_int->wait.lock);
return -EBUSY;
}
- mutex_unlock(&ev_int->event_list_lock);
+ spin_unlock(&ev_int->wait.lock);
fd = anon_inode_getfd("iio:event",
&iio_event_chrdev_fileops, ev_int, O_RDONLY);
if (fd < 0) {
- mutex_lock(&ev_int->event_list_lock);
+ spin_lock(&ev_int->wait.lock);
clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- mutex_unlock(&ev_int->event_list_lock);
+ spin_unlock(&ev_int->wait.lock);
}
return fd;
}
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
- mutex_init(&ev_int->event_list_lock);
+ /*
+  * No dedicated lock to initialise any more: ev_int state is now
+  * serialised via the waitqueue's own spinlock (ev_int->wait.lock),
+  * which init_waitqueue_head() below sets up.
+  */
INIT_KFIFO(ev_int->det_events);
init_waitqueue_head(&ev_int->wait);
}