as much buffer functionality as possible. Note almost all of these
are optional.
-mark_in_use, unmark_in_use
- Basically indicate that not changes should be made to the buffer state that
- will effect the form of the data being captures (e.g. scan elements or length)
-
store_to
If possible, push data to the buffer.
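Since almost all of these callbacks are optional, callers are expected to check each pointer before use; only read_first_n must exist. A minimal sketch of that pattern (push_if_supported is a hypothetical helper, not part of this patch):

	/* Hedged illustration: guard the optional store_to callback
	 * before pushing a scan; only read_first_n may be assumed
	 * to exist. */
	static int push_if_supported(struct iio_buffer *buffer, u8 *data,
				     s64 timestamp)
	{
		if (!buffer->access->store_to)
			return -ENOSYS;
		return buffer->access->store_to(buffer, data, timestamp);
	}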
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @mark_in_use: reference counting, typically to prevent module removal
- * @unmark_in_use: reduce reference count when no longer using buffer
* @store_to: actually store stuff to the buffer
* @read_first_n: try to get a specified number of bytes (must exist)
* @request_update: if a parameter change has been marked, update underlying
* storage. The core code should cope with any of them not existing.
**/
struct iio_buffer_access_funcs {
- void (*mark_in_use)(struct iio_buffer *buffer);
- void (*unmark_in_use)(struct iio_buffer *buffer);
-
int (*store_to)(struct iio_buffer *buffer, u8 *data, s64 timestamp);
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
char __user *buf);
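With mark_in_use and unmark_in_use gone, a buffer implementation only wires up the callbacks it actually supports. A hedged sketch of a minimal table (the my_* names are hypothetical):

	static const struct iio_buffer_access_funcs my_access_funcs = {
		.store_to = &my_store_to,		/* optional */
		.read_first_n = &my_read_first_n,	/* must exist */
		.request_update = &my_request_update,	/* optional */
	};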
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev);
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev);
-
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps);
#else
-static inline int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{}
-
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
return 0;
}
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
-{
- struct iio_buffer *rb = indio_dev->buffer;
- if (!rb)
- return 0;
- if (rb->access->mark_in_use)
- rb->access->mark_in_use(rb);
- return 0;
-}
-
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{
- struct iio_buffer *rb = indio_dev->buffer;
-
- if (!rb)
- return;
- if (rb->access->unmark_in_use)
- rb->access->unmark_in_use(rb);
-}
-
void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
goto error_ret;
}
}
- if (buffer->access->mark_in_use)
- buffer->access->mark_in_use(buffer);
/* Definitely possible for devices to support both of these. */
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO
"Buffer not started: no trigger\n");
ret = -EINVAL;
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
goto error_ret;
}
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
printk(KERN_INFO
"Buffer not started:"
"postenable failed\n");
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = previous_mode;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
goto error_ret;
}
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable) {
ret = indio_dev->setup_ops->postdisable(indio_dev);
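On the comment above about supporting both buffer modes: a device may advertise triggered and hardware buffering together, and currentmode selects one at enable time. A hypothetical probe-time sketch (INDIO_BUFFER_HARDWARE comes from the wider IIO API of this era, not from these hunks):

	/* Sketch: advertise both buffer modes; the enable path above
	 * then picks INDIO_BUFFER_TRIGGERED only when a trigger is
	 * actually bound. */
	static void my_setup_modes(struct iio_dev *indio_dev)
	{
		indio_dev->modes = INDIO_DIRECT_MODE
				   | INDIO_BUFFER_TRIGGERED
				   | INDIO_BUFFER_HARDWARE;
	}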
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
- unsigned int ret;
if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
return -EBUSY;
filp->private_data = indio_dev;
- ret = iio_chrdev_buffer_open(indio_dev);
- if (ret < 0)
- clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
-
- return ret;
+ return 0;
}
/**
 * iio_chrdev_release() - chrdev file close buffer release and free
 **/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
- iio_chrdev_buffer_release(indio_dev);
clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
return 0;
}
struct iio_kfifo {
struct iio_buffer buffer;
struct kfifo kf;
- int use_count;
int update_needed;
- struct mutex use_lock;
};
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
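Each callback receives the embedded struct iio_buffer and recovers the wrapper with container_of() via iio_to_kfifo(). A hedged illustration (my_kfifo_has_data is a hypothetical helper, not part of this patch):

	/* Sketch only: map from the core's struct iio_buffer back to
	 * the enclosing struct iio_kfifo to reach the kfifo itself. */
	static bool my_kfifo_has_data(struct iio_buffer *r)
	{
		struct iio_kfifo *buf = iio_to_kfifo(r);

		return !kfifo_is_empty(&buf->kf);
	}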
static int iio_request_update_kfifo(struct iio_buffer *r)
{
	int ret = 0;
struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
if (!buf->update_needed)
goto error_ret;
- if (buf->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
kfifo_free(&buf->kf);
ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
buf->buffer.length);
error_ret:
- mutex_unlock(&buf->use_lock);
return ret;
}
-static void iio_mark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count++;
- mutex_unlock(&buf->use_lock);
-}
-
-static void iio_unmark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count--;
- mutex_unlock(&buf->use_lock);
-}
-
static int iio_get_length_kfifo(struct iio_buffer *r)
{
return r->length;
}
-static inline void __iio_init_kfifo(struct iio_kfifo *kf)
-{
- mutex_init(&kf->use_lock);
-}
-
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
kf->update_needed = true;
iio_buffer_init(&kf->buffer);
kf->buffer.attrs = &iio_kfifo_attribute_group;
- __iio_init_kfifo(kf);
return &kf->buffer;
}
}
const struct iio_buffer_access_funcs kfifo_access_funcs = {
- .mark_in_use = &iio_mark_kfifo_in_use,
- .unmark_in_use = &iio_unmark_kfifo_in_use,
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
.request_update = &iio_request_update_kfifo,
* @read_p: read pointer (oldest available)
* @write_p: write pointer
* @half_p: half buffer length behind write_p (event generation)
- * @use_count: reference count to prevent resizing when in use
* @update_needed: flag to indicate a change in size has been requested
- * @use_lock: lock to prevent change in size when in use
*
* Note that the first element of all ring buffers must be a
* struct iio_buffer.
unsigned char *write_p;
/* used to act as a point at which to signal an event */
unsigned char *half_p;
- int use_count;
int update_needed;
- spinlock_t use_lock;
};
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
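The kernel-doc above implies simple wrap-around pointer movement: each store advances write_p by one datum, wrapping at the end of the backing storage, with half_p trailing it by half the buffer length. A loose sketch (next_datum is a hypothetical helper; the real store path also handles read_p collisions and event signalling):

	static unsigned char *next_datum(struct iio_sw_ring_buffer *ring,
					 unsigned char *p)
	{
		p += ring->buf.bytes_per_datum;
		/* wrap back to the start of the backing storage */
		if (p == ring->data
			 + ring->buf.length * ring->buf.bytes_per_datum)
			p = ring->data;
		return p;
	}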
return ring->data ? 0 : -ENOMEM;
}
-static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
-{
- spin_lock_init(&ring->use_lock);
-}
-
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
kfree(ring->data);
}
-static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count++;
- spin_unlock(&ring->use_lock);
-}
-
-static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count--;
- spin_unlock(&ring->use_lock);
-}
-
-
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
* in the device driver */
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
r->stufftoread = false;
- spin_lock(&ring->use_lock);
if (!ring->update_needed)
goto error_ret;
- if (ring->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
__iio_free_sw_ring_buffer(ring);
ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
ring->buf.length);
error_ret:
- spin_unlock(&ring->use_lock);
return ret;
}
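Per the comment above, store_to runs in the bottom half of the device's data-ready handling, typically the threaded part of a trigger handler. A heavily hedged sketch (the my_* names are hypothetical; struct iio_poll_func and iio_trigger_notify_done() are from the wider IIO core of this era):

	static irqreturn_t my_trigger_handler(int irq, void *p)
	{
		struct iio_poll_func *pf = p;
		struct iio_dev *indio_dev = pf->indio_dev;
		u8 scan[16];	/* assume the packed scan fits here */

		my_read_scan(indio_dev, scan);	/* hypothetical device read */
		if (indio_dev->buffer->access->store_to)
			indio_dev->buffer->access->store_to(indio_dev->buffer,
							    scan, pf->timestamp);
		iio_trigger_notify_done(indio_dev->trig);
		return IRQ_HANDLED;
	}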
ring->update_needed = true;
buf = &ring->buf;
iio_buffer_init(buf);
- __iio_init_sw_ring_buffer(ring);
buf->attrs = &iio_ring_attribute_group;
return buf;
EXPORT_SYMBOL(iio_sw_rb_free);
const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .mark_in_use = &iio_mark_sw_rb_in_use,
- .unmark_in_use = &iio_unmark_sw_rb_in_use,
.store_to = &iio_store_to_sw_rb,
.read_first_n = &iio_read_first_n_sw_rb,
.request_update = &iio_request_update_sw_rb,