@@ ... @@ struct aim_channel {
 	size_t mbo_offs;
 	struct mbo *stacked_mbo;
 	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
-	atomic_t access_ref;
+	int access_ref;
 	struct list_head list;
 };
@@ ... @@ static int aim_open(struct inode *inode, struct file *filp)
 		return -EBUSY;
 	}
-	if (!atomic_inc_and_test(&c->access_ref)) {
+	if (c->access_ref) {
 		pr_info("WARN: Device is busy\n");
-		atomic_dec(&c->access_ref);
 		mutex_unlock(&c->io_mutex);
 		return -EBUSY;
 	}
 	ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
-	if (ret)
-		atomic_dec(&c->access_ref);
+	if (!ret)
+		c->access_ref = 1;
 	mutex_unlock(&c->io_mutex);
 	return ret;
 }
@@ ... @@ static int aim_close(struct inode *inode, struct file *filp)
 	mutex_lock(&c->io_mutex);
 	spin_lock(&c->unlink);
-	atomic_dec(&c->access_ref);
+	c->access_ref = 0;
 	spin_unlock(&c->unlink);
 	if (c->dev) {
 		stop_channel(c);
@@ ... @@ static int aim_disconnect_channel(struct most_interface *iface,
 	spin_lock(&c->unlink);
 	c->dev = NULL;
 	spin_unlock(&c->unlink);
-	if (!atomic_read(&c->access_ref)) {
+	if (c->access_ref) {
 		stop_channel(c);
 		wake_up_interruptible(&c->wq);
 		mutex_unlock(&c->io_mutex);
@@ ... @@ static int aim_rx_completion(struct mbo *mbo)
 		return -ENXIO;
 
 	spin_lock(&c->unlink);
-	if (atomic_read(&c->access_ref) || !c->dev) {
+	if (!c->access_ref || !c->dev) {
 		spin_unlock(&c->unlink);
 		return -EFAULT;
 	}
@@ ... @@ static int aim_probe(struct most_interface *iface, int channel_id,
 	c->cfg = cfg;
 	c->channel_id = channel_id;
 	c->mbo_offs = 0;
-	atomic_set(&c->access_ref, -1);
+	c->access_ref = 0;
 	spin_lock_init(&c->unlink);
 	INIT_KFIFO(c->fifo);
 	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
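
The conversion works because access_ref never changes outside a lock: open and
close modify it with c->io_mutex held (close additionally under c->unlink), and
the completion path only takes a snapshot of it under c->unlink. With updates
serialized that way, atomic_t bought no extra safety, and the old -1/0 encoding
behind atomic_inc_and_test() obscured what is really a single-opener busy flag.
The following is a minimal standalone sketch of that pattern, assuming only
that one mutex serializes open and release; the names (single_open_dev,
dev_open, dev_release) are hypothetical and not part of the driver.

#include <linux/mutex.h>
#include <linux/errno.h>

/* Hypothetical miniature of the single-opener pattern used above:
 * "busy" can be a plain int because every read and write of it
 * happens with "lock" held.
 */
struct single_open_dev {
	struct mutex lock;	/* serializes all access to "busy" */
	int busy;		/* 1 while a file handle owns the device */
};

static void dev_init(struct single_open_dev *d)
{
	mutex_init(&d->lock);
	d->busy = 0;		/* matches "c->access_ref = 0" in probe */
}

static int dev_open(struct single_open_dev *d)
{
	int ret = 0;

	mutex_lock(&d->lock);
	if (d->busy)
		ret = -EBUSY;	/* a second opener is turned away */
	else
		d->busy = 1;	/* the first opener claims the device */
	mutex_unlock(&d->lock);
	return ret;
}

static void dev_release(struct single_open_dev *d)
{
	mutex_lock(&d->lock);
	d->busy = 0;		/* device is free for the next open() */
	mutex_unlock(&d->lock);
}

The driver keeps the extra c->unlink spinlock only so the completion path can
test the flag without sleeping on io_mutex; the invariant is the same as in the
sketch, which is why a plain int suffices.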