        }

        INIT_LIST_HEAD(&ff->write_entry);
-       atomic_set(&ff->count, 1);
+       refcount_set(&ff->count, 1);
        RB_CLEAR_NODE(&ff->polled_node);
        init_waitqueue_head(&ff->poll_wait);

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
-       atomic_inc(&ff->count);
+       refcount_inc(&ff->count);
        return ff;
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
-       if (atomic_dec_and_test(&ff->count)) {
+       if (refcount_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;

                if (ff->fc->no_open) {

void fuse_sync_release(struct fuse_file *ff, int flags)
{
-       WARN_ON(atomic_read(&ff->count) != 1);
+       WARN_ON(refcount_read(&ff->count) > 1);
        fuse_prepare_release(ff, flags, FUSE_RELEASE);
        /*
         * iput(NULL) is a no-op and since the refcount is 1 and everything's
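The conversion above is mechanical because the refcount_t API mirrors the atomic_t calls it replaces: refcount_set() for initialization, refcount_inc() to take a reference, refcount_dec_and_test() to drop one and detect the final put, and refcount_read() for diagnostics. What changes is the failure behaviour: refcount_t saturates instead of wrapping on overflow and warns on increment-from-zero and on underflow. A minimal sketch of the same get/put pattern on a hypothetical object (my_obj and its helpers are illustrative names, not part of this patch):

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
        refcount_t count;       /* hypothetical refcounted object, not FUSE code */
};

static struct my_obj *my_obj_alloc(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                refcount_set(&obj->count, 1);   /* creator holds the first reference */
        return obj;
}

static struct my_obj *my_obj_get(struct my_obj *obj)
{
        refcount_inc(&obj->count);              /* WARNs if the count was already 0 */
        return obj;
}

static void my_obj_put(struct my_obj *obj)
{
        if (refcount_dec_and_test(&obj->count)) /* true only for the final reference */
                kfree(obj);
}

Note that the WARN_ON() in fuse_sync_release() is also relaxed from != 1 to > 1, presumably because the anomaly worth flagging at that point is a leftover extra reference; a count of 0 would mean the fuse_file had already been freed by fuse_file_put(), which a read of the stale counter could not reliably detect anyway.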
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/xattr.h>
+#include <linux/refcount.h>

/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32

        u64 nodeid;

        /** Refcount */
-       atomic_t count;
+       refcount_t count;

        /** FOPEN_* flags returned by open */
        u32 open_flags;
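On the header side only the field's type and an extra include change. refcount_t is a thin wrapper around atomic_t; its definition in <linux/refcount.h> is essentially:

typedef struct refcount_struct {
        atomic_t refs;
} refcount_t;

so struct fuse_file keeps its size and layout while the count field gains the checked accessors used in the hunks above.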