}
fuse_req_init_context(req);
- req->waiting = 1;
- req->background = for_background;
+ __set_bit(FR_WAITING, &req->flags);
+ if (for_background)
+ __set_bit(FR_BACKGROUND, &req->flags);
+
return req;
out:
req = get_reserved_req(fc, file);
fuse_req_init_context(req);
- req->waiting = 1;
- req->background = 0;
+ __set_bit(FR_WAITING, &req->flags);
+ __clear_bit(FR_BACKGROUND, &req->flags);
return req;
}
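
Note: at this point the request is brand new and visible only to the
allocating task, which is why the non-atomic __set_bit()/__clear_bit()
variants are sufficient; once the request is queued, the atomic
set_bit()/clear_bit()/test_bit() family is used instead. A minimal
userspace sketch of that distinction, with GCC/Clang __atomic builtins
standing in for the kernel's bitops (all helper names here are
illustrative, not kernel API):

    #include <stdio.h>

    #define BIT(nr) (1UL << (nr))

    enum fuse_req_flag { FR_ISREPLY, FR_FORCE, FR_BACKGROUND, FR_WAITING,
                         FR_ABORTED, FR_INTERRUPTED, FR_LOCKED };

    /* Like __set_bit(): plain read-modify-write, only safe while a
     * single context can see the flags word. */
    static void nonatomic_set_bit(int nr, unsigned long *addr)
    {
        *addr |= BIT(nr);
    }

    /* Like set_bit(): atomic RMW, safe once the request is shared. */
    static void atomic_set_bit(int nr, unsigned long *addr)
    {
        __atomic_fetch_or(addr, BIT(nr), __ATOMIC_RELAXED);
    }

    /* Like test_bit(): a plain load of one bit. */
    static int test_bit_model(int nr, const unsigned long *addr)
    {
        return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
    }

    int main(void)
    {
        unsigned long flags = 0;

        nonatomic_set_bit(FR_WAITING, &flags);  /* private, as in get_req */
        atomic_set_bit(FR_INTERRUPTED, &flags); /* shared, after queueing */
        printf("waiting=%d interrupted=%d\n",
               test_bit_model(FR_WAITING, &flags),
               test_bit_model(FR_INTERRUPTED, &flags));
        return 0;
    }
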
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
if (atomic_dec_and_test(&req->count)) {
- if (unlikely(req->background)) {
+ if (test_bit(FR_BACKGROUND, &req->flags)) {
/*
* We get here in the unlikely case that a background
* request was allocated but not sent
*/
spin_unlock(&fc->lock);
}
- if (req->waiting) {
+ if (test_bit(FR_WAITING, &req->flags)) {
+ __clear_bit(FR_WAITING, &req->flags);
atomic_dec(&fc->num_waiting);
- req->waiting = 0;
}
if (req->stolen_file)
list_del_init(&req->list);
list_del_init(&req->intr_entry);
req->state = FUSE_REQ_FINISHED;
- if (req->background) {
- req->background = 0;
-
+ if (test_bit(FR_BACKGROUND, &req->flags)) {
+ clear_bit(FR_BACKGROUND, &req->flags);
if (fc->num_background == fc->max_background)
fc->blocked = 0;
if (req->state == FUSE_REQ_FINISHED)
return;
- req->interrupted = 1;
+ set_bit(FR_INTERRUPTED, &req->flags);
if (req->state == FUSE_REQ_SENT)
queue_interrupt(fc, req);
}
- if (!req->force) {
+ if (!test_bit(FR_FORCE, &req->flags)) {
sigset_t oldset;
/* Only fatal signals may interrupt this */
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
- BUG_ON(req->background);
+ BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
spin_lock(&fc->lock);
if (!fc->connected)
req->out.h.error = -ENOTCONN;
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
- req->isreply = 1;
- if (!req->waiting) {
- req->waiting = 1;
+ __set_bit(FR_ISREPLY, &req->flags);
+ if (!test_bit(FR_WAITING, &req->flags)) {
+ __set_bit(FR_WAITING, &req->flags);
atomic_inc(&fc->num_waiting);
}
__fuse_request_send(fc, req);
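
Note: FR_WAITING records whether this request has already been counted
in fc->num_waiting, so the counter is incremented and decremented
exactly once per request no matter which send path runs first. A rough
C11 model of that invariant (struct and helper names invented for the
sketch):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct conn { atomic_int num_waiting; };
    struct req  { bool waiting; };          /* stands in for FR_WAITING */

    /* Only the submitting task runs this, before the request is queued,
     * so the check-then-set needs no atomicity. */
    static void req_mark_waiting(struct conn *fc, struct req *req)
    {
        if (!req->waiting) {
            req->waiting = true;
            atomic_fetch_add(&fc->num_waiting, 1);
        }
    }

    /* Mirrors the FR_WAITING branch in fuse_put_request(). */
    static void req_drop_waiting(struct conn *fc, struct req *req)
    {
        if (req->waiting) {
            req->waiting = false;
            atomic_fetch_sub(&fc->num_waiting, 1);
        }
    }

    int main(void)
    {
        struct conn fc = { 0 };
        struct req req = { false };

        req_mark_waiting(&fc, &req);
        req_mark_waiting(&fc, &req);    /* second call is a no-op */
        req_drop_waiting(&fc, &req);
        printf("num_waiting=%d\n", atomic_load(&fc.num_waiting)); /* 0 */
        return 0;
    }
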
void fuse_request_send_background_locked(struct fuse_conn *fc,
struct fuse_req *req)
{
- BUG_ON(!req->background);
- if (!req->waiting) {
- req->waiting = 1;
+ BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
+ if (!test_bit(FR_WAITING, &req->flags)) {
+ __set_bit(FR_WAITING, &req->flags);
atomic_inc(&fc->num_waiting);
}
- req->isreply = 1;
+ __set_bit(FR_ISREPLY, &req->flags);
fc->num_background++;
if (fc->num_background == fc->max_background)
fc->blocked = 1;
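
Note: the background window is a counter with a high-water mark:
num_background is bumped under fc->lock, the connection is marked
blocked exactly when the count reaches max_background, and request_end()
clears blocked when the count drops back from the limit. A plain-int
sketch of that accounting (names are stand-ins; in the kernel both
counters are protected by fc->lock):

    #include <stdio.h>

    struct conn {
        unsigned num_background;
        unsigned max_background;
        int blocked;
    };

    static void send_background(struct conn *fc)
    {
        fc->num_background++;
        if (fc->num_background == fc->max_background)
            fc->blocked = 1;        /* window full: submitters must wait */
    }

    static void end_background(struct conn *fc)
    {
        if (fc->num_background == fc->max_background)
            fc->blocked = 0;        /* dropping below the limit unblocks */
        fc->num_background--;
    }

    int main(void)
    {
        struct conn fc = { .max_background = 2 };

        send_background(&fc);
        send_background(&fc);
        printf("blocked=%d\n", fc.blocked);     /* 1 */
        end_background(&fc);
        printf("blocked=%d\n", fc.blocked);     /* 0 */
        return 0;
    }
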
{
int err = -ENODEV;
- req->isreply = 0;
+ __clear_bit(FR_ISREPLY, &req->flags);
req->in.h.unique = unique;
spin_lock(&fc->lock);
if (fc->connected) {
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
- req->isreply = 0;
+ __clear_bit(FR_ISREPLY, &req->flags);
__fuse_request_send(fc, req);
/* ignore errors */
fuse_put_request(fc, req);
int err = 0;
if (req) {
spin_lock(&fc->lock);
- if (req->aborted)
+ if (test_bit(FR_ABORTED, &req->flags))
err = -ENOENT;
else
- req->locked = 1;
+ set_bit(FR_LOCKED, &req->flags);
spin_unlock(&fc->lock);
}
return err;
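
Note: lock_request() and its unlock counterpart below bracket every
copy to or from the request: the FR_ABORTED test and the FR_LOCKED
update happen under fc->lock, so an abort can never slip in between
them. A userspace model of the pair, with a pthread mutex standing in
for fc->lock (illustrative only):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct req {
        pthread_mutex_t *lock;      /* fc->lock */
        bool aborted;               /* FR_ABORTED */
        bool locked;                /* FR_LOCKED */
    };

    static int lock_request(struct req *req)
    {
        int err = 0;

        pthread_mutex_lock(req->lock);
        if (req->aborted)
            err = -ENOENT;          /* abort won: don't touch the data */
        else
            req->locked = true;
        pthread_mutex_unlock(req->lock);
        return err;
    }

    static int unlock_request(struct req *req)
    {
        int err = 0;

        pthread_mutex_lock(req->lock);
        if (req->aborted)
            err = -ENOENT;
        else
            req->locked = false;
        pthread_mutex_unlock(req->lock);
        return err;
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct req req = { .lock = &lock };

        if (lock_request(&req) == 0) {
            /* ... copy data to/from the request here ... */
            unlock_request(&req);
        }
        printf("locked=%d\n", req.locked);      /* 0 */
        return 0;
    }
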
int err = 0;
if (req) {
spin_lock(&fc->lock);
- if (req->aborted)
+ if (test_bit(FR_ABORTED, &req->flags))
err = -ENOENT;
else
- req->locked = 0;
+ clear_bit(FR_LOCKED, &req->flags);
spin_unlock(&fc->lock);
}
return err;
err = 0;
spin_lock(&cs->fc->lock);
- if (cs->req->aborted)
+ if (test_bit(FR_ABORTED, &cs->req->flags))
err = -ENOENT;
else
*pagep = newpage;
(struct fuse_arg *) in->args, 0);
fuse_copy_finish(cs);
spin_lock(&fc->lock);
- req->locked = 0;
+ clear_bit(FR_LOCKED, &req->flags);
if (!fc->connected) {
request_end(fc, req);
return -ENODEV;
request_end(fc, req);
return err;
}
- if (!req->isreply)
+ if (!test_bit(FR_ISREPLY, &req->flags)) {
request_end(fc, req);
- else {
+ } else {
req->state = FUSE_REQ_SENT;
list_move_tail(&req->list, &fc->processing);
- if (req->interrupted)
+ if (test_bit(FR_INTERRUPTED, &req->flags))
queue_interrupt(fc, req);
spin_unlock(&fc->lock);
}
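
Note: FR_INTERRUPTED can be set before the daemon has read the request,
so the read path above replays a pending interrupt the moment the
request reaches FUSE_REQ_SENT. A small state-machine sketch of that
replay (illustrative names, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    enum req_state { REQ_PENDING, REQ_SENT, REQ_FINISHED };

    struct req {
        enum req_state state;
        bool interrupted;           /* FR_INTERRUPTED */
    };

    static void queue_interrupt(struct req *req)
    {
        (void)req;
        printf("interrupt queued\n");
    }

    /* Waiter side: a signal arrived while waiting for the answer. */
    static void interrupt_request(struct req *req)
    {
        req->interrupted = true;
        if (req->state == REQ_SENT) /* daemon already has it: queue now */
            queue_interrupt(req);
    }

    /* Device-read side: the request is handed to the daemon. */
    static void mark_sent(struct req *req)
    {
        req->state = REQ_SENT;
        if (req->interrupted)       /* replay an early interrupt */
            queue_interrupt(req);
    }

    int main(void)
    {
        struct req req = { .state = REQ_PENDING };

        interrupt_request(&req);    /* too early: only sets the bit */
        mark_sent(&req);            /* now the interrupt gets queued */
        return 0;
    }
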
req->state = FUSE_REQ_WRITING;
list_move(&req->list, &fc->io);
req->out.h = oh;
- req->locked = 1;
+ set_bit(FR_LOCKED, &req->flags);
cs->req = req;
if (!req->out.page_replace)
cs->move_pages = 0;
fuse_copy_finish(cs);
spin_lock(&fc->lock);
- req->locked = 0;
+ clear_bit(FR_LOCKED, &req->flags);
if (!fc->connected)
err = -ENOENT;
else if (err)
list_for_each_entry_safe(req, next, &fc->io, list) {
req->out.h.error = -ECONNABORTED;
- req->aborted = 1;
- if (!req->locked)
+ set_bit(FR_ABORTED, &req->flags);
+ if (!test_bit(FR_LOCKED, &req->flags))
list_move(&req->list, &to_end);
}
while (!list_empty(&to_end)) {
* Drop the release request when client does not
* implement 'open'
*/
- req->background = 0;
+ __clear_bit(FR_BACKGROUND, &req->flags);
iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else if (sync) {
- req->background = 0;
+ __clear_bit(FR_BACKGROUND, &req->flags);
fuse_request_send(ff->fc, req);
iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else {
req->end = fuse_release_end;
- req->background = 1;
+ __set_bit(FR_BACKGROUND, &req->flags);
fuse_request_send_background(ff->fc, req);
}
kfree(ff);
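
Note: the three release paths above differ only in whether
FR_BACKGROUND is set before the request is handed off: dropped and
synchronous releases clear it, while the deferred release sets it and
goes through the background queue. A trivial sketch of that dispatch
(the enum and helper are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    enum release_path { RELEASE_DROP, RELEASE_SYNC, RELEASE_BACKGROUND };

    static bool release_is_background(enum release_path p)
    {
        switch (p) {
        case RELEASE_DROP:          /* client lacks open: never sent */
        case RELEASE_SYNC:          /* sent and waited for inline */
            return false;           /* __clear_bit(FR_BACKGROUND, ...) */
        case RELEASE_BACKGROUND:    /* sent via the background queue */
            return true;            /* __set_bit(FR_BACKGROUND, ...) */
        }
        return false;
    }

    int main(void)
    {
        printf("drop=%d sync=%d bg=%d\n",
               release_is_background(RELEASE_DROP),
               release_is_background(RELEASE_SYNC),
               release_is_background(RELEASE_BACKGROUND));
        return 0;
    }
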
{
WARN_ON(atomic_read(&ff->count) > 1);
fuse_prepare_release(ff, flags, FUSE_RELEASE);
- ff->reserved_req->force = 1;
- ff->reserved_req->background = 0;
+ __set_bit(FR_FORCE, &ff->reserved_req->flags);
+ __clear_bit(FR_BACKGROUND, &ff->reserved_req->flags);
fuse_request_send(ff->fc, ff->reserved_req);
fuse_put_request(ff->fc, ff->reserved_req);
kfree(ff);
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
- req->force = 1;
+ __set_bit(FR_FORCE, &req->flags);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!req)
goto err;
- req->background = 1; /* writeback always goes to bg_queue */
+ /* writeback always goes to bg_queue */
+ __set_bit(FR_BACKGROUND, &req->flags);
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
goto err_free;
req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
req->misc.write.next = NULL;
req->in.argpages = 1;
- req->background = 1;
+ __set_bit(FR_BACKGROUND, &req->flags);
req->num_pages = 0;
req->end = fuse_writepage_end;
req->inode = inode;
struct completion *done;
};
+/**
+ * Request flags
+ *
+ * FR_ISREPLY: set if the request has a reply
+ * FR_FORCE: force sending of the request even if interrupted
+ * FR_BACKGROUND: request is sent in the background
+ * FR_WAITING: request is counted as "waiting"
+ * FR_ABORTED: the request was aborted
+ * FR_INTERRUPTED: the request has been interrupted
+ * FR_LOCKED: data is being copied to/from the request
+ */
+enum fuse_req_flag {
+ FR_ISREPLY,
+ FR_FORCE,
+ FR_BACKGROUND,
+ FR_WAITING,
+ FR_ABORTED,
+ FR_INTERRUPTED,
+ FR_LOCKED,
+};
+
/**
* A request to the client
*/
/** Unique ID for the interrupt request */
u64 intr_unique;
- /*
- * The following bitfields are either set once before the
- * request is queued or setting/clearing them is protected by
- * fuse_conn->lock
- */
-
- /** True if the request has reply */
- unsigned isreply:1;
-
- /** Force sending of the request even if interrupted */
- unsigned force:1;
-
- /** The request was aborted */
- unsigned aborted:1;
-
- /** Request is sent in the background */
- unsigned background:1;
-
- /** The request has been interrupted */
- unsigned interrupted:1;
-
- /** Data is being copied to/from the request */
- unsigned locked:1;
-
- /** Request is counted as "waiting" */
- unsigned waiting:1;
+ /* Request flags, updated with test/set/clear_bit() */
+ unsigned long flags;
/** State of the request */
enum fuse_req_state state;
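
Note: one reason to replace the bitfields with a flags word is that
adjacent C bitfields share a single memory location, so two contexts
updating different bits perform racy read-modify-writes on the same
word, which is exactly what the old "protected by fuse_conn->lock"
comment had to guard against. Atomic bit ops on an unsigned long need
no such lock. A minimal pthread demo of the difference (illustrative;
whether the bitfield update is actually lost varies by machine, but it
is a data race either way):

    #include <pthread.h>
    #include <stdio.h>

    /* Old representation: both fields live in one memory location. */
    struct bitfield_flags { unsigned background:1; unsigned waiting:1; };

    static struct bitfield_flags racy;
    static unsigned long flags;         /* new representation */

    enum { FR_BACKGROUND, FR_WAITING };

    static void *set_background(void *arg)
    {
        racy.background = 1;            /* non-atomic RMW: data race */
        __atomic_fetch_or(&flags, 1UL << FR_BACKGROUND, __ATOMIC_RELAXED);
        return arg;
    }

    static void *set_waiting(void *arg)
    {
        racy.waiting = 1;               /* may undo the other write */
        __atomic_fetch_or(&flags, 1UL << FR_WAITING, __ATOMIC_RELAXED);
        return arg;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, set_background, NULL);
        pthread_create(&t2, NULL, set_waiting, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* flags reliably ends up with both bits set; racy may not. */
        printf("flags=%#lx background=%u waiting=%u\n",
               flags, racy.background, racy.waiting);
        return 0;
    }
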