NFS: Add functionality to allow waiting on all outstanding reads to complete
author	Trond Myklebust <Trond.Myklebust@netapp.com>
Tue, 9 Apr 2013 01:38:12 +0000 (21:38 -0400)
committer	Trond Myklebust <Trond.Myklebust@netapp.com>
Tue, 9 Apr 2013 02:12:33 +0000 (22:12 -0400)
This will later allow NFS locking code to wait for readahead to complete
before releasing byte range locks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
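
As a rough illustration of the intended consumer (not part of this patch; the
locking-path hook lands in a later change), a byte range unlock could drain the
counter before proceeding, roughly:

    /* Hypothetical unlock-path sketch; nfs_wait_for_reads() is an
     * illustrative name, not an existing function. */
    static int nfs_wait_for_reads(struct nfs_open_context *ctx)
    {
            struct nfs_lock_context *l_ctx;
            int status = 0;

            l_ctx = nfs_get_lock_context(ctx);
            if (!IS_ERR(l_ctx)) {
                    /* Block (killably) until all requests holding this
                     * lock context have completed. */
                    status = nfs_iocounter_wait(&l_ctx->io_count);
                    nfs_put_lock_context(l_ctx);
            }
            return status;  /* 0, or -ERESTARTSYS on a fatal signal */
    }
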
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/pagelist.c
include/linux/nfs_fs.h

diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 55b840f05ab265aee86309300dd8afabe4b17e1a..c1c7a9d78722257867846f39c74780536b28d0c5 100644
@@ -561,6 +561,7 @@ static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
        l_ctx->lockowner.l_owner = current->files;
        l_ctx->lockowner.l_pid = current->tgid;
        INIT_LIST_HEAD(&l_ctx->list);
+       nfs_iocounter_init(&l_ctx->io_count);
 }
 
 static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 541c9ebdbc5a3e905f07798e2207bf5eec028081..91e59a39fc08dcfd3b6b788cd9a3610e5d4b8984 100644
@@ -229,6 +229,13 @@ extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
                              struct nfs_pgio_header *hdr,
                              void (*release)(struct nfs_pgio_header *hdr));
 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
+int nfs_iocounter_wait(struct nfs_io_counter *c);
+
+static inline void nfs_iocounter_init(struct nfs_io_counter *c)
+{
+       c->flags = 0;
+       atomic_set(&c->io_count, 0);
+}
 
 /* nfs2xdr.c */
 extern struct rpc_procinfo nfs_procedures[];
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 7f0933086b36c7be9218697517bb3c3c3d49fd80..29cfb7ade121276e2d5ed7372d01949c5e0c2819 100644
@@ -84,6 +84,55 @@ nfs_page_free(struct nfs_page *p)
        kmem_cache_free(nfs_page_cachep, p);
 }
 
+static void
+nfs_iocounter_inc(struct nfs_io_counter *c)
+{
+       atomic_inc(&c->io_count);
+}
+
+static void
+nfs_iocounter_dec(struct nfs_io_counter *c)
+{
+       if (atomic_dec_and_test(&c->io_count)) {
+               clear_bit(NFS_IO_INPROGRESS, &c->flags);
+               smp_mb__after_clear_bit();
+               wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
+       }
+}
+
+static int
+__nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
+       DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
+       int ret = 0;
+
+       do {
+               prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
+               set_bit(NFS_IO_INPROGRESS, &c->flags);
+               if (atomic_read(&c->io_count) == 0)
+                       break;
+               ret = nfs_wait_bit_killable(&c->flags);
+       } while (atomic_read(&c->io_count) != 0);
+       finish_wait(wq, &q.wait);
+       return ret;
+}
+
+/**
+ * nfs_iocounter_wait - wait for i/o to complete
+ * @c: nfs_io_counter to use
+ *
+ * returns -ERESTARTSYS if interrupted by a fatal signal.
+ * Otherwise returns 0 once the io_count hits 0.
+ */
+int
+nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+       if (atomic_read(&c->io_count) == 0)
+               return 0;
+       return __nfs_iocounter_wait(c);
+}
+
 /**
  * nfs_create_request - Create an NFS read/write request.
  * @ctx: open context to use
@@ -118,6 +167,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                return ERR_CAST(l_ctx);
        }
        req->wb_lock_context = l_ctx;
+       nfs_iocounter_inc(&l_ctx->io_count);
 
        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
@@ -177,6 +227,7 @@ static void nfs_clear_request(struct nfs_page *req)
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
+               nfs_iocounter_dec(&l_ctx->io_count);
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f6b1956f3c867a5fca6e5d0f69bae482c1e5eb44..fc01d5cb4cf1e013afc41f42f9f9f9b6da6908b7 100644
@@ -59,11 +59,18 @@ struct nfs_lockowner {
        pid_t l_pid;
 };
 
+#define NFS_IO_INPROGRESS 0
+struct nfs_io_counter {
+       unsigned long flags;
+       atomic_t io_count;
+};
+
 struct nfs_lock_context {
        atomic_t count;
        struct list_head list;
        struct nfs_open_context *open_context;
        struct nfs_lockowner lockowner;
+       struct nfs_io_counter io_count;
 };
 
 struct nfs4_state;