NFSv4: Ensure delegation recall and byte range lock removal don't conflict
author: Trond Myklebust <Trond.Myklebust@netapp.com>
Thu, 7 Feb 2013 15:54:07 +0000 (10:54 -0500)
committer: Trond Myklebust <Trond.Myklebust@netapp.com>
Mon, 11 Feb 2013 20:33:13 +0000 (15:33 -0500)
Add a mutex to the struct nfs4_state_owner to ensure that delegation
recall doesn't conflict with byte range lock removal.

Note that we nest the new mutex _outside_ the state manager reclaim
protection (nfsi->rwsem) in order to avoid deadlocks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
fs/nfs/delegation.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c

index 2542cdaa111661871f045418807fba7f69c18d03..6390a4b5fee75823512f30de7f7452696a23093c 100644 (file)
@@ -71,8 +71,10 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
        int status = 0;
 
        if (inode->i_flock == NULL)
-               goto out;
+               return 0;
 
+       if (inode->i_flock == NULL)
+               goto out;
        /* Protect inode->i_flock using the file locks lock */
        lock_flocks();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
@@ -113,12 +115,15 @@ again:
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                sp = state->owner;
+               /* Block nfs4_proc_unlck */
+               mutex_lock(&sp->so_delegreturn_mutex);
                seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
                err = nfs4_open_delegation_recall(ctx, state, stateid);
                if (!err)
                        err = nfs_delegation_claim_locks(ctx, state);
                if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
                        err = -EAGAIN;
+               mutex_unlock(&sp->so_delegreturn_mutex);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return err;
index b12b73472020e0a7bc7287f7c08ddc9e8df523a8..944c9a5c10390cdca6112f3ecf27574ef0a89e81 100644 (file)
@@ -93,6 +93,7 @@ struct nfs4_state_owner {
        struct list_head     so_states;
        struct nfs_seqid_counter so_seqid;
        seqcount_t           so_reclaim_seqcount;
+       struct mutex         so_delegreturn_mutex;
 };
 
 enum {
index f7e05ade557215f52741ad1f927e91ff4e288075..d51227371c6768f2ca903276c33e21edb64f9b70 100644 (file)
@@ -4485,7 +4485,9 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
 
 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
-       struct nfs_inode *nfsi = NFS_I(state->inode);
+       struct inode *inode = state->inode;
+       struct nfs4_state_owner *sp = state->owner;
+       struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_seqid *seqid;
        struct nfs4_lock_state *lsp;
        struct rpc_task *task;
@@ -4495,12 +4497,17 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
        status = nfs4_set_lock_state(state, request);
        /* Unlock _before_ we do the RPC call */
        request->fl_flags |= FL_EXISTS;
+       /* Exclude nfs_delegation_claim_locks() */
+       mutex_lock(&sp->so_delegreturn_mutex);
+       /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
        down_read(&nfsi->rwsem);
        if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
                up_read(&nfsi->rwsem);
+               mutex_unlock(&sp->so_delegreturn_mutex);
                goto out;
        }
        up_read(&nfsi->rwsem);
+       mutex_unlock(&sp->so_delegreturn_mutex);
        if (status != 0)
                goto out;
        /* Is this a delegated lock? */
index fff97228cdec14e0649cfc639776b4890883e360..6ace365c6334db844af0c2c3f221cd871ef3658c 100644 (file)
@@ -519,6 +519,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
        atomic_set(&sp->so_count, 1);
        INIT_LIST_HEAD(&sp->so_lru);
        seqcount_init(&sp->so_reclaim_seqcount);
+       mutex_init(&sp->so_delegreturn_mutex);
        return sp;
 }