clp->cl_proto = cl_init->proto;
#ifdef CONFIG_NFS_V4
- init_rwsem(&clp->cl_sem);
INIT_LIST_HEAD(&clp->cl_delegations);
spin_lock_init(&clp->cl_lock);
INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
@@ ... @@
*/
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_inode *nfsi = NFS_I(inode);
nfs_msync_inode(inode);
- down_read(&clp->cl_sem);
/* Guard against new delegated open calls */
down_write(&nfsi->rwsem);
nfs_delegation_claim_opens(inode, &delegation->stateid);
up_write(&nfsi->rwsem);
- up_read(&clp->cl_sem);
nfs_msync_inode(inode);
return nfs_do_return_delegation(inode, delegation, 1);
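With both cl_sem references dropped, returning a delegation only has to fence off new delegated opens, and the per-inode rwsem already does that. Pieced together from the surviving context lines above, the post-patch function comes out as roughly (a sketch, not the verbatim result):

	static int __nfs_inode_return_delegation(struct inode *inode,
						 struct nfs_delegation *delegation)
	{
		struct nfs_inode *nfsi = NFS_I(inode);

		nfs_msync_inode(inode);
		/* Guard against new delegated open calls */
		down_write(&nfsi->rwsem);
		nfs_delegation_claim_opens(inode, &delegation->stateid);
		up_write(&nfsi->rwsem);
		nfs_msync_inode(inode);

		return nfs_do_return_delegation(inode, delegation, 1);
	}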
daemonize("nfsv4-delegreturn");
nfs_msync_inode(inode);
- down_read(&clp->cl_sem);
down_write(&nfsi->rwsem);
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
complete(&args->started);
nfs_delegation_claim_opens(inode, args->stateid);
up_write(&nfsi->rwsem);
- up_read(&clp->cl_sem);
nfs_msync_inode(inode);
if (delegation != NULL)
@@ ... @@
might_sleep();
- rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
-
res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
nfs4_wait_bit_killable, TASK_KILLABLE);
-
- rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
return res;
}
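This hunk shows what replaces the read side of cl_sem: rather than taking (and lockdep-annotating) a semaphore around the whole operation, callers now park on the NFS4CLNT_STATE_RECOVER bit until the state manager clears it. The resulting helper is roughly as below; the signature is inferred from the surviving body, with nfs4_wait_bit_killable as the sleep callback passed to wait_on_bit():

	static int nfs4_wait_clnt_recover(struct nfs_client *clp)
	{
		int res;

		might_sleep();
		res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
				nfs4_wait_bit_killable, TASK_KILLABLE);
		return res;
	}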
@@ ... @@
struct nfs4_state_owner *sp;
struct nfs4_state *state = NULL;
struct nfs_server *server = NFS_SERVER(dir);
- struct nfs_client *clp = server->nfs_client;
struct nfs4_opendata *opendata;
int status;
goto err_put_state_owner;
if (path->dentry->d_inode != NULL)
nfs4_return_incompatible_delegation(path->dentry->d_inode, flags & (FMODE_READ|FMODE_WRITE));
- down_read(&clp->cl_sem);
status = -ENOMEM;
opendata = nfs4_opendata_alloc(path, sp, flags, sattr);
if (opendata == NULL)
- goto err_release_rwsem;
+ goto err_put_state_owner;
if (path->dentry->d_inode != NULL)
opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp);
goto err_opendata_put;
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
- up_read(&clp->cl_sem);
*res = state;
return 0;
err_opendata_put:
nfs4_opendata_put(opendata);
-err_release_rwsem:
- up_read(&clp->cl_sem);
err_put_state_owner:
nfs4_put_state_owner(sp);
out_err:
@@ ... @@
struct nfs4_lock_state *lsp;
int status;
- down_read(&clp->cl_sem);
arg.lock_owner.clientid = clp->cl_clientid;
status = nfs4_set_lock_state(state, request);
if (status != 0)
}
request->fl_ops->fl_release_private(request);
out:
- up_read(&clp->cl_sem);
return status;
}
@@ ... @@
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
- struct nfs_client *clp = state->owner->so_client;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_seqid *seqid;
struct nfs4_lock_state *lsp;
status = nfs4_set_lock_state(state, request);
/* Unlock _before_ we do the RPC call */
request->fl_flags |= FL_EXISTS;
- down_read(&clp->cl_sem);
down_read(&nfsi->rwsem);
if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
up_read(&nfsi->rwsem);
- up_read(&clp->cl_sem);
goto out;
}
up_read(&nfsi->rwsem);
- up_read(&clp->cl_sem);
if (status != 0)
goto out;
/* Is this a delegated lock? */
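Same pattern in the unlock path: only nfsi->rwsem brackets the provisional VFS unlock. Reassembled from the surviving lines above, the post-patch sequence is (fragment sketch; the declarations and the RPC tail are elided):

	status = nfs4_set_lock_state(state, request);
	/* Unlock _before_ we do the RPC call */
	request->fl_flags |= FL_EXISTS;
	down_read(&nfsi->rwsem);
	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
		up_read(&nfsi->rwsem);
		goto out;
	}
	up_read(&nfsi->rwsem);
	if (status != 0)
		goto out;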
@@ ... @@
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
- struct nfs_client *clp = state->owner->so_client;
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
int status;
status = do_vfs_lock(request->fl_file, request);
if (status < 0)
goto out;
- down_read(&clp->cl_sem);
down_read(&nfsi->rwsem);
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
/* Yes: cache locks! */
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
out_unlock:
up_read(&nfsi->rwsem);
- up_read(&clp->cl_sem);
out:
request->fl_flags = fl_flags;
return status;
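And in _nfs4_proc_setlk(): the delegated-lock fast path (cache the lock locally while NFS_DELEGATED_STATE is set) now runs under nfsi->rwsem alone. A sketch of the resulting critical section; note that the _nfs4_do_setlk() call and the FL_SLEEP handling are recalled from the surrounding kernel code, not shown in this excerpt:

	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		request->fl_flags = fl_flags & ~FL_SLEEP;
		status = do_vfs_lock(request->fl_file, request);
		goto out_unlock;
	}
	status = _nfs4_do_setlk(state, cmd, request, 0);	/* recalled, not in excerpt */
	if (status != 0)
		goto out_unlock;
	/* Note: we always want to sleep here! */
	request->fl_flags = fl_flags | FL_SLEEP;
	if (do_vfs_lock(request->fl_file, request) < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
				__func__);
	out_unlock:
		up_read(&nfsi->rwsem);
	out:
		request->fl_flags = fl_flags;
		return status;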
@@ ... @@
long lease, timeout;
unsigned long last, now;
- down_read(&clp->cl_sem);
dprintk("%s: start\n", __func__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
schedule_delayed_work(&clp->cl_renewd, timeout);
spin_unlock(&clp->cl_lock);
out:
- up_read(&clp->cl_sem);
dprintk("%s: done\n", __func__);
}
@@ ... @@
-/* Must be called with clp->cl_sem locked for writes */
void
nfs4_schedule_state_renewal(struct nfs_client *clp)
{
}
}
@@ ... @@
-/*
- * Note: must be called with clp->cl_sem held in order to prevent races
- * with reboot recovery!
- */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
struct nfs_client *clp = server->nfs_client;
return sp;
}
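The deleted comment made cl_sem responsible for fencing reboot recovery out of state-owner manipulation. After this patch the owner trees remain serialized by the cl_lock spinlock (see the struct nfs_client hunk at the end), while recovery exclusion moves to the NFS4CLNT_STATE_RECOVER bit. A hypothetical lookup under the retained lock, assuming a helper named nfs4_find_state_owner():

	struct nfs4_state_owner *sp;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(server, cred);	/* hypothetical helper */
	spin_unlock(&clp->cl_lock);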
@@ ... @@
-/*
- * Must be called with clp->cl_sem held in order to avoid races
- * with state recovery...
- */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
struct nfs_client *clp = sp->so_client;
@@ ... @@
return state;
}
-/*
- * Beware! Caller must be holding exactly one
- * reference to clp->cl_sem!
- */
void nfs4_put_open_state(struct nfs4_state *state)
{
struct inode *inode = state->inode;
@@ ... @@
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
*
- * The caller must be holding clp->cl_sem
*/
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
@@ ... @@
allow_signal(SIGKILL);
/* Ensure exclusive access to NFSv4 state */
- down_write(&clp->cl_sem);
while (!list_empty(&clp->cl_superblocks)) {
if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
/* We're going to have to re-establish a clientid */
/* First recover reboot state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
- /* Note: list is protected by exclusive lock on cl->cl_sem */
status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
- /* Note: list is protected by exclusive lock on cl->cl_sem */
status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
if (status < 0) {
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
break;
}
out:
- up_write(&clp->cl_sem);
if (test_and_clear_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state))
nfs_handle_cb_pathdown(clp);
nfs4_clear_recover_bit(clp);
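Without down_write(&clp->cl_sem), exclusivity during reclaim has to come from the state bits themselves: the state manager advertises itself via NFS4CLNT_STATE_RECOVER, and nfs4_wait_clnt_recover() (above) blocks other users until it is cleared. A hypothetical sketch of that handshake; the actual set/clear sites are outside this excerpt:

	set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	/* ... run the reboot/nograce reclaim passes ... */
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);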
@@ ... @@
struct rb_root cl_openowner_id;
struct rb_root cl_lockowner_id;
- /*
- * The following rwsem ensures exclusive access to the server
- * while we recover the state following a lease expiration.
- */
- struct rw_semaphore cl_sem;
-
struct list_head cl_delegations;
struct rb_root cl_state_owners;
spinlock_t cl_lock;
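For reference, once the rwsem and its comment are deleted, the remaining synchronization fields in this block of struct nfs_client reduce to the spinlock-protected trees and lists:

	struct rb_root		cl_openowner_id;
	struct rb_root		cl_lockowner_id;

	struct list_head	cl_delegations;
	struct rb_root		cl_state_owners;
	spinlock_t		cl_lock;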