struct server_scope **);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);
+extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);
extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
/* Use that stateid */
} else if (state != NULL) {
- nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
+ nfs4_select_rw_stateid(&arg.stateid, state, current->files, current->tgid);
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
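The hunk above shows the selection priority at the call site: a delegation stateid if the client holds one, otherwise the lock/open stateid picked by the renamed helper, and finally the all-zero stateid. A standalone sketch of that priority order follows; every type and helper in it (stateid_t, copy_delegation_stateid(), choose_stateid(), the fake_* structs) is a simplified stand-in for illustration, not the kernel's internal API.

/* Standalone sketch of the stateid selection priority shown in the hunk
 * above: a delegation stateid first, then the lock/open stateid chosen by
 * the renamed helper, then the all-zero stateid.  Every type and helper
 * here is a simplified stand-in for illustration, not the kernel's
 * internal API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char data[16]; } stateid_t;

static const stateid_t zero_stateid;	/* all-zero stateid: anonymous access */

struct fake_inode { bool delegated; stateid_t deleg_stateid; };
struct fake_open_state { stateid_t open_stateid; };

/* Stand-in for nfs4_copy_delegation_stateid(): copy and report success. */
static bool copy_delegation_stateid(stateid_t *dst, const struct fake_inode *inode)
{
	if (!inode->delegated)
		return false;
	*dst = inode->deleg_stateid;
	return true;
}

/* Stand-in for nfs4_select_rw_stateid(): just returns the open stateid;
 * the real helper also considers byte-range lock state. */
static void select_rw_stateid(stateid_t *dst, const struct fake_open_state *state)
{
	*dst = state->open_stateid;
}

static void choose_stateid(stateid_t *dst, const struct fake_inode *inode,
			   const struct fake_open_state *state)
{
	if (copy_delegation_stateid(dst, inode))
		return;					/* 1. delegation stateid */
	if (state != NULL) {
		select_rw_stateid(dst, state);		/* 2. lock/open stateid */
		return;
	}
	memcpy(dst, &zero_stateid, sizeof(*dst));	/* 3. all-zero stateid */
}

int main(void)
{
	struct fake_inode inode = { .delegated = false };
	struct fake_open_state state = { .open_stateid = { { 1 } } };
	stateid_t sid;

	choose_stateid(&sid, &inode, &state);
	printf("first byte of chosen stateid: %u\n", sid.data[0]);
	return 0;
}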
* Byte-range lock aware utility to initialize the stateid of read/write
* requests.
*/
-void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
+void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
{
struct nfs4_lock_state *lsp;
int seq;
nfs4_stateid stateid;
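The comment and signature above describe a "byte-range lock aware" selection: when the (fl_owner, pid) pair holds a byte-range lock on the file, the lock stateid should be returned instead of the open stateid, and the `seq` local suggests the copy is retried under a sequence lock if the state changes concurrently. A minimal sketch of that selection logic, with invented types and a plain linked list instead of the kernel's structures and locking:

/* Minimal sketch of "byte-range lock aware" stateid selection: return the
 * lock stateid when this (owner, pid) pair holds byte-range locks on the
 * file, otherwise fall back to the open stateid.  The types and the plain
 * linked list below are invented for illustration; the real helper also
 * copies under a sequence lock (the `seq` local above) so that a
 * concurrent update causes the copy to be retried.
 */
#include <stddef.h>
#include <sys/types.h>

typedef struct { unsigned char data[16]; } stateid_t;

struct lock_state {
	struct lock_state *next;
	const void *owner;		/* stand-in for fl_owner_t */
	pid_t pid;
	stateid_t lock_stateid;
};

struct open_state {
	stateid_t open_stateid;
	struct lock_state *lock_states;	/* simplified singly linked list */
};

/* Find the lock state belonging to this (owner, pid) pair, if any. */
static struct lock_state *find_lock_state(struct open_state *state,
					  const void *owner, pid_t pid)
{
	struct lock_state *lsp;

	for (lsp = state->lock_states; lsp != NULL; lsp = lsp->next)
		if (lsp->owner == owner && lsp->pid == pid)
			return lsp;
	return NULL;
}

void select_rw_stateid(stateid_t *dst, struct open_state *state,
		       const void *owner, pid_t pid)
{
	struct lock_state *lsp = find_lock_state(state, owner, pid);

	/* Prefer the lock stateid when this owner holds byte-range locks. */
	*dst = lsp != NULL ? lsp->lock_stateid : state->open_stateid;
}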
if (ctx->state != NULL) {
- nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid);
+ nfs4_select_rw_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid);
if (zero_seqid)
stateid.stateid.seqid = 0;
encode_nfs4_stateid(xdr, &stateid);
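Here the encoder copies the chosen stateid and, when `zero_seqid` is set, clears its seqid field before putting it on the wire (an NFSv4.1 client may send seqid zero to tell the server to skip the seqid check). A self-contained sketch of that step, assuming the usual 4-byte seqid plus 12-byte "other" layout; the flat buffer handling is a simplification and is not the kernel's xdr_stream API:

/* Sketch of zeroing a stateid's seqid before XDR encoding, as in the hunk
 * above.  An NFSv4 stateid on the wire is a 4-byte seqid followed by 12
 * opaque bytes; the flat buffer used here is a simplification and is not
 * the kernel's xdr_stream API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct stateid {
	uint32_t seqid;			/* host byte order in this sketch */
	unsigned char other[12];
};

/* Write a 32-bit value in XDR (big-endian) byte order. */
static unsigned char *encode_u32(unsigned char *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
	return p + 4;
}

/* Encode the stateid, forcing seqid to zero when requested (an NFSv4.1
 * client may do this to ask the server to ignore the seqid). */
unsigned char *encode_stateid(unsigned char *p, const struct stateid *sid,
			      bool zero_seqid)
{
	p = encode_u32(p, zero_seqid ? 0 : sid->seqid);
	memcpy(p, sid->other, sizeof(sid->other));
	return p + sizeof(sid->other);
}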