/* We compute the checksum for the verifier over the xdr-encoded bytes
* starting with the xid and ending at the end of the credential: */
- iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
+ iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
req->rq_snd_buf.head[0].iov_base);
iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
xdr_buf_from_iov(&iov, &verf_buf);
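A minimal userspace sketch of the span arithmetic above, assuming nothing beyond standard C (struct span stands in for the kernel's kvec): the checksum buffer covers every byte encoded so far, from the word holding the xid up to the current encode position p.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct span { void *base; size_t len; };	/* stand-in for struct kvec */

/* Cover [hdr_start, p): the xid through the end of the credential. */
static struct span checksum_span(void *hdr_start, uint32_t *p)
{
	struct span s = { hdr_start, (size_t)((uint8_t *)p - (uint8_t *)hdr_start) };
	return s;
}

int main(void)
{
	uint32_t words[8];
	struct span s = checksum_span(words, words + 6);	/* six words encoded */

	assert(s.len == 6 * sizeof(uint32_t));
	return 0;
}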
{
unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_xprt *xprt = req->rq_xprt;
struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
dprint_status(task);
if (rpc_reply_expected(task))
return;
task->tk_action = rpc_exit_task;
- rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
+ rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
}
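The substitution made throughout this patch is mechanical: once a task is bound to a request slot, the transport should be reached through the request. A hypothetical helper, not in the kernel tree, would state the rule in one place:

/* Hypothetical, for illustration only: valid once task->tk_rqstp is set. */
static inline struct rpc_xprt *task_xprt(const struct rpc_task *task)
{
	return task->tk_rqstp->rq_xprt;
}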
default:
/*
 * Problem reaching the server.  Disconnect and let the
 * forechannel reset the xprt so we can try again.
 */
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
- xprt_conditional_disconnect(task->tk_xprt,
+ xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
break;
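xprt_conditional_disconnect() only acts if the transport is still on the connection the request was sent over; req->rq_connect_cookie is the snapshot taken at transmit time. A standalone model of that comparison (names are illustrative, not the kernel's):

#include <stdio.h>

struct model_xprt { unsigned int connect_cookie; int connected; };

/* Disconnect only if no reconnect has happened since `cookie` was taken. */
static void conditional_disconnect(struct model_xprt *xprt, unsigned int cookie)
{
	if (xprt->connect_cookie == cookie)
		xprt->connected = 0;
}

int main(void)
{
	struct model_xprt xprt = { .connect_cookie = 7, .connected = 1 };
	unsigned int sent_over = xprt.connect_cookie;	/* rq_connect_cookie */

	xprt.connect_cookie++;				/* reconnected since */
	conditional_disconnect(&xprt, sent_over);
	printf("connected = %d\n", xprt.connected);	/* prints 1: stale cookie, no action */
	return 0;
}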
case -ETIMEDOUT:
task->tk_action = call_timeout;
if (task->tk_client->cl_discrtry)
- xprt_conditional_disconnect(task->tk_xprt,
+ xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
break;
case -ECONNRESET:
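/* Note: rpc_verify_header() may have freed the RPC slot */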
if (task->tk_rqstp == req) {
req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
- xprt_conditional_disconnect(task->tk_xprt,
+ xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
}
}
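The reset above re-arms the slot for another receive; a standalone distillation (field names shortened from struct rpc_rqst):

#include <stddef.h>

struct model_rqst { size_t reply_bytes_recvd, rcv_buf_len; };

/* Make the request look unreplied so a retransmission can be matched
 * against a fresh reply in the same slot. */
static void reset_for_retry(struct model_rqst *req)
{
	req->reply_bytes_recvd = 0;
	req->rcv_buf_len = 0;
}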
/* FIXME: check buffer size? */
- p = xprt_skip_transport_header(task->tk_xprt, p);
+ p = xprt_skip_transport_header(req->rq_xprt, p);
*p++ = req->rq_xid; /* XID */
*p++ = htonl(RPC_CALL); /* CALL */
*p++ = htonl(RPC_VERSION); /* RPC version */
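The three words written here are fixed by RFC 5531. A runnable userspace sketch of the same layout, with the RPC_CALL and RPC_VERSION constants written out:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdr[3];
	uint32_t xid = 0x12345678;	/* the kernel stores rq_xid pre-byteswapped */

	hdr[0] = htonl(xid);		/* XID */
	hdr[1] = htonl(0);		/* msg type 0: CALL */
	hdr[2] = htonl(2);		/* RPC protocol version 2 */

	printf("%08x %08x %08x\n", ntohl(hdr[0]), ntohl(hdr[1]), ntohl(hdr[2]));
	return 0;
}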
*/
void xprt_release_rqst_cong(struct rpc_task *task)
{
- __xprt_put_cong(task->tk_xprt, task->tk_rqstp);
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ __xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
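For UDP transports each request in flight holds one RPC_CWNDSCALE-sized unit of congestion credit against xprt->cwnd; releasing the slot is what lets the next queued request transmit. A standalone model of the release, assuming only that documented behavior:

#define MODEL_CWNDSCALE 256UL	/* mirrors RPC_CWNDSCALE = 1 << 8 */

struct model_xprt { unsigned long cong; };
struct model_rqst { int rq_cong; };

static void model_put_cong(struct model_xprt *xprt, struct model_rqst *req)
{
	if (!req->rq_cong)	/* request never occupied a slot */
		return;
	req->rq_cong = 0;
	xprt->cong -= MODEL_CWNDSCALE;	/* kernel also wakes the next waiter */
}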
struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
int nsegs, nchunks = 0;
unsigned int pos;
struct rpcrdma_mr_seg *seg = req->rl_segments;
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
- struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
+ struct rpc_xprt *xprt = rqst->rq_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
char *base;
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
struct rpcrdma_req *req, *nreq;
req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
xprt_rdma_send_request(struct rpc_task *task)
{
struct rpc_rqst *rqst = task->tk_rqstp;
- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_xprt *xprt = rqst->rq_xprt;
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
};
#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
- (rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_rsize)
+ (rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
- (rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_wsize)
+ (rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
#define RPCRDMA_INLINE_PAD_VALUE(rq)\
- rpcx_to_rdmad(rq->rq_task->tk_xprt).padding
+ rpcx_to_rdmad(rq->rq_xprt).padding
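These thresholds gate the marshaling decision in rpc_rdma.c: a message at or under the inline size rides in the RDMA SEND itself, while larger payloads are registered and advertised as chunks (read chunks for call data the server pulls, write chunks for reply data it pushes). A simplified, hypothetical version of that choice:

#include <stddef.h>

enum model_chunktype { MODEL_NOCH, MODEL_READCH, MODEL_WRITECH };

static enum model_chunktype
choose_chunktype(size_t len, size_t inline_size, int is_call_data)
{
	if (len <= inline_size)
		return MODEL_NOCH;	/* fits inline in the SEND */
	return is_call_data ? MODEL_READCH : MODEL_WRITECH;
}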
/*
* Statistics for RPCRDMA
goto out_release;
if (req->rq_bytes_sent == req->rq_snd_buf.len)
goto out_release;
- set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
+ set_bit(XPRT_CLOSE_WAIT, &xprt->state);
out_release:
xprt_release_xprt(xprt, task);
}
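The guards above encode the invariant worth keeping in mind: zero bytes sent or a fully sent record leaves the TCP stream reusable, but a partial record does not, so the transport must be marked for close. A runnable distillation:

#include <assert.h>
#include <stddef.h>

static int must_close(size_t bytes_sent, size_t snd_len)
{
	if (bytes_sent == 0 || bytes_sent == snd_len)
		return 0;	/* nothing on the wire, or a complete record */
	return 1;		/* mid-record: next RPC would corrupt framing */
}

int main(void)
{
	assert(!must_close(0, 100));
	assert(!must_close(100, 100));
	assert(must_close(40, 100));
	return 0;
}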