xprtrdma: Add support for sending backward direction RPC replies
author  Chuck Lever <chuck.lever@oracle.com>
Sat, 24 Oct 2015 21:27:59 +0000 (17:27 -0400)
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>
Mon, 2 Nov 2015 18:45:15 +0000 (13:45 -0500)
Backward direction RPC replies are sent via the client transport's
send_request method, the same way forward direction RPC calls are
sent.
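
As a rough sketch (illustration only, not part of this patch; it reuses
names that already exist in xprtrdma and elides the error handling and
congestion logic in the real transport.c), both directions now funnel
through the same marshaling entry point before posting an RDMA Send:

	#include <linux/sunrpc/xprt.h>
	#include "xprt_rdma.h"

	/* Trimmed-down view of the ->send_request path (sketch only) */
	static int xprt_rdma_send_request_sketch(struct rpc_task *task)
	{
		struct rpc_rqst *rqst = task->tk_rqstp;
		struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
		struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
		int rc;

		/* rpcrdma_marshal_req() dispatches on RPC_BC_PA_IN_USE:
		 * forward direction calls are marshaled as before, while
		 * backward direction replies branch to
		 * rpcrdma_bc_marshal_reply().
		 */
		rc = rpcrdma_marshal_req(rqst);
		if (rc < 0)
			return rc;

		/* Either way, the marshaled rl_send_iov[] array is handed
		 * to the same RDMA Send machinery; the real code drops the
		 * connection if the post fails.
		 */
		return rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req);
	}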

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/xprt_rdma.h

diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 3165ed639eecedb5721a62dde3a9a70a55f4779d..ffc4853a068e0a6080bde8a03bd38c62f73c01ed 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -169,6 +169,51 @@ out_err:
        return -ENOMEM;
 }
 
+/**
+ * rpcrdma_bc_marshal_reply - Send backwards direction reply
+ * @rqst: buffer containing RPC reply data
+ *
+ * Returns zero on success.
+ */
+int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
+{
+       struct rpc_xprt *xprt = rqst->rq_xprt;
+       struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+       struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+       struct rpcrdma_msg *headerp;
+       size_t rpclen;
+
+       headerp = rdmab_to_msg(req->rl_rdmabuf);
+       headerp->rm_xid = rqst->rq_xid;
+       headerp->rm_vers = rpcrdma_version;
+       headerp->rm_credit =
+                       cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
+       headerp->rm_type = rdma_msg;
+       headerp->rm_body.rm_chunks[0] = xdr_zero;
+       headerp->rm_body.rm_chunks[1] = xdr_zero;
+       headerp->rm_body.rm_chunks[2] = xdr_zero;
+
+       rpclen = rqst->rq_svec[0].iov_len;
+
+       pr_info("RPC:       %s: rpclen %zd headerp 0x%p lkey 0x%x\n",
+               __func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf));
+       pr_info("RPC:       %s: RPC/RDMA: %*ph\n",
+               __func__, (int)RPCRDMA_HDRLEN_MIN, headerp);
+       pr_info("RPC:       %s:      RPC: %*ph\n",
+               __func__, (int)rpclen, rqst->rq_svec[0].iov_base);
+
+       req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
+       req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
+       req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
+
+       req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
+       req->rl_send_iov[1].length = rpclen;
+       req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
+
+       req->rl_niovs = 2;
+       return 0;
+}
+
 /**
  * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
  * @xprt: transport associated with these backchannel resources
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 95774fcc1b4352ca13be01a6823554f17cec26fc..b7a21e5518880c0476710358cd9a75f8279e1fc3 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -441,6 +441,11 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
        enum rpcrdma_chunktype rtype, wtype;
        struct rpcrdma_msg *headerp;
 
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+       if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
+               return rpcrdma_bc_marshal_reply(rqst);
+#endif
+
        /*
         * rpclen gets amount of data in first buffer, which is the
         * pre-registered buffer.
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 55d2660df56a270e835a92140b54da76c882f00c..e2d23ea23df96d3823ec63b3a328caab8fceb153 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -520,6 +520,7 @@ void xprt_rdma_cleanup(void);
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
 int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
+int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
 void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
 void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */