xprtrdma: Clean up rpcrdma_marshal_req() synopsis
authorChuck Lever <chuck.lever@oracle.com>
Thu, 10 Aug 2017 16:47:12 +0000 (12:47 -0400)
committerAnna Schumaker <Anna.Schumaker@Netapp.com>
Fri, 11 Aug 2017 17:20:08 +0000 (13:20 -0400)
Clean up: The caller already has the rpcrdma_xprt, so pass it to
rpcrdma_marshal_req() directly instead of re-deriving it from the
rpc_rqst. Also provide a documenting comment for this critical
function.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/xprt_rdma.h

index 62198615649544baba3e6483a03258310708accc..d916e596d4275010b33654afcd2828e7b7179ad5 100644 (file)
@@ -651,18 +651,27 @@ rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
        req->rl_mapped_sges = 0;
 }
 
-/*
- * Marshal a request: the primary job of this routine is to choose
- * the transfer modes. See comments below.
+/**
+ * rpcrdma_marshal_req - Marshal and send one RPC request
+ * @r_xprt: controlling transport
+ * @rqst: RPC request to be marshaled
  *
- * Returns zero on success, otherwise a negative errno.
+ * For the RPC in "rqst", this function:
+ *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
+ *  - Registers Read, Write, and Reply chunks
+ *  - Constructs the transport header
+ *  - Posts a Send WR to send the transport header and request
+ *
+ * Returns:
+ *     %0 if the RPC was sent successfully,
+ *     %-ENOTCONN if the connection was lost,
+ *     %-EAGAIN if not enough pages are available for on-demand reply buffer,
+ *     %-ENOBUFS if no MRs are available to register chunks,
+ *     %-EIO if a permanent problem occurred while marshaling.
  */
-
 int
-rpcrdma_marshal_req(struct rpc_rqst *rqst)
+rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 {
-       struct rpc_xprt *xprt = rqst->rq_xprt;
-       struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        enum rpcrdma_chunktype rtype, wtype;
        struct rpcrdma_msg *headerp;
index 42752e4cc99674974f5be347e3e97b59eac29647..a43b8280349fcf537afe34cfdebabc1c2d54551b 100644 (file)
@@ -730,7 +730,7 @@ xprt_rdma_send_request(struct rpc_task *task)
        if (unlikely(!list_empty(&req->rl_registered)))
                r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
 
-       rc = rpcrdma_marshal_req(rqst);
+       rc = rpcrdma_marshal_req(r_xprt, rqst);
        if (rc < 0)
                goto failed_marshal;
 
index 52e73eaacebb3b957690a060e886d1749dea3a7f..78958e92a0a1c85a937e93d6cae2fceb3c9ee772 100644 (file)
@@ -637,7 +637,7 @@ enum rpcrdma_chunktype {
 bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
                               u32, struct xdr_buf *, enum rpcrdma_chunktype);
 void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
-int rpcrdma_marshal_req(struct rpc_rqst *);
+int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
 void rpcrdma_reply_handler(struct work_struct *work);