xprtrdma: Move send_wr to struct rpcrdma_req
author Chuck Lever <chuck.lever@oracle.com>
Thu, 15 Sep 2016 14:56:43 +0000 (10:56 -0400)
committer Anna Schumaker <Anna.Schumaker@Netapp.com>
Mon, 19 Sep 2016 17:08:38 +0000 (13:08 -0400)
Clean up: Most of the fields in each send_wr do not vary. There is
no need to initialize them before each ib_post_send(). This removes
a large-ish data structure from the stack.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
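
For reference, the pattern this patch applies is to hoist a work request whose fields are mostly invariant out of the send path and into the long-lived request structure, filling the invariant fields once at allocation time. A minimal sketch of that idea, using stand-in types in place of the kernel's struct ib_send_wr and struct ib_sge from <rdma/ib_verbs.h> (all names below are illustrative, not the xprtrdma ones):

	/* Stand-in types so the sketch is self-contained; the real code
	 * uses the definitions from <rdma/ib_verbs.h>.
	 */
	struct sge { unsigned long long addr; unsigned int length; unsigned int lkey; };
	struct send_wr {
		struct send_wr	*next;
		struct sge	*sg_list;
		int		num_sge;
		int		opcode;		/* e.g. IB_WR_SEND */
		int		send_flags;	/* e.g. IB_SEND_SIGNALED */
	};

	#define MAX_IOVS 4	/* stands in for RPCRDMA_MAX_IOVS */

	struct req {
		struct send_wr	send_wr;		/* like rl_send_wr */
		struct sge	send_iov[MAX_IOVS];	/* like rl_send_iov */
	};

	/* Create time: set the fields that never vary, exactly once. */
	static void req_init(struct req *req)
	{
		req->send_wr.next = NULL;
		req->send_wr.sg_list = req->send_iov;
		req->send_wr.opcode = 0;	/* IB_WR_SEND in the real code */
	}

	/* Send time: touch only what varies; no WR is built on the stack. */
	static void req_prepare(struct req *req, int num_sge, int signaled)
	{
		req->send_wr.num_sge = num_sge;
		req->send_wr.send_flags = signaled;
		/* then: ib_post_send(qp, &req->send_wr, &bad_wr); */
	}

With the invariant fields set once in rpcrdma_create_req(), the per-send paths below only have to update num_sge while marshaling and send_flags while posting.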
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h

diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 887ef44c135191fdd0d198dea95dcdcb65b166e8..61a58f59133f85a3ede9c0c47c32a930024830f2 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -241,7 +241,8 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
-       req->rl_niovs = 2;
+       req->rl_send_wr.num_sge = 2;
+
        return 0;
 
 out_map:
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 6187cee87fa9a72de31474258abfa03cf5e67e8d..c2906e31428740a7adcfa0b4e7e4e1bd1989b74d 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -687,7 +687,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
        req->rl_send_iov[0].length = hdrlen;
        req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 
-       req->rl_niovs = 1;
+       req->rl_send_wr.num_sge = 1;
        if (rtype == rpcrdma_areadch)
                return 0;
 
@@ -697,7 +697,8 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
-       req->rl_niovs = 2;
+       req->rl_send_wr.num_sge = 2;
+
        return 0;
 
 out_overflow:
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 7b189fe680bbdfd1fc4b06e8b36aaf5bb651a6b7..79a6346b96c2eb297a027c351fcc2e4caf3f3d44 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -849,6 +849,10 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
        req->rl_cqe.done = rpcrdma_wc_send;
        req->rl_buffer = &r_xprt->rx_buf;
        INIT_LIST_HEAD(&req->rl_registered);
+       req->rl_send_wr.next = NULL;
+       req->rl_send_wr.wr_cqe = &req->rl_cqe;
+       req->rl_send_wr.sg_list = req->rl_send_iov;
+       req->rl_send_wr.opcode = IB_WR_SEND;
        return req;
 }
 
@@ -1128,7 +1132,7 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
        struct rpcrdma_buffer *buffers = req->rl_buffer;
        struct rpcrdma_rep *rep = req->rl_reply;
 
-       req->rl_niovs = 0;
+       req->rl_send_wr.num_sge = 0;
        req->rl_reply = NULL;
 
        spin_lock(&buffers->rb_lock);
@@ -1259,38 +1263,32 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
                struct rpcrdma_req *req)
 {
        struct ib_device *device = ia->ri_device;
-       struct ib_send_wr send_wr, *send_wr_fail;
-       struct rpcrdma_rep *rep = req->rl_reply;
-       struct ib_sge *iov = req->rl_send_iov;
+       struct ib_send_wr *send_wr = &req->rl_send_wr;
+       struct ib_send_wr *send_wr_fail;
+       struct ib_sge *sge = req->rl_send_iov;
        int i, rc;
 
-       if (rep) {
-               rc = rpcrdma_ep_post_recv(ia, rep);
+       if (req->rl_reply) {
+               rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
                if (rc)
                        return rc;
                req->rl_reply = NULL;
        }
 
-       send_wr.next = NULL;
-       send_wr.wr_cqe = &req->rl_cqe;
-       send_wr.sg_list = iov;
-       send_wr.num_sge = req->rl_niovs;
-       send_wr.opcode = IB_WR_SEND;
-
-       for (i = 0; i < send_wr.num_sge; i++)
-               ib_dma_sync_single_for_device(device, iov[i].addr,
-                                             iov[i].length, DMA_TO_DEVICE);
+       for (i = 0; i < send_wr->num_sge; i++)
+               ib_dma_sync_single_for_device(device, sge[i].addr,
+                                             sge[i].length, DMA_TO_DEVICE);
        dprintk("RPC:       %s: posting %d s/g entries\n",
-               __func__, send_wr.num_sge);
+               __func__, send_wr->num_sge);
 
        if (DECR_CQCOUNT(ep) > 0)
-               send_wr.send_flags = 0;
+               send_wr->send_flags = 0;
        else { /* Provider must take a send completion every now and then */
                INIT_CQCOUNT(ep);
-               send_wr.send_flags = IB_SEND_SIGNALED;
+               send_wr->send_flags = IB_SEND_SIGNALED;
        }
 
-       rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
+       rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
        if (rc)
                goto out_postsend_err;
        return 0;
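
The signaling policy retained above ("Provider must take a send completion every now and then") is a common verbs idiom: post sends unsignaled to avoid a completion interrupt per send, but request a signaled completion every Nth post so the provider can retire send queue entries. A self-contained sketch of such a countdown, assuming a simple per-endpoint counter (illustrative only; xprtrdma implements this with its INIT_CQCOUNT() and DECR_CQCOUNT() macros on struct rpcrdma_ep):

	#include <stdatomic.h>

	struct ep {
		atomic_int	cqcount;	/* sends left before next signal */
		int		interval;	/* signal every Nth send */
	};

	/* Returns nonzero when this send should carry IB_SEND_SIGNALED. */
	static int send_should_signal(struct ep *ep)
	{
		if (atomic_fetch_sub(&ep->cqcount, 1) > 1)
			return 0;		/* post unsignaled */
		atomic_store(&ep->cqcount, ep->interval);
		return 1;
	}

Too few signaled sends can stall send queue accounting, while signaling every send costs an interrupt per post, so the interval is typically derived from the send queue depth.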
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index decd13417ac2748bf8711f5d1b62355dac5f3050..3c5a89a4ff4fc1750383fd5d6feda1a8b12763ca 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -284,10 +284,10 @@ struct rpcrdma_mr_seg {           /* chunk descriptors */
 struct rpcrdma_buffer;
 struct rpcrdma_req {
        struct list_head        rl_free;
-       unsigned int            rl_niovs;
        unsigned int            rl_connect_cookie;
        struct rpcrdma_buffer   *rl_buffer;
-       struct rpcrdma_rep      *rl_reply;/* holder for reply buffer */
+       struct rpcrdma_rep      *rl_reply;
+       struct ib_send_wr       rl_send_wr;
        struct ib_sge           rl_send_iov[RPCRDMA_MAX_IOVS];
        struct rpcrdma_regbuf   *rl_rdmabuf;    /* xprt header */
        struct rpcrdma_regbuf   *rl_sendbuf;    /* rq_snd_buf */