xprtrdma: Eliminate INLINE_THRESHOLD macros
Author:     Chuck Lever <chuck.lever@oracle.com>
AuthorDate: Thu, 15 Sep 2016 14:55:04 +0000 (10:55 -0400)
Commit:     Anna Schumaker <Anna.Schumaker@Netapp.com>
CommitDate: Mon, 19 Sep 2016 17:08:37 +0000 (13:08 -0400)
Clean up: r_xprt is already available everywhere these macros are
invoked, so just dereference that directly.

RPCRDMA_INLINE_PAD_VALUE is no longer used, so it can simply be
removed.
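
For illustration, this is roughly what the conversion looks like at a call
site; a sketch only, using the names that appear in the hunks below and the
macro expansion removed from xprt_rdma.h:

	/* Before: the macro rediscovers the transport from the rqst
	 * pointer, expanding to
	 *   rpcx_to_rdmad(rqst->rq_xprt).inline_wsize
	 */
	size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);

	/* After: the caller already holds r_xprt, so read the inline
	 * write threshold from its create data directly.
	 */
	size = r_xprt->rx_data.inline_wsize;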

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/xprt_rdma.h

diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 87762d976b63b9a1ebc5cb7a8b2603ed17b8b010..5f60ab2f858adf5bd8e3f5b0f4261b38e1a0b010 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -46,13 +46,13 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
                return PTR_ERR(req);
        req->rl_backchannel = true;
 
-       size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
+       size = r_xprt->rx_data.inline_wsize;
        rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
        req->rl_rdmabuf = rb;
 
-       size += RPCRDMA_INLINE_READ_THRESHOLD(rqst);
+       size += r_xprt->rx_data.inline_rsize;
        rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index a47f170b20ef88d1ebe1f9ca406374dee1b84102..845586f7df4701b69453408e2b6c558c195c6105 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -673,7 +673,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
                goto out_unmap;
        hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
 
-       if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
+       if (hdrlen + rpclen > r_xprt->rx_data.inline_wsize)
                goto out_overflow;
 
        dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 81f0e879f019e43d35cc9c85ead0dfd17ebc8d30..be95eced0726741624351427e6f8f42b1570aa2d 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -518,7 +518,7 @@ out:
        return req->rl_sendbuf->rg_base;
 
 out_rdmabuf:
-       min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+       min_size = r_xprt->rx_data.inline_wsize;
        rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
        if (IS_ERR(rb))
                goto out_fail;
@@ -541,8 +541,8 @@ out_sendbuf:
         * reply will be large, but slush is provided here to allow
         * flexibility when marshaling.
         */
-       min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
-       min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+       min_size = r_xprt->rx_data.inline_rsize;
+       min_size += r_xprt->rx_data.inline_wsize;
        if (size < min_size)
                size = min_size;
 
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index a71b0f5897d8721ee8edaed291388c34f61d3cee..9df47c857d2783012d873f6c190d142191c73e4c 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -356,15 +356,6 @@ struct rpcrdma_create_data_internal {
        unsigned int    padding;        /* non-rdma write header padding */
 };
 
-#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
-       (rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
-
-#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
-       (rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
-
-#define RPCRDMA_INLINE_PAD_VALUE(rq)\
-       rpcx_to_rdmad(rq->rq_xprt).padding
-
 /*
  * Statistics for RPCRDMA
  */