SUNRPC: Fix a race in the receive code path
author     Trond Myklebust <trond.myklebust@primarydata.com>
           Fri, 15 Dec 2017 02:24:08 +0000 (21:24 -0500)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 20 Dec 2017 09:10:21 +0000 (10:10 +0100)
commit 90d91b0cd371193d9dbfa9beacab8ab9a4cb75e0 upstream.

We must ensure that the call to rpc_sleep_on() in xprt_transmit() cannot
race with the call to xprt_complete_rqst().
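In userspace terms, the pattern the fix adopts can be sketched as
follows. This is a minimal pthreads analogue, not the kernel code:
the names (reply_lock, reply_received, receiver, wait_for_reply) are
illustrative only, with a mutex standing in for xprt->recv_lock and a
condition variable standing in for the RPC wait queue. The point is
that the "has the reply arrived?" test and the decision to sleep
happen under the same lock the receive side takes to deliver the
reply, so the wakeup cannot be lost.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reply_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  reply_cond = PTHREAD_COND_INITIALIZER;
static bool reply_received;             /* analogue of rq_reply_bytes_recvd */

/* Receive path: analogue of xprt_complete_rqst(). */
static void *receiver(void *arg)
{
	pthread_mutex_lock(&reply_lock);
	reply_received = true;               /* record the reply ...             */
	pthread_cond_broadcast(&reply_cond); /* ... and wake any sleeping sender */
	pthread_mutex_unlock(&reply_lock);
	return NULL;
}

/* Transmit path: analogue of the tail of xprt_transmit(). */
static void wait_for_reply(void)
{
	pthread_mutex_lock(&reply_lock);
	/*
	 * Test and sleep are atomic with respect to receiver(): if the
	 * reply landed first we never sleep, and if we do sleep the
	 * wakeup cannot be lost, because receiver() must hold
	 * reply_lock to deliver it.
	 */
	while (!reply_received)
		pthread_cond_wait(&reply_cond, &reply_lock);
	pthread_mutex_unlock(&reply_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, receiver, NULL);
	wait_for_reply();
	pthread_join(t, NULL);
	printf("reply delivered without a lost wakeup\n");
	return 0;
}

The kernel patch adds one refinement the sketch does not need: after
queueing the task on xprt->pending, it re-checks xprt_connected() and
issues an extra xprt_wake_pending_tasks(xprt, -ENOTCONN) in case a
disconnect raced with the call to rpc_sleep_on().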

Reported-by: Chuck Lever <chuck.lever@oracle.com>
Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=317
Fixes: ce7c252a8c74 ("SUNRPC: Add a separate spinlock to protect..")
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
net/sunrpc/xprt.c

index 898485e3ece4e82a93d7759af2d77fd509a8676d..8eb0c4f3b3e96a3b73c74938e879fc41154a9412 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1001,6 +1001,7 @@ void xprt_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       unsigned int connect_cookie;
        int status, numreqs;
 
        dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
@@ -1024,6 +1025,7 @@ void xprt_transmit(struct rpc_task *task)
        } else if (!req->rq_bytes_sent)
                return;
 
+       connect_cookie = xprt->connect_cookie;
        req->rq_xtime = ktime_get();
        status = xprt->ops->send_request(task);
        trace_xprt_transmit(xprt, req->rq_xid, status);
@@ -1047,20 +1049,28 @@ void xprt_transmit(struct rpc_task *task)
        xprt->stat.bklog_u += xprt->backlog.qlen;
        xprt->stat.sending_u += xprt->sending.qlen;
        xprt->stat.pending_u += xprt->pending.qlen;
+       spin_unlock_bh(&xprt->transport_lock);
 
-       /* Don't race with disconnect */
-       if (!xprt_connected(xprt))
-               task->tk_status = -ENOTCONN;
-       else {
+       req->rq_connect_cookie = connect_cookie;
+       if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
                /*
-                * Sleep on the pending queue since
-                * we're expecting a reply.
+                * Sleep on the pending queue if we're expecting a reply.
+                * The spinlock ensures atomicity between the test of
+                * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
                 */
-               if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
+               spin_lock(&xprt->recv_lock);
+               if (!req->rq_reply_bytes_recvd) {
                        rpc_sleep_on(&xprt->pending, task, xprt_timer);
-               req->rq_connect_cookie = xprt->connect_cookie;
+                       /*
+                        * Send an extra queue wakeup call if the
+                        * connection was dropped in case the call to
+                        * rpc_sleep_on() raced.
+                        */
+                       if (!xprt_connected(xprt))
+                               xprt_wake_pending_tasks(xprt, -ENOTCONN);
+               }
+               spin_unlock(&xprt->recv_lock);
        }
-       spin_unlock_bh(&xprt->transport_lock);
 }
 
 static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)