rxrpc: Cache the congestion window setting
author David Howells <dhowells@redhat.com>
Wed, 14 Jun 2017 16:56:50 +0000 (17:56 +0100)
committer David S. Miller <davem@davemloft.net>
Wed, 14 Jun 2017 19:42:45 +0000 (15:42 -0400)
Cache the congestion window setting that was determined during a call's
transmission phase when it finishes so that it can be used by the next call
to the same peer, thereby shortcutting the slow-start algorithm.

The value is stored in the rxrpc_peer struct and is accessed without
locking.  Each call takes the value that happens to be there when it starts
and just overwrites the value when it finishes.
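
For illustration only, here is a minimal userspace C sketch of that lifecycle,
using simplified stand-in structs and helper names (peer, call, call_begin,
call_end are hypothetical, not the real rxrpc definitions): the peer holds the
last known window, a new call seeds its cwnd from it, and the final value is
written back when the call is torn down.

	#include <stdio.h>

	/* Simplified stand-ins for the kernel structures (illustrative only). */
	struct peer { unsigned char cong_cwnd; };                 /* cached window */
	struct call {
		unsigned char cong_cwnd;
		unsigned char cong_ssthresh;
		struct peer *peer;
	};

	static void call_begin(struct call *call, struct peer *peer)
	{
		call->peer = peer;
		call->cong_cwnd = peer->cong_cwnd;                 /* seed from cache */
	}

	static void call_end(struct call *call)
	{
		call->peer->cong_cwnd = call->cong_cwnd;           /* write back, unlocked */
	}

	int main(void)
	{
		struct peer peer = { .cong_cwnd = 3 };
		struct call a = { .cong_ssthresh = 63 };

		call_begin(&a, &peer);
		a.cong_cwnd = 24;                                  /* grown during tx phase */
		call_end(&a);
		printf("next call starts at cwnd %u\n", (unsigned)peer.cong_cwnd);
		return 0;
	}

Because the value is a single byte read once at call start and overwritten at
call end, concurrent calls racing on it can at worst lose an update, which is
why no locking is taken around it.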

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/peer_object.c

diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index adbf37946450c54d8d2aef9f05b9be3c09b4706f..69b97339ff9da594a578e14437afbbaf471029c3 100644
@@ -300,6 +300,8 @@ struct rxrpc_peer {
        u64                     rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
        u8                      rtt_cursor;     /* next entry at which to insert */
        u8                      rtt_usage;      /* amount of cache actually used */
+
+       u8                      cong_cwnd;      /* Congestion window size */
 };
 
 /*
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 0d4d84e8c074da699898d1fab3f4b936e1f04a9c..dd30d74824b0de42d1866f4449300f89c83d6724 100644
@@ -310,6 +310,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
        rxrpc_see_call(call);
        call->conn = conn;
        call->peer = rxrpc_get_peer(conn->params.peer);
+       call->cong_cwnd = call->peer->cong_cwnd;
        return call;
 }
 
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 423030fd93bed29d1b2aac0e0f0c232ed42049bb..d7809a0620b4e19acd511d2bd5233e88bf2ba0b3 100644
@@ -136,12 +136,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
        call->tx_winsize = 16;
        call->rx_expect_next = 1;
 
-       if (RXRPC_TX_SMSS > 2190)
-               call->cong_cwnd = 2;
-       else if (RXRPC_TX_SMSS > 1095)
-               call->cong_cwnd = 3;
-       else
-               call->cong_cwnd = 4;
+       call->cong_cwnd = 2;
        call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
        return call;
 
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index dd8bb919c15a16b912c4b5e9b43a047b9b401ff0..eb21576803997b572902b9eaa2086327873ad778 100644
@@ -292,6 +292,12 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
        if (!cp->peer)
                goto error;
 
+       call->cong_cwnd = cp->peer->cong_cwnd;
+       if (call->cong_cwnd >= call->cong_ssthresh)
+               call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+       else
+               call->cong_mode = RXRPC_CALL_SLOW_START;
+
        /* If the connection is not meant to be exclusive, search the available
         * connections to see if the connection we want to use already exists.
         */
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 5bb255107427b2fe29cfa07be708e1b9ac822976..929b50d5afe843fd6e9e5822b483fd4e451c8013 100644
@@ -193,6 +193,8 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 {
        struct rxrpc_connection *conn = call->conn;
 
+       call->peer->cong_cwnd = call->cong_cwnd;
+
        spin_lock_bh(&conn->params.peer->lock);
        hlist_del_init(&call->error_link);
        spin_unlock_bh(&conn->params.peer->lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index cfed3b27adf038b89f92562e69c194f06037464e..5787f97f533026006da00764235061862e1886ae 100644
@@ -228,6 +228,13 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
                peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
+
+               if (RXRPC_TX_SMSS > 2190)
+                       peer->cong_cwnd = 2;
+               else if (RXRPC_TX_SMSS > 1095)
+                       peer->cong_cwnd = 3;
+               else
+                       peer->cong_cwnd = 4;
        }
 
        _leave(" = %p", peer);