tcp: make undo_cwnd mandatory for congestion modules
author Florian Westphal <fw@strlen.de>
Mon, 21 Nov 2016 13:18:38 +0000 (14:18 +0100)
committer David S. Miller <davem@davemloft.net>
Mon, 21 Nov 2016 18:20:17 +0000 (13:20 -0500)
The undo_cwnd fallback in the stack doubles cwnd based on ssthresh,
which undoes reno's halving behaviour.

It seems more appropriate to let congctl algorithms pair .ssthresh
and .undo_cwnd properly. Add a 'tcp_reno_undo_cwnd' function and wire it
up for all congestion algorithms that used to rely on the fallback.
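
As illustration only (not part of the patch), a standalone userspace
sketch of how the paired reno operations interact, with plain u32
stand-ins for the struct tcp_sock fields: .ssthresh halves cwnd when
loss is detected, and .undo_cwnd doubles ssthresh back to recover the
pre-loss window when the loss turns out to be spurious.

#include <stdio.h>

typedef unsigned int u32;

#define max(a, b) ((a) > (b) ? (a) : (b))

/* mirrors tcp_reno_ssthresh(): halve cwnd, floor of 2 */
static u32 reno_ssthresh(u32 snd_cwnd)
{
        return max(snd_cwnd >> 1U, 2U);
}

/* mirrors tcp_reno_undo_cwnd(): double ssthresh back */
static u32 reno_undo_cwnd(u32 snd_cwnd, u32 snd_ssthresh)
{
        return max(snd_cwnd, snd_ssthresh << 1);
}

int main(void)
{
        u32 cwnd = 40;
        u32 ssthresh = reno_ssthresh(cwnd);     /* 20 */

        cwnd = ssthresh;                        /* window halved on loss */
        cwnd = reno_undo_cwnd(cwnd, ssthresh);  /* 40: halving undone */
        printf("restored cwnd = %u\n", cwnd);
        return 0;
}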

Cc: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h
net/ipv4/tcp_cong.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_input.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_westwood.c

index 123979fe12bf780b50ed0967d8ba289c63f798a3..7de80739adabace8c4d6250cb30021c5dace1b9b 100644
@@ -958,6 +958,7 @@ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
 u32 tcp_reno_ssthresh(struct sock *sk);
+u32 tcp_reno_undo_cwnd(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
index 1294af4e0127b7a9b98d6e9cfa9e3979c7d7086e..38905ec5f50812d1021f6e1c906a1e54f0042732 100644
@@ -68,8 +68,9 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
 {
        int ret = 0;
 
-       /* all algorithms must implement ssthresh and cong_avoid ops */
-       if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
+       /* all algorithms must implement these */
+       if (!ca->ssthresh || !ca->undo_cwnd ||
+           !(ca->cong_avoid || ca->cong_control)) {
                pr_err("%s does not implement required ops\n", ca->name);
                return -EINVAL;
        }
@@ -441,10 +442,19 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
+u32 tcp_reno_undo_cwnd(struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+
+       return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+}
+EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
+
 struct tcp_congestion_ops tcp_reno = {
        .flags          = TCP_CONG_NON_RESTRICTED,
        .name           = "reno",
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
+       .undo_cwnd      = tcp_reno_undo_cwnd,
 };
index 51139175bf6143dec815be4bf71e74778aab9040..bde22ebb92a8fc47b474ab0794c6ce3e323dc44b 100644
@@ -342,6 +342,7 @@ static struct tcp_congestion_ops dctcp __read_mostly = {
 static struct tcp_congestion_ops dctcp_reno __read_mostly = {
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
+       .undo_cwnd      = tcp_reno_undo_cwnd,
        .get_info       = dctcp_get_info,
        .owner          = THIS_MODULE,
        .name           = "dctcp-reno",
index 083831e359df92ca9ba0fe7dd5a7a76fe41a94b0..0f7175c3338e062a4a6507aacfdebc89e97a1948 100644
@@ -166,6 +166,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static struct tcp_congestion_ops tcp_hybla __read_mostly = {
        .init           = hybla_init,
        .ssthresh       = tcp_reno_ssthresh,
+       .undo_cwnd      = tcp_reno_undo_cwnd,
        .cong_avoid     = hybla_cong_avoid,
        .set_state      = hybla_state,
 
index a70046fea0e89a7aff274b2702bd3b0b4d3853a9..22e6a2097ff60d3a3b4ae01bf948bfe595d9ef8c 100644
@@ -2394,10 +2394,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
        if (tp->prior_ssthresh) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
 
-               if (icsk->icsk_ca_ops->undo_cwnd)
-                       tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
-               else
-                       tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+               tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
 
                if (tp->prior_ssthresh > tp->snd_ssthresh) {
                        tp->snd_ssthresh = tp->prior_ssthresh;
index c67ece1390c253304454cd41eed59cae26dd10a8..046fd3910873306d74207615d6997e1c847ea361 100644
@@ -316,6 +316,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 static struct tcp_congestion_ops tcp_lp __read_mostly = {
        .init = tcp_lp_init,
        .ssthresh = tcp_reno_ssthresh,
+       .undo_cwnd = tcp_reno_undo_cwnd,
        .cong_avoid = tcp_lp_cong_avoid,
        .pkts_acked = tcp_lp_pkts_acked,
 
index 4c4bac1b5eab221928c569592c833e1bfcba748d..218cfcc77650004fea3f4bdfe24760ff0f634acc 100644
@@ -307,6 +307,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 static struct tcp_congestion_ops tcp_vegas __read_mostly = {
        .init           = tcp_vegas_init,
        .ssthresh       = tcp_reno_ssthresh,
+       .undo_cwnd      = tcp_reno_undo_cwnd,
        .cong_avoid     = tcp_vegas_cong_avoid,
        .pkts_acked     = tcp_vegas_pkts_acked,
        .set_state      = tcp_vegas_state,
index 4b03a2e2a0504617813838746c13691cf86557f6..fed66dc0e0f5f242cf0af25434fa9cfa89998958 100644
@@ -278,6 +278,7 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
+       .undo_cwnd      = tcp_reno_undo_cwnd,
        .cwnd_event     = tcp_westwood_event,
        .in_ack_event   = tcp_westwood_ack,
        .get_info       = tcp_westwood_info,
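
With .undo_cwnd now mandatory, an out-of-tree module that previously
relied on the stack fallback must supply an implementation, or
tcp_register_congestion_control() rejects it with -EINVAL. A minimal
sketch of such a module follows; the "minimal" name and the module
itself are hypothetical, and the ops simply reuse the exported reno
helpers, including the new tcp_reno_undo_cwnd.

#include <linux/module.h>
#include <net/tcp.h>

/* Hypothetical module: all three required ops point at the exported
 * reno helpers. Leaving .undo_cwnd unset would now fail registration
 * with -EINVAL.
 */
static struct tcp_congestion_ops tcp_minimal __read_mostly = {
        .name           = "minimal",
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .undo_cwnd      = tcp_reno_undo_cwnd,
};

static int __init tcp_minimal_register(void)
{
        return tcp_register_congestion_control(&tcp_minimal);
}

static void __exit tcp_minimal_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_minimal);
}

module_init(tcp_minimal_register);
module_exit(tcp_minimal_unregister);
MODULE_LICENSE("GPL");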