tcp: do not set queue_mapping on SYNACK
authorEric Dumazet <edumazet@google.com>
Fri, 16 Oct 2015 20:00:01 +0000 (13:00 -0700)
committerDavid S. Miller <davem@davemloft.net>
Mon, 19 Oct 2015 05:26:02 +0000 (22:26 -0700)
At the time of commit fff326990789 ("tcp: reflect SYN queue_mapping into
SYNACK packets") we had few ways to cope with SYN floods.

We no longer need to reflect incoming skb queue mappings, and instead
can pick a TX queue based on cpu cooking the SYNACK, with normal XPS
affinities.

Note that all SYNACK retransmits were picking TX queue 0; this is no longer
a win, given that SYNACK rtx are now distributed over all cpus.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h
net/ipv4/ip_output.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/tcp_ipv6.c

index a6be56d5f0e3757cb0b0f6f5d6caf2c63ea66203..eed94fc355c1aef2dd9ccb60c201ef2db79f5f85 100644 (file)
@@ -1716,7 +1716,7 @@ struct tcp_request_sock_ops {
        __u32 (*init_seq)(const struct sk_buff *skb);
        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
-                          u16 queue_mapping, struct tcp_fastopen_cookie *foc,
+                          struct tcp_fastopen_cookie *foc,
                           bool attach_req);
 };
 
index 67404e1fe7d40fe5121f7405d738bea40fe70942..50e29737b584624b6d338630622e87634961612e 100644 (file)
@@ -1596,7 +1596,6 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                          arg->csumoffset) = csum_fold(csum_add(nskb->csum,
                                                                arg->csum));
                nskb->ip_summed = CHECKSUM_NONE;
-               skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
 out:
index 3b35c3f4d268a5d8a8a2e6a9232d2e682b360c25..944eaca6911596676af66a934a17685c27fc11cc 100644 (file)
@@ -6236,7 +6236,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        }
        if (fastopen_sk) {
                af_ops->send_synack(fastopen_sk, dst, &fl, req,
-                                   skb_get_queue_mapping(skb), &foc, false);
+                                   &foc, false);
                /* Add the child socket directly into the accept queue */
                inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
                sk->sk_data_ready(sk);
@@ -6247,7 +6247,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                if (!want_cookie)
                        inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
                af_ops->send_synack(sk, dst, &fl, req,
-                                   skb_get_queue_mapping(skb), &foc, !want_cookie);
+                                   &foc, !want_cookie);
                if (want_cookie)
                        goto drop_and_free;
        }
index 9c68cf3762c4407b321635b7a1fb969468f86bf9..30dd45c1f568a0e986b89752a9f0839ea5f8d5bb 100644 (file)
@@ -821,7 +821,6 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
-                             u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc,
                                  bool attach_req)
 {
@@ -839,7 +838,6 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-               skb_set_queue_mapping(skb, queue_mapping);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
index 6e79fcb0addb9443384614c6c1c9cff5bc571ad1..19adedb8c5cc41d4375194558e2a21f3036ffd83 100644 (file)
@@ -3518,7 +3518,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
        int res;
 
        tcp_rsk(req)->txhash = net_tx_rndhash();
-       res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL, true);
+       res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
index acb06f86f3723f09722d0458232a0c56306db472..f495d189f5e022263bca20d01b10afdb0be06059 100644 (file)
@@ -437,7 +437,6 @@ out:
 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
-                             u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc,
                              bool attach_req)
 {
@@ -462,7 +461,6 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-               skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
        }