net: disable fragment reassembly if high_thresh is set to zero
author Michal Kubecek <mkubecek@suse.cz>
Mon, 9 May 2016 09:01:04 +0000 (11:01 +0200)
committer Danny Wood <danwood76@gmail.com>
Tue, 29 Jan 2019 13:15:14 +0000 (13:15 +0000)
commit 30759219f562cfaaebe7b9c1d1c0e6b5445c69b0 upstream.

Before commit 6d7b857d541e ("net: use lib/percpu_counter API for
fragmentation mem accounting"), setting the high threshold to 0 prevented
fragment reassembly, as the first fragment would always be evicted before
the second could be added to the queue. While inefficient, some users
apparently relied on it.

Since the commit mentioned above, a percpu counter is used for
reassembly memory accounting, and its large batch size avoids taking the
slow path in the most common scenarios. As a result, a whole full-sized
packet can be reassembled without the percpu counter's main counter
changing its value, so even with high_thresh set to 0, fragmented
packets can still be reassembled and processed.

Add explicit checks preventing reassembly if the high threshold is zero.
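
For reference, a minimal sketch (assuming root and the standard
ipfrag_high_thresh procfs path) of how a user relying on this behaviour
would zero the IPv4 threshold; the corresponding IPv6 and nf_conntrack
knobs are set the same way through their own sysctl files:

  #include <stdio.h>

  int main(void)
  {
          /* Requires root; writes the IPv4 knob only. */
          FILE *f = fopen("/proc/sys/net/ipv4/ipfrag_high_thresh", "w");

          if (!f) {
                  perror("ipfrag_high_thresh");
                  return 1;
          }
          fputs("0\n", f);
          return fclose(f) ? 1 : 0;
  }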

[mk] backport to 3.12

Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Willy Tarreau <w@1wt.eu>
net/ipv4/ip_fragment.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/reassembly.c

diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 41000561cf074d698e59cdc542ada2649b59c2b7..989201a3b4d4e215bbdfd088ae6b28a5104c83c1 100644
@@ -656,6 +656,9 @@ int ip_defrag(struct sk_buff *skb, u32 user)
        net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
+       if (!net->ipv4.frags.high_thresh)
+               goto fail;
+
        /* Start by cleaning up the memory. */
        ip_evictor(net);
 
@@ -672,6 +675,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
                return ret;
        }
 
+fail:
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 7cd623588532065e3f1ace86620c7ae71dd86406..c11a40caf5b614435df84b9cdefd56fc4730b6f1 100644
@@ -569,6 +569,9 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
                return skb;
 
+       if (!net->nf_frag.frags.high_thresh)
+               return skb;
+
        clone = skb_clone(skb, GFP_ATOMIC);
        if (clone == NULL) {
                pr_debug("Can't clone skb\n");
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a1fb511da3b5bd0ded7cfcdb090d88f0d6e06968..1a5318efa31c240c7ded39c18133a00433989446 100644
@@ -556,6 +556,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                return 1;
        }
 
+       if (!net->ipv6.frags.high_thresh)
+               goto fail_mem;
+
        evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
        if (evicted)
                IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
@@ -575,6 +578,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                return ret;
        }
 
+fail_mem:
        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;