A large momentary influx of packets flooding the TCP layer may cause
packets to be dropped at the socket's backlog queue. Bump the backlog
limit up to prevent these drops. Note that this change may let the
socket memory accounting grow the total backlog queue length beyond
the user-space configured values, sometimes by a substantial amount,
which in turn can cause out-of-order packets to be dropped instead of
queued. To avoid these out-of-order (OFO) drops, the condition for
dropping an OFO packet is relaxed so that OFO queuing can continue as
long as the queue stays within the now-increased backlog limit.
Change-Id: I447ffc8560cb149fe84193c72bf693862f7ec740
Signed-off-by: Harout Hedeshian <harouth@codeaurora.org>
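
For illustration only (not part of the patch): a minimal userspace
sketch of the admission check, assuming sk_rcvqueues_full() compares
backlog length plus allocated receive memory against the limit. The
buffer sizes are made-up values, chosen so that a burst rejected at
the unscaled limit is accepted once the limit is multiplied by
TCP_BACKLOG_SCALE.

/* Illustrative userspace model; mirrors the shape of the kernel's
 * sk_rcvqueues_full() check. All byte counts are assumptions. */
#include <stdbool.h>
#include <stdio.h>

#define TCP_BACKLOG_SCALE 4

static bool rcvqueues_full(unsigned int backlog_len,
			   unsigned int rmem_alloc, unsigned int limit)
{
	/* Queue counts as full once backlog length plus receive
	 * memory exceeds the limit. */
	return backlog_len + rmem_alloc > limit;
}

int main(void)
{
	unsigned int limit = 256 * 1024;   /* assumed per-socket limit */
	unsigned int backlog = 300 * 1024; /* momentary burst */
	unsigned int rmem = 64 * 1024;

	printf("unscaled limit: drop=%d\n",
	       rcvqueues_full(backlog, rmem, limit));
	printf("scaled limit:   drop=%d\n",
	       rcvqueues_full(backlog, rmem, limit * TCP_BACKLOG_SCALE));
	return 0;
}

With these numbers the first check reports a drop and the second does
not, which is the intended effect of the scaling.
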
#include <net/dst.h>
#include <net/checksum.h>
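+/* Scale factor applied to the backlog limit in sk_add_backlog() so a
+ * short burst of packets is queued rather than dropped. */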
+#define TCP_BACKLOG_SCALE 4
+
struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
unsigned int limit)
{
- if (sk_rcvqueues_full(sk, skb, limit))
+ if (sk_rcvqueues_full(sk, skb, limit * TCP_BACKLOG_SCALE))
return -ENOBUFS;
/*
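
For context, and as an assumption about kernels of roughly this
generation rather than part of the diff: TCP passes the backlog limit
as the sum of the two buffer sizes, so the scaled limit becomes
(sk_rcvbuf + sk_sndbuf) * TCP_BACKLOG_SCALE, which is exactly the
threshold adopted by the tcp_try_rmem_schedule() hunk below.

/* net/ipv4/tcp_ipv4.c, tcp_v4_rcv(): existing caller, not changed by
 * this patch (quoted from memory of this kernel era, so treat it as
 * an assumption): */
if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))
	goto discard_and_relse;
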
static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
unsigned int size)
{
- if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ if (atomic_read(&sk->sk_rmem_alloc) >
+     ((sk->sk_rcvbuf + sk->sk_sndbuf) * TCP_BACKLOG_SCALE) ||
!sk_rmem_schedule(sk, skb, size)) {
if (tcp_prune_queue(sk) < 0)
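
As a worked example (buffer sizes are assumptions, not tuned
defaults): with sk_rcvbuf = sk_sndbuf = 212992 bytes, the old prune
threshold was sk_rcvbuf alone, while the new one is the scaled sum:

	old = 212992                            (about 208 KiB)
	new = (212992 + 212992) * 4 = 1703936   (about 1.6 MiB)

So out-of-order data may now occupy roughly eight times as much memory
before the pruning path in tcp_try_rmem_schedule() is triggered.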