udp: use limited socket backlog
author: Zhu Yi <yi.zhu@intel.com>
Thu, 4 Mar 2010 18:01:42 +0000 (18:01 +0000)
committer: David S. Miller <davem@davemloft.net>
Fri, 5 Mar 2010 21:34:00 +0000 (13:34 -0800)
Make udp adapt to the limited socket backlog change.

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Cc: "Pekka Savola (ipv6)" <pekkas@netcore.fi>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/udp.c
net/ipv6/udp.c

index 608a5446d05b8f7b2292e42e40d675ce656f4e8c..e7eb47f338d4e1576da75d87a462188b7a4f43bd 100644 (file)
@@ -1371,8 +1371,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
-       else
-               sk_add_backlog(sk, skb);
+       else if (sk_add_backlog_limited(sk, skb)) {
+               bh_unlock_sock(sk);
+               goto drop;
+       }
        bh_unlock_sock(sk);
 
        return rc;
index 52b8347ae3b2a0cd3c954cb44812ba71b676410c..64804912b093e402fedcad391b4dd8d1c3dcd0a6 100644 (file)
@@ -583,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
                        bh_lock_sock(sk);
                        if (!sock_owned_by_user(sk))
                                udpv6_queue_rcv_skb(sk, skb1);
-                       else
-                               sk_add_backlog(sk, skb1);
+                       else if (sk_add_backlog_limited(sk, skb1)) {
+                               kfree_skb(skb1);
+                               bh_unlock_sock(sk);
+                               goto drop;
+                       }
                        bh_unlock_sock(sk);
-               } else {
-                       atomic_inc(&sk->sk_drops);
-                       UDP6_INC_STATS_BH(sock_net(sk),
-                                       UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-                       UDP6_INC_STATS_BH(sock_net(sk),
-                                       UDP_MIB_INERRORS, IS_UDPLITE(sk));
+                       continue;
                }
+drop:
+               atomic_inc(&sk->sk_drops);
+               UDP6_INC_STATS_BH(sock_net(sk),
+                               UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+               UDP6_INC_STATS_BH(sock_net(sk),
+                               UDP_MIB_INERRORS, IS_UDPLITE(sk));
        }
 }
 /*
@@ -754,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                udpv6_queue_rcv_skb(sk, skb);
-       else
-               sk_add_backlog(sk, skb);
+       else if (sk_add_backlog_limited(sk, skb)) {
+               atomic_inc(&sk->sk_drops);
+               bh_unlock_sock(sk);
+               sock_put(sk);
+               goto discard;
+       }
        bh_unlock_sock(sk);
        sock_put(sk);
        return 0;