 {
 	unsigned int i;
 	struct sk_buff *skb1 = NULL;
+	struct sock *sk;
 
 	for (i = 0; i < count; i++) {
+		sk = stack[i];
 		if (likely(skb1 == NULL))
 			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
-		if (skb1 && udp_queue_rcv_skb(stack[i], skb1) <= 0)
+		if (!skb1) {
+			atomic_inc(&sk->sk_drops);
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					 IS_UDPLITE(sk));
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+					 IS_UDPLITE(sk));
+		}
+
+		if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
 			skb1 = NULL;
 	}
 	if (unlikely(skb1))
 		kfree_skb(skb1);
 }

--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
 	for (i = 0; i < count; i++) {
 		skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
+		sk = stack[i];
 		if (skb1) {
-			sk = stack[i];
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
 			else
 				sk_add_backlog(sk, skb1);
 			bh_unlock_sock(sk);
+		} else {
+			atomic_inc(&sk->sk_drops);
+			UDP6_INC_STATS_BH(sock_net(sk),
+					UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+			UDP6_INC_STATS_BH(sock_net(sk),
+					UDP_MIB_INERRORS, IS_UDPLITE(sk));
 		}
 	}
 }
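
For readers who want the net effect rather than the hunks, here is a sketch of how the IPv4 flush_stack() reads once the change above is applied. The function signature, the trailing kfree_skb() of the last unused clone, and the comments are reconstructed context assumed from net/ipv4/udp.c; they are not part of the patch itself.

static void flush_stack(struct sock **stack, unsigned int count,
			struct sk_buff *skb, unsigned int final)
{
	unsigned int i;
	struct sk_buff *skb1 = NULL;
	struct sock *sk;

	for (i = 0; i < count; i++) {
		sk = stack[i];
		/* a clone left over from the previous iteration can be reused */
		if (likely(skb1 == NULL))
			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

		/* clone failed: account the drop against this socket and SNMP */
		if (!skb1) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
		}

		/* a return <= 0 means the skb was consumed; clone again next time */
		if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
			skb1 = NULL;
	}
	/* free the last clone if nothing consumed it */
	if (unlikely(skb1))
		kfree_skb(skb1);
}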