gro_cells: gro_cells_receive now returns an error code
author     Paolo Abeni <pabeni@redhat.com>
           Wed, 20 Jul 2016 16:11:31 +0000 (18:11 +0200)
committer  David S. Miller <davem@davemloft.net>
           Fri, 22 Jul 2016 04:50:41 +0000 (00:50 -0400)
Return the error code so that the caller can update its stats accordingly, if needed.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/gro_cells.h

index cf6c74550baa53af7bfe6a8c0079174a85ac917d..d15214d673b2e8e08fd6437b572278fb1359f10d 100644 (file)
@@ -14,27 +14,26 @@ struct gro_cells {
        struct gro_cell __percpu        *cells;
 };
 
-static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
        struct gro_cell *cell;
        struct net_device *dev = skb->dev;
 
-       if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
-               netif_rx(skb);
-               return;
-       }
+       if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
+               return netif_rx(skb);
 
        cell = this_cpu_ptr(gcells->cells);
 
        if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
-               return;
+               return NET_RX_DROP;
        }
 
        __skb_queue_tail(&cell->napi_skbs, skb);
        if (skb_queue_len(&cell->napi_skbs) == 1)
                napi_schedule(&cell->napi);
+       return NET_RX_SUCCESS;
 }
 
 /* called under BH context */
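
With this change a caller can act on the result of gro_cells_receive(). Below is a minimal, hypothetical caller sketch, not part of this patch: struct example_tunnel, its fields and the function name are assumptions for illustration, and it assumes the driver has allocated dev->tstats. It records skb->len before the call (the skb must not be touched afterwards) and bumps the per-cpu software RX stats only when the packet was actually accepted; on NET_RX_DROP the skb has already been freed and dev->rx_dropped accounted inside gro_cells_receive().

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <net/gro_cells.h>

/* Hypothetical tunnel private data, for illustration only. */
struct example_tunnel {
	struct net_device	*dev;
	struct gro_cells	gro_cells;
};

/* Hypothetical RX path: hand the skb to the per-cpu GRO cell and
 * account it in the device's per-cpu software stats only if it was
 * accepted.
 */
static int example_tunnel_rx(struct example_tunnel *t, struct sk_buff *skb)
{
	unsigned int len = skb->len;	/* skb must not be touched after the call */
	int err;

	err = gro_cells_receive(&t->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(t->dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
	/* On NET_RX_DROP the skb was freed and dev->rx_dropped bumped
	 * inside gro_cells_receive(), so there is nothing else to do.
	 */
	return err;
}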