napi_enable(&priv->napi);
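+ /* rx_recycle holds skbs that can be reused on the RX path */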
+ skb_queue_head_init(&priv->rx_recycle);
+
/* Initialize a bunch of registers */
init_registers(dev);
napi_disable(&priv->napi);
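+ /* Free any skbs still sitting in the recycle queue */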
+ skb_queue_purge(&priv->rx_recycle);
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
bdp = next_txbd(bdp, base, tx_ring_size);
}
- dev_kfree_skb_any(skb);
+ /*
+ * If there's room in the queue (limited to rx_ring_size
+ * entries) and the skb is the right size, add it back into
+ * the recycle pool; otherwise free it.
+ */
+ if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+ skb_recycle_check(skb, priv->rx_buffer_size +
+ RXBUF_ALIGNMENT))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb_any(skb);
+
priv->tx_skbuff[skb_dirtytx] = NULL;
skb_dirtytx = (skb_dirtytx + 1) &
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
- /* We have to allocate the skb, so keep trying till we succeed */
- skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
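+ /* Try the recycle queue first; fall back to a fresh allocation */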
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (!skb)
+ skb = netdev_alloc_skb(dev,
+ priv->rx_buffer_size + RXBUF_ALIGNMENT);
if (!skb)
return NULL;
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- dev_kfree_skb_any(skb);
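+ /* Return the unused skb to the recycle queue instead of freeing it */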
+ __skb_queue_head(&priv->rx_recycle, skb);
} else {
/* Increment the number of packets */
dev->stats.rx_packets++;