unsigned int tx_tail;
void __iomem *base;
- struct sk_buff_head rx_recycle;
unsigned int dma_buf_sz;
dma_addr_t dma_rx_phy;
dma_addr_t dma_tx_phy;
p = priv->dma_rx + entry;
if (priv->rx_skbuff[entry] == NULL) {
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
if (unlikely(skb == NULL))
break;
desc_get_buf_len(p), DMA_TO_DEVICE);
}
- /*
- * If there's room in the queue (limit it to size)
- * we add this skb back into the pool,
- * if it's the right size.
- */
- if ((skb_queue_len(&priv->rx_recycle) <
- DMA_RX_RING_SZ) &&
- skb_recycle_check(skb, priv->dma_buf_sz))
- __skb_queue_head(&priv->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
dev->dev_addr);
}
- skb_queue_head_init(&priv->rx_recycle);
memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
/* Initialize the XGMAC and descriptors */
napi_disable(&priv->napi);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
- skb_queue_purge(&priv->rx_recycle);
/* Disable the MAC core */
xgmac_mac_disable(priv->base);
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
- skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
enable_napi(priv);
- skb_queue_head_init(&priv->rx_recycle);
-
/* Initialize a bunch of registers */
init_registers(dev);
bytes_sent += skb->len;
- /* If there's room in the queue (limit it to rx_buffer_size)
- * we add this skb back into the pool, if it's the right size
- */
- if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
- gfar_align_skb(skb);
- skb_queue_head(&priv->rx_recycle, skb);
- } else
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
if (!skb)
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
-
- skb = skb_dequeue(&priv->rx_recycle);
- if (!skb)
- skb = gfar_alloc_skb(dev);
-
- return skb;
+ return gfar_alloc_skb(dev);
}
static inline void count_errors(unsigned short status, struct net_device *dev)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- skb_queue_head(&priv->rx_recycle, skb);
+ dev_kfree_skb(skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
u32 cur_filer_idx;
- struct sk_buff_head rx_recycle;
-
/* RX queue filer rule set*/
struct ethtool_rx_list rx_list;
struct mutex rx_queue_access;
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
u8 __iomem *bd)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
- skb = __skb_dequeue(&ugeth->rx_recycle);
+ skb = netdev_alloc_skb(ugeth->ndev,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
if (!skb)
- skb = netdev_alloc_skb(ugeth->ndev,
- ugeth->ug_info->uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT);
- if (skb == NULL)
return NULL;
/* We need the data buffer to be aligned properly. We will reserve
iounmap(ugeth->ug_regs);
ugeth->ug_regs = NULL;
}
-
- skb_queue_purge(&ugeth->rx_recycle);
}
static void ucc_geth_set_multi(struct net_device *dev)
return -ENOMEM;
}
- skb_queue_head_init(&ugeth->rx_recycle);
-
return 0;
}
if (netif_msg_rx_err(ugeth))
ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
__func__, __LINE__, (u32) skb);
- if (skb) {
- skb->data = skb->head + NET_SKB_PAD;
- skb->len = 0;
- skb_reset_tail_pointer(skb);
- __skb_queue_head(&ugeth->rx_recycle, skb);
- }
+ dev_kfree_skb(skb);
ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
dev->stats.rx_dropped++;
dev->stats.tx_packets++;
- if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
- skb_recycle_check(skb,
- ugeth->ug_info->uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT))
- __skb_queue_head(&ugeth->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =
/* index of the first skb which hasn't been transmitted yet. */
u16 skb_dirtytx[NUM_TX_QUEUES];
- struct sk_buff_head rx_recycle;
-
struct ugeth_mii_info *mii_info;
struct phy_device *phydev;
phy_interface_t phy_interface;
u8 work_rx_refill;
int skb_size;
- struct sk_buff_head rx_recycle;
/*
* RX state.
struct rx_desc *rx_desc;
int size;
- skb = __skb_dequeue(&mp->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+ skb = netdev_alloc_skb(mp->dev, mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
desc->byte_cnt, DMA_TO_DEVICE);
}
- if (skb != NULL) {
- if (skb_queue_len(&mp->rx_recycle) <
- mp->rx_ring_size &&
- skb_recycle_check(skb, mp->skb_size))
- __skb_queue_head(&mp->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
- }
+ dev_kfree_skb(skb);
}
__netif_tx_unlock(nq);
napi_enable(&mp->napi);
- skb_queue_head_init(&mp->rx_recycle);
-
mp->int_mask = INT_EXT;
for (i = 0; i < mp->rxq_count; i++) {
mib_counters_update(mp);
del_timer_sync(&mp->mib_counters_timer);
- skb_queue_purge(&mp->rx_recycle);
-
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
for (i = 0; i < mp->txq_count; i++)
unsigned int dirty_rx;
struct sk_buff **rx_skbuff;
dma_addr_t *rx_skbuff_dma;
- struct sk_buff_head rx_recycle;
struct net_device *dev;
dma_addr_t dma_rx_phy;
priv->hw->ring->clean_desc3(p);
if (likely(skb != NULL)) {
- /*
- * If there's room in the queue (limit it to size)
- * we add this skb back into the pool,
- * if it's the right size.
- */
- if ((skb_queue_len(&priv->rx_recycle) <
- priv->dma_rx_size) &&
- skb_recycle_check(skb, priv->dma_buf_sz))
- __skb_queue_head(&priv->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
priv->tx_skbuff[entry] = NULL;
}
priv->eee_enabled = stmmac_eee_init(priv);
napi_enable(&priv->napi);
- skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
kfree(priv->tm);
#endif
napi_disable(&priv->napi);
- skb_queue_purge(&priv->rx_recycle);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
if (likely(priv->rx_skbuff[entry] == NULL)) {
struct sk_buff *skb;
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb_ip_align(priv->dev,
- bfsize);
+ skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
if (unlikely(skb == NULL))
break;
return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
-extern void skb_recycle(struct sk_buff *skb);
-extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
-
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
-static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
-{
- if (irqs_disabled())
- return false;
-
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
- return false;
-
- if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
- return false;
-
- skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
- if (skb_end_offset(skb) < skb_size)
- return false;
-
- if (skb_shared(skb) || skb_cloned(skb))
- return false;
-
- return true;
-}
-
/**
* skb_head_is_locked - Determine if the skb->head is locked down
* @skb: skb to check
}
EXPORT_SYMBOL(consume_skb);
-/**
- * skb_recycle - clean up an skb for reuse
- * @skb: buffer
- *
- * Recycles the skb to be reused as a receive buffer. This
- * function does any necessary reference count dropping, and
- * cleans up the skbuff as if it just came from __alloc_skb().
- */
-void skb_recycle(struct sk_buff *skb)
-{
- struct skb_shared_info *shinfo;
-
- skb_release_head_state(skb);
-
- shinfo = skb_shinfo(skb);
- memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
- atomic_set(&shinfo->dataref, 1);
-
- memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->data = skb->head + NET_SKB_PAD;
- skb_reset_tail_pointer(skb);
-}
-EXPORT_SYMBOL(skb_recycle);
-
-/**
- * skb_recycle_check - check if skb can be reused for receive
- * @skb: buffer
- * @skb_size: minimum receive buffer size
- *
- * Checks that the skb passed in is not shared or cloned, and
- * that it is linear and its head portion at least as large as
- * skb_size so that it can be recycled as a receive buffer.
- * If these conditions are met, this function does any necessary
- * reference count dropping and cleans up the skbuff as if it
- * just came from __alloc_skb().
- */
-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
-{
- if (!skb_is_recycleable(skb, skb_size))
- return false;
-
- skb_recycle(skb);
-
- return true;
-}
-EXPORT_SYMBOL(skb_recycle_check);
-
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;