        priv->dirty_tx = 0;
        priv->cur_tx = 0;
+       netdev_reset_queue(priv->dev);

        stmmac_clear_descriptors(priv);
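This first hunk sits in the descriptor-ring initialization path. The driver zeroes its software ring indices there, so the BQL account must be zeroed with them: BQL only ever decrements its in-flight count through completion reports, and bytes charged before a reinit would otherwise pin the queue stopped. A minimal sketch of that pairing, assuming a hypothetical my_ring_init() and private-struct layout (netdev_reset_queue() is the only real API taken from the hunk):

#include <linux/netdevice.h>

struct my_priv {
        struct net_device *dev;
        unsigned int cur_tx;            /* next descriptor to fill */
        unsigned int dirty_tx;          /* next descriptor to reclaim */
};

static void my_ring_init(struct my_priv *priv)
{
        /* Software ring indices restart from zero... */
        priv->dirty_tx = 0;
        priv->cur_tx = 0;

        /*
         * ...so the BQL in-flight account restarts too; anything still
         * charged to it could no longer be completed by the clean path.
         */
        netdev_reset_queue(priv->dev);
}

The next hunks are in stmmac_tx_clean(), the reclaim routine that walks the ring and frees the skbs the hardware has finished transmitting: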
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
        unsigned int txsize = priv->dma_tx_size;
+       unsigned int bytes_compl = 0, pkts_compl = 0;

        spin_lock(&priv->tx_lock);
[...]
                priv->hw->mode->clean_desc3(priv, p);

                if (likely(skb != NULL)) {
+                       pkts_compl++;
+                       bytes_compl += skb->len;
                        dev_consume_skb_any(skb);
                        priv->tx_skbuff[entry] = NULL;
                }

                priv->dirty_tx++;
        }
+
+       netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+
        if (unlikely(netif_queue_stopped(priv->dev) &&
                     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
                netif_tx_lock(priv->dev);
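The clean path batches its accounting: per-skb counts accumulate inside the reclaim loop, and one netdev_completed_queue() call reports the totals after the loop, under the same tx_lock that serializes it against the transmit path. Batching keeps the per-packet cost to two additions, and the single report is also what lets BQL restart a queue it had stopped; the netif_queue_stopped() check right after it handles the ring-full case. A self-contained sketch of the same pattern, with a hypothetical my_tx_clean() signature (netdev_completed_queue() and dev_consume_skb_any() are the real APIs used above):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_tx_clean(struct net_device *dev, struct sk_buff **tx_skbuff,
                        unsigned int dirty, unsigned int cur,
                        unsigned int ring_size)
{
        unsigned int bytes_compl = 0, pkts_compl = 0;

        while (dirty != cur) {
                unsigned int entry = dirty % ring_size;
                struct sk_buff *skb = tx_skbuff[entry];

                if (skb) {
                        pkts_compl++;
                        bytes_compl += skb->len;
                        /* Consumed, not dropped: keeps drop monitors quiet. */
                        dev_consume_skb_any(skb);
                        tx_skbuff[entry] = NULL;
                }
                dirty++;
        }

        /* One report per clean pass; BQL tunes its byte limit from these. */
        netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

The following hunk is in the error-recovery path, stmmac_tx_err(): pending skbs are dropped, each descriptor is re-initialized, and DMA is restarted, so the BQL account has to be wiped here as well: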
                                             (i == txsize - 1));
        priv->dirty_tx = 0;
        priv->cur_tx = 0;
+       netdev_reset_queue(priv->dev);
        priv->hw->dma->start_tx(priv->ioaddr);

        priv->dev->stats.tx_errors++;
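This is the invariant the patch maintains across its three call sites: every byte charged with netdev_sent_queue() must eventually be balanced, either by a completion report or, when skbs are freed behind BQL's back as on this error path, by a full reset. A sketch of the recovery half (everything except the netdev/netif calls is hypothetical):

#include <linux/netdevice.h>

static void my_tx_recover(struct net_device *dev,
                          unsigned int *cur_tx, unsigned int *dirty_tx)
{
        /* Pending skbs were freed without completion accounting... */
        *dirty_tx = 0;
        *cur_tx = 0;

        /* ...so wipe the BQL account before traffic is let back in. */
        netdev_reset_queue(dev);
        netif_wake_queue(dev);
}

Finally, the transmit path, stmmac_xmit(), charges each skb to BQL just before ringing the DMA doorbell (dev here is the ndo_start_xmit() argument, the same device as priv->dev):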
        if (!priv->hwts_tx_en)
                skb_tx_timestamp(skb);

+       netdev_sent_queue(dev, skb->len);
+
        priv->hw->dma->enable_dma_transmission(priv->ioaddr);

        spin_unlock(&priv->tx_lock);
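Placing netdev_sent_queue() after descriptor setup but before enable_dma_transmission() means the bytes are on BQL's books before the hardware can complete them. Once the in-flight count crosses BQL's adaptive limit, the stack stops the queue until the clean path reports enough completions, which is what bounds the amount of data sitting in the TX ring. A hedged ndo_start_xmit sketch of the same placement, where my_map_and_queue() and my_kick_dma() are hypothetical stand-ins for the driver's descriptor setup and doorbell:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool my_map_and_queue(struct net_device *dev, struct sk_buff *skb);
static void my_kick_dma(struct net_device *dev);

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (!my_map_and_queue(dev, skb))
                return NETDEV_TX_BUSY;

        skb_tx_timestamp(skb);

        /*
         * Charge the bytes before kicking DMA; past BQL's adaptive
         * limit the stack marks the queue stopped, bounding TX-ring
         * buffering and the latency that comes with it.
         */
        netdev_sent_queue(dev, skb->len);

        my_kick_dma(dev);

        return NETDEV_TX_OK;
}

With all three call sites in place, BQL state for each queue can be inspected and pinned under /sys/class/net/<iface>/queues/tx-<n>/byte_queue_limits/ (limit, limit_min, limit_max, inflight).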