Follow-up to commits 9d21493b4beb8f918ba248032fefa393074a5e2b
("net: tx scalability works : trans_start") and
08baf561083bc27a953aa087dd8a664bb2b88e8e
("net: txq_trans_update() helper").
Now that the core network stack takes care of trans_start updates,
don't do it in drivers themselves, if possible. Multi-queue drivers
can avoid one cache miss (on dev->trans_start) in their start_xmit()
handler.

Exceptions are NETIF_F_LLTX drivers (vxge & tehuti), which do their
own TX locking and therefore must keep updating trans_start
themselves.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
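---
Note: the txq_trans_update() helper referenced above only touches the
per-queue timestamp when the core xmit path owns the queue's xmit
lock, which is never the case for NETIF_F_LLTX drivers. A rough
sketch of that helper (as added to include/linux/netdevice.h by
08baf561; reproduced here from memory, not necessarily verbatim):

static inline void txq_trans_update(struct netdev_queue *txq)
{
	/* xmit_lock_owner is -1 when no CPU holds the queue's xmit
	 * lock; LLTX drivers bypass that lock entirely, so they must
	 * keep updating trans_start themselves (vxge & tehuti below).
	 */
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}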
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);
- netdev->trans_start = jiffies;
-
be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
mmiowb();
fp->tx_bd_prod += nbd;
- dev->trans_start = jiffies;
if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
cpl->vlan_valid = 0;
send:
- dev->trans_start = jiffies;
ret = t1_sge_tx(skb, adapter, 0, dev);
/* If transmit busy, and we reallocated skb's due to headroom limit,
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++;
- dev->trans_start = jiffies;
spin_unlock(&q->lock);
/*
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
netif_stop_queue(netdev);
- netdev->trans_start = jiffies;
-
spin_unlock_irqrestore(&enic->wq_lock[0], flags);
return NETDEV_TX_OK;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
if (count) {
ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
- netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
if (count) {
ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
hdr_len);
- netdev->trans_start = jiffies;
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
} else {
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
/* Ring doorbell! */
wmb();
writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
- dev->trans_start = jiffies;
/* Poll CQ here */
mlx4_en_xmit_poll(priv, tx_ind);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
tx->stop_queue++;
netif_tx_stop_queue(netdev_queue);
}
- dev->trans_start = jiffies;
return 0;
abort_linearize:
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
adapter->stats.xmitcalled++;
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- ndev->trans_start = jiffies;
QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
s2io_stop_tx_queue(sp, fifo->fifo_no);
}
mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&fifo->tx_lock, flags);
if (sp->config.intr_type == MSI_X)
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
kfree_skb(skb);
return -EPIPE;
}
+ efx->net_dev->trans_start = jiffies;
}
return 0;
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
/* Map fragments for DMA and add to TX queue */
rc = efx_enqueue_skb(tx_queue, skb);
- if (unlikely(rc != NETDEV_TX_OK))
- goto out;
-
- /* Update last TX timer */
- efx->net_dev->trans_start = jiffies;
-
- out:
return rc;
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
#endif
- ndev->trans_start = jiffies;
-
+#ifdef BDX_LLTX
+ ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+#endif
priv->net_stats.tx_packets++;
priv->net_stats.tx_bytes += skb->len;
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
return VXGE_HW_OK;
}
-/* select a vpath to trasmit the packet */
+/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
int *do_lock)
{
VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
vxge_hw_fifo_txdl_post(fifo_hw, dtr);
- dev->trans_start = jiffies;
+#ifdef NETIF_F_LLTX
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+#endif
spin_unlock_irqrestore(&fifo->tx_lock, flags);
VXGE_COMPLETE_VPATH_TX(fifo);