net/mlx4_en: Move queue stopped/waked counters to be per ring
author Eugenia Emantayev <eugenia@mellanox.com>
Sun, 2 Mar 2014 08:25:00 +0000 (10:25 +0200)
committer David S. Miller <davem@davemloft.net>
Mon, 3 Mar 2014 01:04:01 +0000 (20:04 -0500)
Gives accurate counters and avoids cache misses when several rings
update the stop/wake queue counters.

Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
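
As a minimal, self-contained sketch of the pattern this patch moves to (the struct and
function names below are illustrative assumptions, not the driver's actual code): each TX
ring owns its own stopped/wake counters, and the port-level totals are produced by summing
the rings when statistics are read, so the hot transmit and completion paths only touch
ring-local cache lines and the per-ring increments no longer race on a shared field.

/* Illustrative sketch only -- names and layout are assumptions, not the
 * actual mlx4_en structures. Each TX ring owns its own counters; the
 * port totals are computed lazily when statistics are dumped.
 */
struct demo_tx_ring {
	unsigned long queue_stopped;	/* bumped when the queue is stopped */
	unsigned long wake_queue;	/* bumped when the queue is woken */
};

struct demo_port_stats {
	unsigned long queue_stopped;
	unsigned long wake_queue;
};

static void demo_sum_port_stats(struct demo_port_stats *stats,
				struct demo_tx_ring **rings, int num_rings)
{
	int i;

	/* Aggregate the per-ring counters into the port-wide view. */
	stats->queue_stopped = 0;
	stats->wake_queue = 0;
	for (i = 0; i < num_rings; i++) {
		stats->queue_stopped += rings[i]->queue_stopped;
		stats->wake_queue += rings[i]->wake_queue;
	}
}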

index dae1a1f4ae55e38287e6bcb5ff1bc5cb73435808..c2cfb05e72905cc6961ddc773547e1d09ae26bbf 100644
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        stats->tx_packets = 0;
        stats->tx_bytes = 0;
        priv->port_stats.tx_chksum_offload = 0;
+       priv->port_stats.queue_stopped = 0;
+       priv->port_stats.wake_queue = 0;
+
        for (i = 0; i < priv->tx_ring_num; i++) {
                stats->tx_packets += priv->tx_ring[i]->packets;
                stats->tx_bytes += priv->tx_ring[i]->bytes;
                priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
+               priv->port_stats.queue_stopped +=
+                       priv->tx_ring[i]->queue_stopped;
+               priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
        }
 
        stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
index 781ebca81d0e81390248954c99f2faaf40132347..56e8fbc128f864daafc462aec17069cf3d9f7e55 100644
@@ -445,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
         */
        if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
                netif_tx_wake_queue(ring->tx_queue);
-               priv->port_stats.wake_queue++;
+               ring->wake_queue++;
        }
        return done;
 }
@@ -691,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                /* every full Tx ring stops queue */
                netif_tx_stop_queue(ring->tx_queue);
-               priv->port_stats.queue_stopped++;
+               ring->queue_stopped++;
 
                /* If queue was emptied after the if, and before the
                 * stop_queue - need to wake the queue, or else it will remain
@@ -704,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                if (unlikely(((int)(ring->prod - ring->cons)) <=
                             ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                        netif_tx_wake_queue(ring->tx_queue);
-                       priv->port_stats.wake_queue++;
+                       ring->wake_queue++;
                } else {
                        return NETDEV_TX_BUSY;
                }
index 2610cc53fb4aed5dde80e17c4189a6c7057dd315..c59011d4e830c23a824ae22e2c39842012c0120e 100644
@@ -274,6 +274,8 @@ struct mlx4_en_tx_ring {
        unsigned long bytes;
        unsigned long packets;
        unsigned long tx_csum;
+       unsigned long queue_stopped;
+       unsigned long wake_queue;
        struct mlx4_bf bf;
        bool bf_enabled;
        struct netdev_queue *tx_queue;