struct igb_tx_queue_stats {
u64 packets;
u64 bytes;
+ u64 restart_queue;
};
struct igb_rx_queue_stats {
u64 packets;
u64 bytes;
u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
};
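
With the counters living in the rings, any adapter-wide totals have to be produced by walking the active queues. Below is a minimal aggregation sketch, assuming the usual igb convention that adapter->tx_ring / adapter->rx_ring point to arrays with num_tx_queues / num_rx_queues entries; the helper name is illustrative and not part of the patch.

/* Illustrative helper (not from the patch): sum per-ring counters into
 * adapter-wide totals, e.g. for use from igb_update_stats(). */
static void igb_sum_queue_stats(struct igb_adapter *adapter,
				u64 *restarts, u64 *csum_errs, u64 *alloc_fails)
{
	int i;

	*restarts = 0;
	*csum_errs = 0;
	*alloc_fails = 0;

	for (i = 0; i < adapter->num_tx_queues; i++)
		*restarts += adapter->tx_ring[i].tx_stats.restart_queue;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		*csum_errs += adapter->rx_ring[i].rx_stats.csum_err;
		*alloc_fails += adapter->rx_ring[i].rx_stats.alloc_failed;
	}
}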
struct igb_adapter {
/* TX */
struct igb_ring *tx_ring; /* One per active queue */
- unsigned int restart_queue;
unsigned long tx_queue_len;
u32 txd_cmd;
u32 gotc;
int num_tx_queues;
int num_rx_queues;
- u64 hw_csum_err;
- u32 alloc_rx_buff_failed;
u32 gorc;
u64 gorc_old;
u32 max_frame_size;
{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
- { "tx_restart_queue", IGB_STAT(restart_queue) },
{ "rx_long_length_errors", IGB_STAT(stats.roc) },
{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
- { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
- { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
{ "tx_smbus", IGB_STAT(stats.mgptc) },
{ "rx_smbus", IGB_STAT(stats.mgprc) },
{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_drops", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
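
The strings above only name the per-queue counters; igb_get_ethtool_stats() has to emit values in exactly the same order. A sketch of the matching copy-out loops, assuming each *_queue_stats struct is laid out purely as u64 counters so it can be walked as a flat array (variable names here are illustrative):

/* Sketch: copy per-queue counters out in the same order as the strings above. */
for (j = 0; j < adapter->num_tx_queues; j++) {
	u64 *queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
	int k;

	for (k = 0; k < sizeof(struct igb_tx_queue_stats) / sizeof(u64); k++)
		data[i++] = queue_stat[k];
}
for (j = 0; j < adapter->num_rx_queues; j++) {
	u64 *queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
	int k;

	for (k = 0; k < sizeof(struct igb_rx_queue_stats) / sizeof(u64); k++)
		data[i++] = queue_stat[k];
}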
static int __igb_maybe_stop_tx(struct net_device *netdev,
struct igb_ring *tx_ring, int size)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
netif_stop_subqueue(netdev, tx_ring->queue_index);
/* Herbert's original patch had:
/* A reprieve! */
netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ tx_ring->tx_stats.restart_queue++;
return 0;
}
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!(test_bit(__IGB_DOWN, &adapter->state))) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ tx_ring->tx_stats.restart_queue++;
}
}
napi_gro_receive(&q_vector->napi, skb);
}
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
+ struct igb_adapter *adapter,
u32 status_err, struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
*/
if (!((adapter->hw.mac.type == e1000_82576) &&
(skb->len == 60)))
- adapter->hw_csum_err++;
+ ring->rx_stats.csum_err++;
/* let the stack verify checksum errors */
return;
}
total_bytes += skb->len;
total_packets++;
- igb_rx_checksum_adv(adapter, staterr, skb);
+ igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, rx_ring->queue_index);
if (!buffer_info->page) {
buffer_info->page = alloc_page(GFP_ATOMIC);
if (!buffer_info->page) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
}
buffer_info->page_offset = 0;
if (!buffer_info->skb) {
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
}
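
With the changes applied, the new per-queue counters appear in ethtool -S output under the names generated above; for example, for queue 0 (interface name and counter values are only placeholders):

ethtool -S eth0
     ...
     tx_queue_0_restart: 0
     rx_queue_0_csum_err: 0
     rx_queue_0_alloc_failed: 0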