};
};
-struct igb_queue_stats {
+struct igb_tx_queue_stats {
u64 packets;
u64 bytes;
};
+struct igb_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+};
+
struct igb_ring {
struct igb_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
union {
/* TX */
struct {
- struct igb_queue_stats tx_stats;
+ struct igb_tx_queue_stats tx_stats;
bool detect_tx_hung;
};
/* RX */
struct {
- struct igb_queue_stats rx_stats;
+ struct igb_rx_queue_stats rx_stats;
+ u64 rx_queue_drops;
struct napi_struct napi;
int set_itr;
struct igb_ring *buddy;
};
#define IGB_QUEUE_STATS_LEN \
- ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
- ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
- (sizeof(struct igb_queue_stats) / sizeof(u64)))
+ (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
+ ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
+ (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
#define IGB_GLOBAL_STATS_LEN \
sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
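(Illustrative arithmetic, not part of the patch: with num_rx_queues = 4 and num_tx_queues = 4, sizeof(struct igb_rx_queue_stats) / sizeof(u64) is 3 and sizeof(struct igb_tx_queue_stats) / sizeof(u64) is 2, so IGB_QUEUE_STATS_LEN evaluates to 4 * 3 + 4 * 2 = 20 per-queue ethtool entries: packets, bytes and drops for each RX queue, packets and bytes for each TX queue.)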
{
struct igb_adapter *adapter = netdev_priv(netdev);
u64 *queue_stat;
- int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
+ int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
+ int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
int j;
int i;
for (j = 0; j < adapter->num_tx_queues; j++) {
int k;
queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
- for (k = 0; k < stat_count; k++)
+ for (k = 0; k < stat_count_tx; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
int k;
queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
- for (k = 0; k < stat_count; k++)
+ for (k = 0; k < stat_count_rx; k++)
data[i + k] = queue_stat[k];
i += k;
}
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
/* Rx Errors */
+ if (hw->mac.type != e1000_82575) {
+ u32 rqdpc_tmp;
+ int i;
+ /* Read out the drop statistics for each RX queue. Note that the
+ * RQDPC (Receive Queue Drop Packet Count) register only increments
+ * if the DROP_EN bit is set (in the SRRCTL register for that
+ * queue). If the DROP_EN bit is NOT set, a somewhat equivalent
+ * count is accumulated in RNBC instead (not on a per-queue basis).
+ * Also note that these drops occur due to a lack of available
+ * descriptors.
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
+ adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+ }
+ }
+
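A minimal sketch of the DROP_EN side of the comment above (illustrative only, not part of this patch): it assumes the E1000_SRRCTL(i) register macro and the E1000_SRRCTL_DROP_EN bit from the igb register definitions, and uses the same rd32()/wr32() accessors as the surrounding code. With this bit set for a queue, the RQDPC counter read above starts incrementing for that queue; without it, drops are only reflected in RNBC.

	/* Illustrative sketch: enable per-queue drop counting by setting
	 * DROP_EN in SRRCTL for RX queue i (assumed register/bit names).
	 */
	u32 srrctl = rd32(E1000_SRRCTL(i));
	srrctl |= E1000_SRRCTL_DROP_EN;
	wr32(E1000_SRRCTL(i), srrctl);
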
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC */
adapter->net_stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +