u32 version;
u32 coal_intvl;
u32 bus_freq_mhz;
- struct net_device_stats stats;
int rx_packet_max;
int host_port;
struct clk *clk;
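The struct net_device_stats stats member dropped from cpsw_priv above duplicates state the networking core already keeps: every struct net_device embeds a stats block of its own, which the core reports by default. A trimmed, paraphrased sketch of that layout (based on include/linux/netdevice.h, not quoted verbatim):

struct net_device {
	/* ... many unrelated fields ... */
	struct net_device_stats	stats;	/* default counters, reported by the core */
	/* ... */
};

With that field reachable through ndev, the hunks below simply redirect every counter update to it.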
if (unlikely(netif_queue_stopped(ndev)))
netif_wake_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
}
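The conversion in the DMA completion handlers needs no extra plumbing because ndev is already derived from the completed skb; the token CPDMA hands back is the skb itself. Roughly, paraphrasing the handler prologue rather than quoting it:

void cpsw_tx_handler(void *token, int len, int status)
{
	struct sk_buff *skb = token;
	struct net_device *ndev = skb->dev;		/* device the skb was queued on */
	struct cpsw_priv *priv = netdev_priv(ndev);	/* still needed for cpts */
	/* ... counters are now bumped on ndev->stats ... */
}

The same pattern applies in cpsw_rx_handler in the next hunk.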
cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
- priv->stats.rx_bytes += len;
- priv->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
} else {
- priv->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
new_skb = skb;
}
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
return NETDEV_TX_OK;
fail:
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
- priv->stats.tx_errors++;
+ ndev->stats.tx_errors++;
cpsw_intr_disable(priv);
cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_chan_stop(priv->txch);
return 0;
}
-static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- return &priv->stats;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
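Dropping cpsw_ndo_get_stats above, and its .ndo_get_stats hook in the hunk below, is safe because of how the core gathers statistics: dev_get_stats() only calls the driver hooks when they are present and otherwise falls back to the counters embedded in the device, which is exactly where this patch now accumulates them. A simplified sketch of that fallback, paraphrasing dev_get_stats() in net/core/dev.c rather than quoting it:

struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64)
		ops->ndo_get_stats64(dev, storage);
	else if (ops->ndo_get_stats)
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	else
		netdev_stats_to_stats64(storage, &dev->stats);	/* cpsw now takes this path */

	/* ... core-level dropped-packet accounting elided ... */
	return storage;
}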
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
- .ndo_get_stats = cpsw_ndo_get_stats,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,