dma_addr_t tx_dma[TX_RING_SIZE];
struct net_device *dev;
struct napi_struct napi;
- struct net_device_stats stats;
/* Media monitoring timer */
struct timer_list timer;
/* Frequently used values: keep some adjacent for cache effect */
enable_irq(dev->irq);
dev->trans_start = jiffies; /* prevent tx timeout */
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
netif_wake_queue(dev);
}
np->tx_dma[i], np->tx_skbuff[i]->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
- np->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
}
np->tx_skbuff[i] = NULL;
}
writel(TxOn, ioaddr + ChipCmd);
} else {
dev_kfree_skb_irq(skb);
- np->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
}
spin_unlock_irqrestore(&np->lock, flags);
dev->name, np->dirty_tx,
le32_to_cpu(np->tx_ring[entry].cmd_status));
if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
- np->stats.tx_packets++;
- np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
} else { /* Various Tx errors */
int tx_status =
le32_to_cpu(np->tx_ring[entry].cmd_status);
if (tx_status & (DescTxAbort|DescTxExcColl))
- np->stats.tx_aborted_errors++;
+ dev->stats.tx_aborted_errors++;
if (tx_status & DescTxFIFO)
- np->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
if (tx_status & DescTxCarrier)
- np->stats.tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
if (tx_status & DescTxOOWCol)
- np->stats.tx_window_errors++;
- np->stats.tx_errors++;
+ dev->stats.tx_window_errors++;
+ dev->stats.tx_errors++;
}
			pci_unmap_single(np->pci_dev, np->tx_dma[entry],
np->tx_skbuff[entry]->len,
"buffers, entry %#08x "
"status %#08x.\n", dev->name,
np->cur_rx, desc_status);
- np->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
/* The RX state machine has probably
* locked up beneath us. Follow the
} else {
/* There was an error. */
- np->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (desc_status & (DescRxAbort|DescRxOver))
- np->stats.rx_over_errors++;
+ dev->stats.rx_over_errors++;
if (desc_status & (DescRxLong|DescRxRunt))
- np->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (desc_status & (DescRxInvalid|DescRxAlign))
- np->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if (desc_status & DescRxCRC)
- np->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
}
} else if (pkt_len > np->rx_buf_sz) {
/* if this is the tail of a double buffer
}
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
- np->stats.rx_packets++;
- np->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
}
entry = (++np->cur_rx) % RX_RING_SIZE;
np->rx_head_desc = &np->rx_ring[entry];
printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
dev->name);
}
- np->stats.rx_fifo_errors++;
- np->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ dev->stats.rx_errors++;
}
/* Hmmmmm, it's not clear how to recover from PCI faults. */
if (intr_status & IntrPCIErr) {
printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
intr_status & IntrPCIErr);
- np->stats.tx_fifo_errors++;
- np->stats.tx_errors++;
- np->stats.rx_fifo_errors++;
- np->stats.rx_errors++;
+ dev->stats.tx_fifo_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.rx_fifo_errors++;
+ dev->stats.rx_errors++;
}
spin_unlock(&np->lock);
}
static void __get_stats(struct net_device *dev)
{
void __iomem * ioaddr = ns_ioaddr(dev);
- struct netdev_private *np = netdev_priv(dev);
	/* The chip only needs to report frames it dropped silently. */
- np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+ dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
+ dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}
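/*
 * get_stats() hands back the counter block embedded in struct
 * net_device (dev->stats); __get_stats() only folds in the drop
 * counts the chip accumulates in hardware.  With every software
 * counter updated through dev->stats as well, the private copy in
 * struct netdev_private (removed above) is no longer needed.
 */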
static struct net_device_stats *get_stats(struct net_device *dev)
__get_stats(dev);
spin_unlock_irq(&np->lock);
- return &np->stats;
+ return &dev->stats;
}
#ifdef CONFIG_NET_POLL_CONTROLLER