{
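/* Report the resolved link state: link down, auto-negotiated, or
 * forced to a fixed speed/duplex via the spd_dpx option. */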
if (vptr->mii_status & VELOCITY_LINK_FAIL) {
- VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
- VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);
if (vptr->mii_status & VELOCITY_SPEED_1000)
VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
else if (vptr->mii_status & VELOCITY_SPEED_100)
VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
else
VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
else
VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
} else {
- VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
switch (vptr->options.spd_dpx) {
case SPD_DPX_1000_FULL:
VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
case VELOCITY_INIT_RESET:
case VELOCITY_INIT_WOL:
- netif_stop_queue(vptr->dev);
+ netif_stop_queue(vptr->netdev);
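/* For a reset or wake-on-LAN re-init, quiesce the TX queue before
 * the hardware is reprogrammed. */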
/*
 * Reset RX to keep the RX descriptor pointer on a 4-descriptor (4X)
 * aligned location.
 */
if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
velocity_print_link_status(vptr);
if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
- netif_wake_queue(vptr->dev);
+ netif_wake_queue(vptr->netdev);
}
enable_flow_control_ability(vptr);
mac_eeprom_reload(regs);
for (i = 0; i < 6; i++)
- writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
+ writeb(vptr->netdev->dev_addr[i], &(regs->PAR[i]));
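/* Rewrite the station address byte-by-byte into the PAR registers;
 * the EEPROM reload above presumably restores the factory default. */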
/*
 * Clear the Pre_ACPI bit.
 */
/*
* Set packet filter: Receive directed and broadcast address
*/
- velocity_set_multi(vptr->dev);
+ velocity_set_multi(vptr->netdev);
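/* velocity_set_multi() is reused here so the init-time filter matches
 * whatever the runtime rx-mode path programs. */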
/*
 * Enable MII auto-polling.
 */
writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
mii_status = velocity_get_opt_media_mode(vptr);
- netif_stop_queue(vptr->dev);
+ netif_stop_queue(vptr->netdev);
mii_init(vptr, mii_status);
if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
velocity_print_link_status(vptr);
if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
- netif_wake_queue(vptr->dev);
+ netif_wake_queue(vptr->netdev);
}
enable_flow_control_ability(vptr);
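/* Pause-frame flow control depends on the negotiated duplex, so it
 * is enabled only after the media mode has settled. */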
rx_ring_size, &pool_dma);
if (!pool) {
dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
- vptr->dev->name);
+ vptr->netdev->name);
return -ENOMEM;
}
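/* The TX and RX descriptor rings share one coherent DMA allocation
 * (the "pool"); if it cannot be mapped, ring setup fails early. */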
struct rx_desc *rd = &(vptr->rx.ring[idx]);
struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
- rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);
+ rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
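/* The extra 64 bytes let the buffer start be rounded up to a 64-byte
 * boundary, which the RX DMA engine presumably requires. */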
if (rd_info->skb == NULL)
return -ENOMEM;
if (velocity_rx_refill(vptr) != vptr->options.numrx) {
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
- "%s: failed to allocate RX buffer.\n", vptr->dev->name);
+ "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
velocity_free_rd_ring(vptr);
goto out;
}
printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(®s->TDIdx[0]));
BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR);
writew(TRDCSR_RUN, ®s->TDCSRClr);
- netif_stop_queue(vptr->dev);
+ netif_stop_queue(vptr->netdev);
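/* TD structure error: log the failing index, clear the stall
 * (TXESR is presumably write-one-to-clear), halt the TX DMA engine
 * via TDCSRClr and stop the queue until recovery. */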
/* FIXME: port over the pci_device_failed code and use it
here */
if (linked) {
vptr->mii_status &= ~VELOCITY_LINK_FAIL;
- netif_carrier_on(vptr->dev);
+ netif_carrier_on(vptr->netdev);
} else {
vptr->mii_status |= VELOCITY_LINK_FAIL;
- netif_carrier_off(vptr->dev);
+ netif_carrier_off(vptr->netdev);
}
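/* Mirror the PHY link state into the kernel's carrier flag so the
 * stack and userspace observe link transitions. */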
velocity_print_link_status(vptr);
enable_mii_autopoll(regs);
if (vptr->mii_status & VELOCITY_LINK_FAIL)
- netif_stop_queue(vptr->dev);
+ netif_stop_queue(vptr->netdev);
else
- netif_wake_queue(vptr->dev);
+ netif_wake_queue(vptr->netdev);
}
if (status & ISR_MIBFI)
int idx;
int works = 0;
struct velocity_td_info *tdinfo;
- struct net_device_stats *stats = &vptr->dev->stats;
+ struct net_device_stats *stats = &vptr->netdev->stats;
for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
/*
 * Look to see if we should kick the transmit network
 * layer for more work.
 */
- if (netif_queue_stopped(vptr->dev) && (full == 0) &&
+ if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
(!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
- netif_wake_queue(vptr->dev);
+ netif_wake_queue(vptr->netdev);
}
return works;
}
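/* velocity_tx_srv() reclaims completed TX descriptors; the queue is
 * woken only when no ring is full and the link is up, so a wake
 * cannot immediately re-stall. */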
if (pkt_size < rx_copybreak) {
struct sk_buff *new_skb;
- new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
+ new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
if (new_skb) {
new_skb->ip_summed = rx_skb[0]->ip_summed;
skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
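/* Copybreak: packets shorter than rx_copybreak are copied into a
 * fresh IP-aligned skb so the large RX buffer can be recycled. */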
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
- struct net_device_stats *stats = &vptr->dev->stats;
+ struct net_device_stats *stats = &vptr->netdev->stats;
struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
struct rx_desc *rd = &(vptr->rx.ring[idx]);
int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
- VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
+ VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
stats->rx_length_errors++;
return -EINVAL;
}
PCI_DMA_FROMDEVICE);
skb_put(skb, pkt_len - 4);
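/* pkt_len includes the 4-byte FCS, which is trimmed off here. */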
- skb->protocol = eth_type_trans(skb, vptr->dev);
+ skb->protocol = eth_type_trans(skb, vptr->netdev);
if (rd->rdesc0.RSR & RSR_DETAG) {
u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
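/* Hardware VLAN detag: the swab16() suggests PQTAG holds the tag
 * byte-swapped relative to CPU order. */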
*/
static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
- struct net_device_stats *stats = &vptr->dev->stats;
+ struct net_device_stats *stats = &vptr->netdev->stats;
int rd_curr = vptr->rx.curr;
int works = 0;
if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
- vptr->dev->name);
+ vptr->netdev->name);
ret = -EINVAL;
goto out_0;
}
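/* MTU requests outside [VELOCITY_MIN_MTU, VELOCITY_MAX_MTU] are
 * rejected up front, before any ring reallocation is attempted. */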
goto out_0;
}
- tmp_vptr->dev = dev;
+ tmp_vptr->netdev = dev;
tmp_vptr->pdev = vptr->pdev;
tmp_vptr->options = vptr->options;
tmp_vptr->tx.numq = vptr->tx.numq;
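/* The MTU change is staged in a scratch velocity_info (tmp_vptr)
 * carrying the new parameters, presumably so the live rings can be
 * swapped over rather than resized in place. */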
*/
static void velocity_print_info(struct velocity_info *vptr)
{
- struct net_device *dev = vptr->dev;
+ struct net_device *dev = vptr->netdev;
printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
printk(KERN_INFO "%s: Ethernet Address: %pM\n",
velocity_init_info(pdev, vptr, info);
- vptr->dev = dev;
+ vptr->netdev = dev;
ret = pci_enable_device(pdev);
if (ret < 0)
struct velocity_info *vptr = netdev_priv(dev);
unsigned long flags;
- if (!netif_running(vptr->dev))
+ if (!netif_running(vptr->netdev))
return 0;
- netif_device_detach(vptr->dev);
+ netif_device_detach(vptr->netdev);
spin_lock_irqsave(&vptr->lock, flags);
pci_save_state(pdev);
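/* Suspend: detach the interface first so no new transmits start,
 * then save PCI config state under the driver lock. */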
unsigned long flags;
int i;
- if (!netif_running(vptr->dev))
+ if (!netif_running(vptr->netdev))
return 0;
pci_set_power_state(pdev, PCI_D0);
mac_enable_int(vptr->mac_regs);
spin_unlock_irqrestore(&vptr->lock, flags);
- netif_device_attach(vptr->dev);
+ netif_device_attach(vptr->netdev);
return 0;
}
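/* Resume mirrors suspend: restore D0 power, re-enable interrupts
 * under the lock, then reattach the interface. */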