netdev: convert bulk of drivers to netdev_tx_t
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / tg3.c
index 3725ac857086a51e8c6951422dca0f122b1cd52a..9d5c1786c66489fc891b86f392719ff3c8174740 100644 (file)
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.100"
-#define DRV_MODULE_RELDATE     "August 25, 2009"
+#define DRV_MODULE_VERSION     "3.101"
+#define DRV_MODULE_RELDATE     "August 28, 2009"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
 #define TG3_RX_JMB_MAP_SZ              TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
 /* minimum number of free TX descriptors required to wake up TX process */
-#define TG3_TX_WAKEUP_THRESH(tp)               ((tp)->tx_pending / 4)
+#define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -615,13 +615,13 @@ static void tg3_disable_ints(struct tg3 *tp)
 {
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
-       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+       tw32_mailbox_f(tp->napi[0].int_mbox, 0x00000001);
 }
 
 static inline void tg3_cond_int(struct tg3 *tp)
 {
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
-           (tp->hw_status->status & SD_STATUS_UPDATED))
+           (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
@@ -630,22 +630,22 @@ static inline void tg3_cond_int(struct tg3 *tp)
 
 static void tg3_enable_ints(struct tg3 *tp)
 {
+       struct tg3_napi *tnapi = &tp->napi[0];
        tp->irq_sync = 0;
        wmb();
 
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                      (tp->last_tag << 24));
+       tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
-               tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                              (tp->last_tag << 24));
+               tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
        tg3_cond_int(tp);
 }
 
-static inline unsigned int tg3_has_work(struct tg3 *tp)
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 {
-       struct tg3_hw_status *sblk = tp->hw_status;
+       struct tg3 *tp = tnapi->tp;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;
 
        /* check for phy events */
@@ -656,22 +656,23 @@ static inline unsigned int tg3_has_work(struct tg3 *tp)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
-       if (sblk->idx[0].tx_consumer != tp->tx_cons ||
-           sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+       if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+           sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
                work_exists = 1;
 
        return work_exists;
 }
 
-/* tg3_restart_ints
+/* tg3_int_reenable
  *  similar to tg3_enable_ints, but it accurately determines whether there
  *  is new work pending and can return without flushing the PIO write
  *  which reenables interrupts
  */
-static void tg3_restart_ints(struct tg3 *tp)
+static void tg3_int_reenable(struct tg3_napi *tnapi)
 {
-       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                    tp->last_tag << 24);
+       struct tg3 *tp = tnapi->tp;
+
+       tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();
 
        /* When doing tagged status, this work check is unnecessary.
@@ -679,7 +680,7 @@ static void tg3_restart_ints(struct tg3 *tp)
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
-           tg3_has_work(tp))
+           tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -687,19 +688,20 @@ static void tg3_restart_ints(struct tg3 *tp)
 static inline void tg3_netif_stop(struct tg3 *tp)
 {
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
-       napi_disable(&tp->napi);
+       napi_disable(&tp->napi[0].napi);
        netif_tx_disable(tp->dev);
 }
 
 static inline void tg3_netif_start(struct tg3 *tp)
 {
+       struct tg3_napi *tnapi = &tp->napi[0];
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
-       napi_enable(&tp->napi);
-       tp->hw_status->status |= SD_STATUS_UPDATED;
+       napi_enable(&tnapi->napi);
+       tnapi->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
 }
 
@@ -3899,9 +3901,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
 
-       tp->hw_status->status =
+       tp->napi[0].hw_status->status =
                (SD_STATUS_UPDATED |
-                (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
+                (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
 
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
@@ -4267,24 +4269,25 @@ static void tg3_tx_recover(struct tg3 *tp)
        spin_unlock(&tp->lock);
 }
 
-static inline u32 tg3_tx_avail(struct tg3 *tp)
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 {
        smp_mb();
-       return (tp->tx_pending -
-               ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+       return tnapi->tx_pending -
+              ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 }
 
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
  */
-static void tg3_tx(struct tg3 *tp)
+static void tg3_tx(struct tg3_napi *tnapi)
 {
-       u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
-       u32 sw_idx = tp->tx_cons;
+       struct tg3 *tp = tnapi->tp;
+       u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+       u32 sw_idx = tnapi->tx_cons;
 
        while (sw_idx != hw_idx) {
-               struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
+               struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;
 
@@ -4300,7 +4303,7 @@ static void tg3_tx(struct tg3 *tp)
                sw_idx = NEXT_TX(sw_idx);
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       ri = &tp->tx_buffers[sw_idx];
+                       ri = &tnapi->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;
                        sw_idx = NEXT_TX(sw_idx);
@@ -4314,7 +4317,7 @@ static void tg3_tx(struct tg3 *tp)
                }
        }
 
-       tp->tx_cons = sw_idx;
+       tnapi->tx_cons = sw_idx;
 
        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
@@ -4324,10 +4327,10 @@ static void tg3_tx(struct tg3 *tp)
        smp_mb();
 
        if (unlikely(netif_queue_stopped(tp->dev) &&
-                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
+                    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
                netif_tx_lock(tp->dev);
                if (netif_queue_stopped(tp->dev) &&
-                   (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
+                   (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
@@ -4344,9 +4347,10 @@ static void tg3_tx(struct tg3 *tp)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
+static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
                            int src_idx, u32 dest_idx_unmasked)
 {
+       struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map, *src_map;
        struct sk_buff *skb;
@@ -4409,9 +4413,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
  * members of the RX descriptor are invariant.  See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
+static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
                           int src_idx, u32 dest_idx_unmasked)
 {
+       struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        int dest_idx;
@@ -4447,13 +4452,6 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
        src_map->skb = NULL;
 }
 
-#if TG3_VLAN_TAG_USED
-static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
-{
-       return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb);
-}
-#endif
-
 /* The RX ring scheme is composed of multiple rings which post fresh
  * buffers to the chip, and one special ring the chip uses to report
  * status back to the host.
@@ -4478,15 +4476,16 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
  * If both the host and chip were to write into the same ring, cache line
  * eviction could occur since both entities want it in an exclusive state.
  */
-static int tg3_rx(struct tg3 *tp, int budget)
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
 {
+       struct tg3 *tp = tnapi->tp;
        u32 work_mask, rx_std_posted = 0;
-       u32 sw_idx = tp->rx_rcb_ptr;
+       u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx;
        int received;
        struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
-       hw_idx = tp->hw_status->idx[0].rx_producer;
+       hw_idx = tnapi->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
@@ -4495,7 +4494,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
-               struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
+               struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
@@ -4522,7 +4521,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
-                       tg3_recycle_rx(tp, opaque_key,
+                       tg3_recycle_rx(tnapi, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
@@ -4542,7 +4541,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
                ) {
                        int skb_size;
 
-                       skb_size = tg3_alloc_rx_skb(tp, opaque_key,
+                       skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;
@@ -4554,7 +4553,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
                } else {
                        struct sk_buff *copy_skb;
 
-                       tg3_recycle_rx(tp, opaque_key,
+                       tg3_recycle_rx(tnapi, opaque_key,
                                       desc_idx, *post_ptr);
 
                        copy_skb = netdev_alloc_skb(tp->dev,
@@ -4591,11 +4590,11 @@ static int tg3_rx(struct tg3 *tp, int budget)
 #if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
-                       tg3_vlan_rx(tp, skb,
-                                   desc->err_vlan & RXD_VLAN_MASK);
+                       vlan_gro_receive(&tnapi->napi, tp->vlgrp,
+                                        desc->err_vlan & RXD_VLAN_MASK, skb);
                } else
 #endif
-                       napi_gro_receive(&tp->napi, skb);
+                       napi_gro_receive(&tnapi->napi, skb);
 
                received++;
                budget--;
@@ -4617,14 +4616,14 @@ next_pkt_nopost:
 
                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
-                       hw_idx = tp->hw_status->idx[0].rx_producer;
+                       hw_idx = tnapi->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }
 
        /* ACK the status ring. */
-       tp->rx_rcb_ptr = sw_idx;
-       tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
+       tnapi->rx_rcb_ptr = sw_idx;
+       tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
@@ -4642,9 +4641,10 @@ next_pkt_nopost:
        return received;
 }
 
-static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 {
-       struct tg3_hw_status *sblk = tp->hw_status;
+       struct tg3 *tp = tnapi->tp;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
 
        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
@@ -4668,8 +4668,8 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
        }
 
        /* run TX completion thread */
-       if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-               tg3_tx(tp);
+       if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
+               tg3_tx(tnapi);
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        return work_done;
        }
@@ -4678,20 +4678,21 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
-       if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
-               work_done += tg3_rx(tp, budget - work_done);
+       if (sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
+               work_done += tg3_rx(tnapi, budget - work_done);
 
        return work_done;
 }
 
 static int tg3_poll(struct napi_struct *napi, int budget)
 {
-       struct tg3 *tp = container_of(napi, struct tg3, napi);
+       struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+       struct tg3 *tp = tnapi->tp;
        int work_done = 0;
-       struct tg3_hw_status *sblk = tp->hw_status;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
 
        while (1) {
-               work_done = tg3_poll_work(tp, work_done, budget);
+               work_done = tg3_poll_work(tnapi, work_done, budget);
 
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        goto tx_recovery;
@@ -4700,19 +4701,19 @@ static int tg3_poll(struct napi_struct *napi, int budget)
                        break;
 
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
-                       /* tp->last_tag is used in tg3_restart_ints() below
+                       /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
-                       tp->last_tag = sblk->status_tag;
-                       tp->last_irq_tag = tp->last_tag;
+                       tnapi->last_tag = sblk->status_tag;
+                       tnapi->last_irq_tag = tnapi->last_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;
 
-               if (likely(!tg3_has_work(tp))) {
+               if (likely(!tg3_has_work(tnapi))) {
                        napi_complete(napi);
-                       tg3_restart_ints(tp);
+                       tg3_int_reenable(tnapi);
                        break;
                }
        }
@@ -4763,14 +4764,14 @@ static inline void tg3_full_unlock(struct tg3 *tp)
  */
 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
 {
-       struct net_device *dev = dev_id;
-       struct tg3 *tp = netdev_priv(dev);
+       struct tg3_napi *tnapi = dev_id;
+       struct tg3 *tp = tnapi->tp;
 
-       prefetch(tp->hw_status);
-       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+       prefetch(tnapi->hw_status);
+       prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 
        if (likely(!tg3_irq_sync(tp)))
-               napi_schedule(&tp->napi);
+               napi_schedule(&tnapi->napi);
 
        return IRQ_HANDLED;
 }
@@ -4781,11 +4782,11 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
  */
 static irqreturn_t tg3_msi(int irq, void *dev_id)
 {
-       struct net_device *dev = dev_id;
-       struct tg3 *tp = netdev_priv(dev);
+       struct tg3_napi *tnapi = dev_id;
+       struct tg3 *tp = tnapi->tp;
 
-       prefetch(tp->hw_status);
-       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+       prefetch(tnapi->hw_status);
+       prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
@@ -4795,16 +4796,16 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
-               napi_schedule(&tp->napi);
+               napi_schedule(&tnapi->napi);
 
        return IRQ_RETVAL(1);
 }
 
 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 {
-       struct net_device *dev = dev_id;
-       struct tg3 *tp = netdev_priv(dev);
-       struct tg3_hw_status *sblk = tp->hw_status;
+       struct tg3_napi *tnapi = dev_id;
+       struct tg3 *tp = tnapi->tp;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;
 
        /* In INTx mode, it is possible for the interrupt to arrive at
@@ -4835,9 +4836,9 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
-       if (likely(tg3_has_work(tp))) {
-               prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
-               napi_schedule(&tp->napi);
+       if (likely(tg3_has_work(tnapi))) {
+               prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+               napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
@@ -4851,9 +4852,9 @@ out:
 
 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
 {
-       struct net_device *dev = dev_id;
-       struct tg3 *tp = netdev_priv(dev);
-       struct tg3_hw_status *sblk = tp->hw_status;
+       struct tg3_napi *tnapi = dev_id;
+       struct tg3 *tp = tnapi->tp;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;
 
        /* In INTx mode, it is possible for the interrupt to arrive at
@@ -4861,7 +4862,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
-       if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
+       if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
@@ -4888,14 +4889,14 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
-       tp->last_irq_tag = sblk->status_tag;
+       tnapi->last_irq_tag = sblk->status_tag;
 
        if (tg3_irq_sync(tp))
                goto out;
 
-       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+       prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 
-       napi_schedule(&tp->napi);
+       napi_schedule(&tnapi->napi);
 
 out:
        return IRQ_RETVAL(handled);
@@ -4904,9 +4905,9 @@ out:
 /* ISR for interrupt test */
 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
 {
-       struct net_device *dev = dev_id;
-       struct tg3 *tp = netdev_priv(dev);
-       struct tg3_hw_status *sblk = tp->hw_status;
+       struct tg3_napi *tnapi = dev_id;
+       struct tg3 *tp = tnapi->tp;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
 
        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
@@ -4936,7 +4937,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
-               napi_enable(&tp->napi);
+               napi_enable(&tp->napi[0].napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
@@ -5043,13 +5044,14 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 #endif
 }
 
-static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
+static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
 {
+       struct tg3_napi *tnapi = &tp->napi[0];
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        u32 entry = *start;
@@ -5084,7 +5086,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
-                       tg3_set_txd(tp, entry, new_addr, new_skb->len,
+                       tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
@@ -5093,11 +5095,10 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
-               if (i == 0) {
-                       tp->tx_buffers[entry].skb = new_skb;
-               } else {
-                       tp->tx_buffers[entry].skb = NULL;
-               }
+               if (i == 0)
+                       tnapi->tx_buffers[entry].skb = new_skb;
+               else
+                       tnapi->tx_buffers[entry].skb = NULL;
                entry = NEXT_TX(entry);
                i++;
        }
@@ -5108,11 +5109,11 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
        return ret;
 }
 
-static void tg3_set_txd(struct tg3 *tp, int entry,
+static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
                        dma_addr_t mapping, int len, u32 flags,
                        u32 mss_and_is_end)
 {
-       struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
+       struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
        int is_end = (mss_and_is_end & 0x1);
        u32 mss = (mss_and_is_end >> 1);
        u32 vlan_tag = 0;
@@ -5134,12 +5135,14 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
 /* hard_start_xmit for devices that don't have any bugs and
  * support TG3_FLG2_HW_TSO_2 only.
  */
-static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
+                                 struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        dma_addr_t mapping;
+       struct tg3_napi *tnapi = &tp->napi[0];
 
        len = skb_headlen(skb);
 
@@ -5148,7 +5151,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
-       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+       if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
 
@@ -5159,7 +5162,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       entry = tp->tx_prod;
+       entry = tnapi->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -5207,9 +5210,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        mapping = sp->dma_head;
 
-       tp->tx_buffers[entry].skb = skb;
+       tnapi->tx_buffers[entry].skb = skb;
 
-       tg3_set_txd(tp, entry, mapping, len, base_flags,
+       tg3_set_txd(tnapi, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 
        entry = NEXT_TX(entry);
@@ -5224,9 +5227,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                        len = frag->size;
                        mapping = sp->dma_maps[i];
-                       tp->tx_buffers[entry].skb = NULL;
+                       tnapi->tx_buffers[entry].skb = NULL;
 
-                       tg3_set_txd(tp, entry, mapping, len,
+                       tg3_set_txd(tnapi, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));
 
                        entry = NEXT_TX(entry);
@@ -5234,12 +5237,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* Packets are ready, update Tx producer idx local and on card. */
-       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+       tw32_tx_mbox(tnapi->prodmbox, entry);
 
-       tp->tx_prod = entry;
-       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+       tnapi->tx_prod = entry;
+       if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
-               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+               if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_wake_queue(tp->dev);
        }
 
@@ -5249,7 +5252,8 @@ out_unlock:
        return NETDEV_TX_OK;
 }
 
-static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
+static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
+                                         struct net_device *);
 
 /* Use GSO to workaround a rare TSO bug that may be triggered when the
  * TSO header is greater than 80 bytes.
@@ -5257,11 +5261,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 {
        struct sk_buff *segs, *nskb;
+       u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
        /* Estimate the number of fragments in the worst case */
-       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+       if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);
-               if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
+               if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;
 
                netif_wake_queue(tp->dev);
@@ -5287,13 +5292,15 @@ tg3_tso_bug_end:
 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
  */
-static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
+                                         struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        int would_hit_hwbug;
        dma_addr_t mapping;
+       struct tg3_napi *tnapi = &tp->napi[0];
 
        len = skb_headlen(skb);
 
@@ -5302,7 +5309,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
-       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+       if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
 
@@ -5313,7 +5320,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       entry = tp->tx_prod;
+       entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
@@ -5383,7 +5390,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
        mapping = sp->dma_head;
 
-       tp->tx_buffers[entry].skb = skb;
+       tnapi->tx_buffers[entry].skb = skb;
 
        would_hit_hwbug = 0;
 
@@ -5392,7 +5399,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
        else if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;
 
-       tg3_set_txd(tp, entry, mapping, len, base_flags,
+       tg3_set_txd(tnapi, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 
        entry = NEXT_TX(entry);
@@ -5408,7 +5415,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                        len = frag->size;
                        mapping = sp->dma_maps[i];
 
-                       tp->tx_buffers[entry].skb = NULL;
+                       tnapi->tx_buffers[entry].skb = NULL;
 
                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;
@@ -5417,10 +5424,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                                would_hit_hwbug = 1;
 
                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
-                               tg3_set_txd(tp, entry, mapping, len,
+                               tg3_set_txd(tnapi, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
-                               tg3_set_txd(tp, entry, mapping, len,
+                               tg3_set_txd(tnapi, entry, mapping, len,
                                            base_flags, (i == last));
 
                        entry = NEXT_TX(entry);
@@ -5445,12 +5452,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* Packets are ready, update Tx producer idx local and on card. */
-       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+       tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
 
-       tp->tx_prod = entry;
-       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+       tnapi->tx_prod = entry;
+       if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
-               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+               if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_wake_queue(tp->dev);
        }
 
@@ -5521,8 +5528,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
 {
-       struct ring_info *rxp;
        int i;
+       struct ring_info *rxp;
 
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tpr->rx_std_buffers[i];
@@ -5566,6 +5573,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
 {
        u32 i, rx_pkt_dma_sz;
+       struct tg3_napi *tnapi = &tp->napi[0];
 
        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5592,7 +5600,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
-               if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+               if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
@@ -5623,7 +5631,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
                }
 
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
-                       if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
+                       if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
@@ -5708,13 +5716,14 @@ err_out:
  */
 static void tg3_free_rings(struct tg3 *tp)
 {
+       struct tg3_napi *tnapi = &tp->napi[0];
        int i;
 
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
 
-               txp = &tp->tx_buffers[i];
+               txp = &tnapi->tx_buffers[i];
                skb = txp->skb;
 
                if (skb == NULL) {
@@ -5743,12 +5752,16 @@ static void tg3_free_rings(struct tg3 *tp)
  */
 static int tg3_init_rings(struct tg3 *tp)
 {
+       struct tg3_napi *tnapi = &tp->napi[0];
+
        /* Free up all the SKBs. */
        tg3_free_rings(tp);
 
        /* Zero out all descriptors. */
-       memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
-       memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
+       memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+       tnapi->rx_rcb_ptr = 0;
+       memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
        return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
 }
@@ -5759,22 +5772,25 @@ static int tg3_init_rings(struct tg3 *tp)
  */
 static void tg3_free_consistent(struct tg3 *tp)
 {
-       kfree(tp->tx_buffers);
-       tp->tx_buffers = NULL;
-       if (tp->rx_rcb) {
-               pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
-                                   tp->rx_rcb, tp->rx_rcb_mapping);
-               tp->rx_rcb = NULL;
-       }
-       if (tp->tx_ring) {
+       struct tg3_napi *tnapi = &tp->napi[0];
+
+       kfree(tnapi->tx_buffers);
+       tnapi->tx_buffers = NULL;
+       if (tnapi->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
-                       tp->tx_ring, tp->tx_desc_mapping);
-               tp->tx_ring = NULL;
+                       tnapi->tx_ring, tnapi->tx_desc_mapping);
+               tnapi->tx_ring = NULL;
        }
-       if (tp->hw_status) {
+       if (tnapi->rx_rcb) {
+               pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
+                                   tnapi->rx_rcb, tnapi->rx_rcb_mapping);
+               tnapi->rx_rcb = NULL;
+       }
+       if (tnapi->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
-                                   tp->hw_status, tp->status_mapping);
-               tp->hw_status = NULL;
+                                   tnapi->hw_status,
+                                   tnapi->status_mapping);
+               tnapi->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
@@ -5790,37 +5806,43 @@ static void tg3_free_consistent(struct tg3 *tp)
  */
 static int tg3_alloc_consistent(struct tg3 *tp)
 {
+       struct tg3_napi *tnapi = &tp->napi[0];
+
        if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
                return -ENOMEM;
 
-       tp->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
-                                TG3_TX_RING_SIZE, GFP_KERNEL);
-       if (!tp->tx_buffers)
+       tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
+                                   TG3_TX_RING_SIZE, GFP_KERNEL);
+       if (!tnapi->tx_buffers)
                goto err_out;
 
-       tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
-                                         &tp->rx_rcb_mapping);
-       if (!tp->rx_rcb)
+       tnapi->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
+                                             &tnapi->tx_desc_mapping);
+       if (!tnapi->tx_ring)
                goto err_out;
 
-       tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
-                                          &tp->tx_desc_mapping);
-       if (!tp->tx_ring)
+       tnapi->hw_status = pci_alloc_consistent(tp->pdev,
+                                               TG3_HW_STATUS_SIZE,
+                                               &tnapi->status_mapping);
+       if (!tnapi->hw_status)
                goto err_out;
 
-       tp->hw_status = pci_alloc_consistent(tp->pdev,
-                                            TG3_HW_STATUS_SIZE,
-                                            &tp->status_mapping);
-       if (!tp->hw_status)
+       memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+       tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
+                                            TG3_RX_RCB_RING_BYTES(tp),
+                                            &tnapi->rx_rcb_mapping);
+       if (!tnapi->rx_rcb)
                goto err_out;
 
+       memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;
 
-       memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
        return 0;
@@ -5882,6 +5904,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
 static int tg3_abort_hw(struct tg3 *tp, int silent)
 {
        int i, err;
+       struct tg3_napi *tnapi = &tp->napi[0];
 
        tg3_disable_ints(tp);
 
@@ -5933,8 +5956,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 
-       if (tp->hw_status)
-               memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+       if (tnapi->hw_status)
+               memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
@@ -6261,12 +6284,12 @@ static int tg3_chip_reset(struct tg3 *tp)
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
-       if (tp->hw_status) {
-               tp->hw_status->status = 0;
-               tp->hw_status->status_tag = 0;
+       if (tp->napi[0].hw_status) {
+               tp->napi[0].hw_status->status = 0;
+               tp->napi[0].hw_status->status_tag = 0;
        }
-       tp->last_tag = 0;
-       tp->last_irq_tag = 0;
+       tp->napi[0].last_tag = 0;
+       tp->napi[0].last_irq_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);
 
@@ -7078,13 +7101,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                                      BDINFO_FLAGS_DISABLED);
        }
 
-       tp->tx_prod = 0;
-       tp->tx_cons = 0;
-       tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
+       tp->napi[0].tx_prod = 0;
+       tp->napi[0].tx_cons = 0;
        tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
 
+       val = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+       tw32_mailbox(val, 0);
+
        tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
-                      tp->tx_desc_mapping,
+                      tp->napi[0].tx_desc_mapping,
                       (TG3_TX_RING_SIZE <<
                        BDINFO_FLAGS_MAXLEN_SHIFT),
                       NIC_SRAM_TX_BUFFER_DESC);
@@ -7100,11 +7125,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                }
        }
 
-       tp->rx_rcb_ptr = 0;
-       tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
+       tw32_rx_mbox(tp->napi[0].consmbox, 0);
 
        tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
-                      tp->rx_rcb_mapping,
+                      tp->napi[0].rx_rcb_mapping,
                       (TG3_RX_RCB_RING_SIZE(tp) <<
                        BDINFO_FLAGS_MAXLEN_SHIFT),
                       0);
@@ -7207,9 +7231,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 
        /* set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
-            ((u64) tp->status_mapping >> 32));
+            ((u64) tp->napi[0].status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
-            ((u64) tp->status_mapping & 0xffffffff));
+            ((u64) tp->napi[0].status_mapping & 0xffffffff));
 
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                /* Status/statistics block address.  See tg3_timer,
@@ -7238,7 +7262,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tg3_write_mem(tp, i, 0);
                udelay(40);
        }
-       memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+       memset(tp->napi[0].hw_status, 0, TG3_HW_STATUS_SIZE);
 
        if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
@@ -7291,7 +7315,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
 
-       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
+       tw32_mailbox_f(tp->napi[0].int_mbox, 0);
 
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -7593,7 +7617,7 @@ static void tg3_timer(unsigned long __opaque)
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
-               if (tp->hw_status->status & SD_STATUS_UPDATED) {
+               if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
@@ -7703,7 +7727,7 @@ static int tg3_request_irq(struct tg3 *tp)
 {
        irq_handler_t fn;
        unsigned long flags;
-       struct net_device *dev = tp->dev;
+       char *name = tp->dev->name;
 
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                fn = tg3_msi;
@@ -7716,11 +7740,12 @@ static int tg3_request_irq(struct tg3 *tp)
                        fn = tg3_interrupt_tagged;
                flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
        }
-       return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
+       return request_irq(tp->pdev->irq, fn, flags, name, &tp->napi[0]);
 }
 
 static int tg3_test_interrupt(struct tg3 *tp)
 {
+       struct tg3_napi *tnapi = &tp->napi[0];
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;
 
@@ -7729,14 +7754,14 @@ static int tg3_test_interrupt(struct tg3 *tp)
 
        tg3_disable_ints(tp);
 
-       free_irq(tp->pdev->irq, dev);
+       free_irq(tp->pdev->irq, tnapi);
 
        err = request_irq(tp->pdev->irq, tg3_test_isr,
-                         IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
+                         IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
        if (err)
                return err;
 
-       tp->hw_status->status &= ~SD_STATUS_UPDATED;
+       tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
 
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
@@ -7745,8 +7770,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;
 
-               int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
-                                       TG3_64BIT_REG_LOW);
+               int_mbox = tr32_mailbox(tnapi->int_mbox);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 
                if ((int_mbox != 0) ||
@@ -7760,7 +7784,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
 
        tg3_disable_ints(tp);
 
-       free_irq(tp->pdev->irq, dev);
+       free_irq(tp->pdev->irq, tnapi);
 
        err = tg3_request_irq(tp);
 
@@ -7778,7 +7802,6 @@ static int tg3_test_interrupt(struct tg3 *tp)
  */
 static int tg3_test_msi(struct tg3 *tp)
 {
-       struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;
 
@@ -7809,7 +7832,8 @@ static int tg3_test_msi(struct tg3 *tp)
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);
 
-       free_irq(tp->pdev->irq, dev);
+       free_irq(tp->pdev->irq, &tp->napi[0]);
+
        pci_disable_msi(tp->pdev);
 
        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
@@ -7829,7 +7853,7 @@ static int tg3_test_msi(struct tg3 *tp)
        tg3_full_unlock(tp);
 
        if (err)
-               free_irq(tp->pdev->irq, dev);
+               free_irq(tp->pdev->irq, &tp->napi[0]);
 
        return err;
 }
@@ -7865,6 +7889,33 @@ static int tg3_request_firmware(struct tg3 *tp)
        return 0;
 }
 
+static void tg3_ints_init(struct tg3 *tp)
+{
+       if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
+               /* All MSI supporting chips should support tagged
+                * status.  Assert that this is the case.
+                */
+               if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+                       printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+                              "Not using MSI.\n", tp->dev->name);
+               } else if (pci_enable_msi(tp->pdev) == 0) {
+                       u32 msi_mode;
+
+                       msi_mode = tr32(MSGINT_MODE);
+                       tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+                       tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
+               }
+       }
+}
+
+static void tg3_ints_fini(struct tg3 *tp)
+{
+       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+               pci_disable_msi(tp->pdev);
+               tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+       }
+}
+
 static int tg3_open(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
@@ -7906,33 +7957,14 @@ static int tg3_open(struct net_device *dev)
        if (err)
                return err;
 
-       if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
-               /* All MSI supporting chips should support tagged
-                * status.  Assert that this is the case.
-                */
-               if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
-                       printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
-                              "Not using MSI.\n", tp->dev->name);
-               } else if (pci_enable_msi(tp->pdev) == 0) {
-                       u32 msi_mode;
+       tg3_ints_init(tp);
 
-                       msi_mode = tr32(MSGINT_MODE);
-                       tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
-                       tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
-               }
-       }
-       err = tg3_request_irq(tp);
+       napi_enable(&tp->napi[0].napi);
 
-       if (err) {
-               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
-                       pci_disable_msi(tp->pdev);
-                       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
-               }
-               tg3_free_consistent(tp);
-               return err;
-       }
+       err = tg3_request_irq(tp);
 
-       napi_enable(&tp->napi);
+       if (err)
+               goto err_out1;
 
        tg3_full_lock(tp, 0);
 
@@ -7960,36 +7992,19 @@ static int tg3_open(struct net_device *dev)
 
        tg3_full_unlock(tp);
 
-       if (err) {
-               napi_disable(&tp->napi);
-               free_irq(tp->pdev->irq, dev);
-               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
-                       pci_disable_msi(tp->pdev);
-                       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
-               }
-               tg3_free_consistent(tp);
-               return err;
-       }
+       if (err)
+               goto err_out2;
 
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                err = tg3_test_msi(tp);
 
                if (err) {
                        tg3_full_lock(tp, 0);
-
-                       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
-                               pci_disable_msi(tp->pdev);
-                               tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
-                       }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
-                       tg3_free_consistent(tp);
-
                        tg3_full_unlock(tp);
 
-                       napi_disable(&tp->napi);
-
-                       return err;
+                       goto err_out1;
                }
 
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -8015,6 +8030,15 @@ static int tg3_open(struct net_device *dev)
        netif_start_queue(dev);
 
        return 0;
+
+err_out2:
+       free_irq(tp->pdev->irq, &tp->napi[0]);
+
+err_out1:
+       napi_disable(&tp->napi[0].napi);
+       tg3_ints_fini(tp);
+       tg3_free_consistent(tp);
+       return err;
 }
 
 #if 0
@@ -8023,6 +8047,7 @@ static int tg3_open(struct net_device *dev)
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;
+       struct tg3_hw_status *sblk = tp->napi[0].hw_status;
 
        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
@@ -8175,14 +8200,15 @@ static int tg3_open(struct net_device *dev)
               val32, val32_2, val32_3, val32_4, val32_5);
 
        /* SW status block */
-       printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
-              tp->hw_status->status,
-              tp->hw_status->status_tag,
-              tp->hw_status->rx_jumbo_consumer,
-              tp->hw_status->rx_consumer,
-              tp->hw_status->rx_mini_consumer,
-              tp->hw_status->idx[0].rx_producer,
-              tp->hw_status->idx[0].tx_consumer);
+       printk(KERN_DEBUG
+        "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+              sblk->status,
+              sblk->status_tag,
+              sblk->rx_jumbo_consumer,
+              sblk->rx_consumer,
+              sblk->rx_mini_consumer,
+              sblk->idx[0].rx_producer,
+              sblk->idx[0].tx_consumer);
 
        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
@@ -8252,7 +8278,7 @@ static int tg3_close(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       napi_disable(&tp->napi);
+       napi_disable(&tp->napi[0].napi);
        cancel_work_sync(&tp->reset_task);
 
        netif_stop_queue(dev);
@@ -8272,11 +8298,9 @@ static int tg3_close(struct net_device *dev)
 
        tg3_full_unlock(tp);
 
-       free_irq(tp->pdev->irq, dev);
-       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
-               pci_disable_msi(tp->pdev);
-               tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
-       }
+       free_irq(tp->pdev->irq, &tp->napi[0]);
+
+       tg3_ints_fini(tp);
 
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
@@ -9078,7 +9102,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
        else
                ering->rx_jumbo_pending = 0;
 
-       ering->tx_pending = tp->tx_pending;
+       ering->tx_pending = tp->napi[0].tx_pending;
 }
 
 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
@@ -9108,7 +9132,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
-       tp->tx_pending = ering->tx_pending;
+       tp->napi[0].tx_pending = ering->tx_pending;
 
        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -9822,8 +9846,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
+       struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
+       tnapi = &tp->napi[0];
+       rnapi = &tp->napi[0];
+
        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
@@ -9905,18 +9933,17 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
        udelay(10);
 
-       rx_start_idx = tp->hw_status->idx[0].rx_producer;
+       rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
 
        num_pkts = 0;
 
-       tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
+       tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
 
-       tp->tx_prod++;
+       tnapi->tx_prod++;
        num_pkts++;
 
-       tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
-                    tp->tx_prod);
-       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+       tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+       tr32_mailbox(tnapi->prodmbox);
 
        udelay(10);
 
@@ -9927,9 +9954,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
                udelay(10);
 
-               tx_idx = tp->hw_status->idx[0].tx_consumer;
-               rx_idx = tp->hw_status->idx[0].rx_producer;
-               if ((tx_idx == tp->tx_prod) &&
+               tx_idx = tnapi->hw_status->idx[0].tx_consumer;
+               rx_idx = rnapi->hw_status->idx[0].rx_producer;
+               if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }
@@ -9937,13 +9964,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);
 
-       if (tx_idx != tp->tx_prod)
+       if (tx_idx != tnapi->tx_prod)
                goto out;
 
        if (rx_idx != rx_start_idx + num_pkts)
                goto out;
 
-       desc = &tp->rx_rcb[rx_start_idx];
+       desc = &rnapi->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
@@ -13396,9 +13423,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
-       tp->tx_pending = TG3_DEF_TX_RING_PENDING;
 
-       netif_napi_add(dev, &tp->napi, tg3_poll, 64);
+       tp->napi[0].tp = tp;
+       tp->napi[0].int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+       tp->napi[0].consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+       tp->napi[0].prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+       tp->napi[0].tx_pending = TG3_DEF_TX_RING_PENDING;
+       netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->irq = pdev->irq;