{
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+ tw32_mailbox_f(tp->napi[0].int_mbox, 0x00000001);
}
static inline void tg3_cond_int(struct tg3 *tp)
{
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
- (tp->hw_status->status & SD_STATUS_UPDATED))
+ (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
else
tw32(HOSTCC_MODE, tp->coalesce_mode |
static void tg3_enable_ints(struct tg3 *tp)
{
+ struct tg3_napi *tnapi = &tp->napi[0];
tp->irq_sync = 0;
wmb();
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- (tp->last_tag << 24));
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- (tp->last_tag << 24));
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
tg3_cond_int(tp);
}
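
For context, every tnapi-> field this patch touches is expected to live in struct tg3_napi; the tg3.h hunk is not part of this excerpt, so the following is a sketch of the assumed layout rather than the final definition:

struct tg3_napi {
	struct napi_struct	napi;
	struct tg3		*tp;
	struct tg3_hw_status	*hw_status;

	u32			last_tag;
	u32			last_irq_tag;
	u32			int_mbox;

	dma_addr_t		status_mapping;
};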
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
struct tg3 *tp = tnapi->tp;
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int work_exists = 0;
/* check for phy events */
{
struct tg3 *tp = tnapi->tp;
- tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- tp->last_tag << 24);
+ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
mmiowb();
/* When doing tagged status, this work check is unnecessary.
static inline void tg3_netif_start(struct tg3 *tp)
{
+ struct tg3_napi *tnapi = &tp->napi[0];
netif_wake_queue(tp->dev);
/* NOTE: unconditional netif_wake_queue is only appropriate
* so long as all callers are assured to have free tx slots
* (such as after tg3_init_hw)
*/
- napi_enable(&tp->napi[0].napi);
- tp->hw_status->status |= SD_STATUS_UPDATED;
+ napi_enable(&tnapi->napi);
+ tnapi->hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
}
else
current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
- tp->hw_status->status =
+ tp->napi[0].hw_status->status =
(SD_STATUS_UPDATED |
- (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
+ (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
for (i = 0; i < 100; i++) {
tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
static void tg3_tx(struct tg3_napi *tnapi)
{
struct tg3 *tp = tnapi->tp;
- u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
+ u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
u32 sw_idx = tp->tx_cons;
while (sw_idx != hw_idx) {
int received;
struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
- hw_idx = tp->hw_status->idx[0].rx_producer;
+ hw_idx = tnapi->hw_status->idx[0].rx_producer;
/*
* We need to order the read of hw_idx and the read of
* the opaque cookie.
/* Refresh hw_idx to see if there is new work */
if (sw_idx == hw_idx) {
- hw_idx = tp->hw_status->idx[0].rx_producer;
+ hw_idx = tnapi->hw_status->idx[0].rx_producer;
rmb();
}
}
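
The ordering comment above is the point of the rmb(): the producer index must be read before the descriptors it covers, so the re-read pairs a fresh index load with a read barrier. A minimal consumer-side sketch of that pattern (the helper and the ring-advance arithmetic are illustrative, not tg3's exact code):

/* Read the producer index published by the NIC, then fence so the
 * descriptor reads that follow are no older than that index.
 */
hw_idx = tnapi->hw_status->idx[0].rx_producer;
rmb();
while (sw_idx != hw_idx) {
	/* descriptors up to hw_idx are now safe to read */
	process_rx_desc(&tp->rx_rcb[sw_idx]);	/* illustrative helper */
	sw_idx = (sw_idx + 1) % ring_size;	/* illustrative advance */
}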
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
struct tg3 *tp = tnapi->tp;
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
/* handle link change and other phy events */
if (!(tp->tg3_flags &
struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
struct tg3 *tp = tnapi->tp;
int work_done = 0;
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
while (1) {
work_done = tg3_poll_work(tnapi, work_done, budget);
* to tell the hw how much work has been processed,
* so we must read it before checking for more work.
*/
- tp->last_tag = sblk->status_tag;
- tp->last_irq_tag = tp->last_tag;
+ tnapi->last_tag = sblk->status_tag;
+ tnapi->last_irq_tag = tnapi->last_tag;
rmb();
} else
sblk->status &= ~SD_STATUS_UPDATED;
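
Taken together with the mailbox write earlier in this patch, the tagged-status flow is: snapshot status_tag, drain the advertised work, then ack by writing the tag into bits 31:24 of the vector's interrupt mailbox. A condensed sketch of that sequence using the new per-vector fields (not a literal hunk from this patch):

/* Tagged-status ack: the tag bookkeeping and the mailbox are both
 * per-vector now, so everything comes from tnapi.
 */
tnapi->last_tag = sblk->status_tag;	/* read tag before re-checking work */
tnapi->last_irq_tag = tnapi->last_tag;
rmb();
/* ... process tx/rx up to the indices in the status block ... */
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);	/* ack + re-arm */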
struct tg3_napi *tnapi = dev_id;
struct tg3 *tp = tnapi->tp;
- prefetch(tp->hw_status);
+ prefetch(tnapi->hw_status);
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
if (likely(!tg3_irq_sync(tp)))
struct tg3_napi *tnapi = dev_id;
struct tg3 *tp = tnapi->tp;
- prefetch(tp->hw_status);
+ prefetch(tnapi->hw_status);
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
/*
* Writing any value to intr-mbox-0 clears PCI INTA# and
{
struct tg3_napi *tnapi = dev_id;
struct tg3 *tp = tnapi->tp;
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
{
struct tg3_napi *tnapi = dev_id;
struct tg3 *tp = tnapi->tp;
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
* Reading the PCI State register will confirm whether the
* interrupt is ours and will flush the status block.
*/
- if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
+ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
handled = 0;
* so that the above check can report that the screaming interrupts
* are unhandled. Eventually they will be silenced.
*/
- tp->last_irq_tag = sblk->status_tag;
+ tnapi->last_irq_tag = sblk->status_tag;
if (tg3_irq_sync(tp))
goto out;
{
struct tg3_napi *tnapi = dev_id;
struct tg3 *tp = tnapi->tp;
- struct tg3_hw_status *sblk = tp->hw_status;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
if ((sblk->status & SD_STATUS_UPDATED) ||
!(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
*/
static void tg3_free_consistent(struct tg3 *tp)
{
+ struct tg3_napi *tnapi = &tp->napi[0];
+
kfree(tp->tx_buffers);
tp->tx_buffers = NULL;
if (tp->rx_rcb) {
tp->tx_ring, tp->tx_desc_mapping);
tp->tx_ring = NULL;
}
- if (tp->hw_status) {
+ if (tnapi->hw_status) {
pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
- tp->hw_status, tp->status_mapping);
- tp->hw_status = NULL;
+ tnapi->hw_status,
+ tnapi->status_mapping);
+ tnapi->hw_status = NULL;
}
if (tp->hw_stats) {
pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
*/
static int tg3_alloc_consistent(struct tg3 *tp)
{
+ struct tg3_napi *tnapi = &tp->napi[0];
+
if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
return -ENOMEM;
if (!tp->tx_ring)
goto err_out;
- tp->hw_status = pci_alloc_consistent(tp->pdev,
- TG3_HW_STATUS_SIZE,
- &tp->status_mapping);
- if (!tp->hw_status)
+ tnapi->hw_status = pci_alloc_consistent(tp->pdev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping);
+ if (!tnapi->hw_status)
goto err_out;
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
tp->hw_stats = pci_alloc_consistent(tp->pdev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping);
if (!tp->hw_stats)
goto err_out;
- memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
return 0;
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
int i, err;
+ struct tg3_napi *tnapi = &tp->napi[0];
tg3_disable_ints(tp);
err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
- if (tp->hw_status)
- memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+ if (tnapi->hw_status)
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
if (tp->hw_stats)
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
* sharing or irqpoll.
*/
tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
- if (tp->hw_status) {
- tp->hw_status->status = 0;
- tp->hw_status->status_tag = 0;
+ if (tp->napi[0].hw_status) {
+ tp->napi[0].hw_status->status = 0;
+ tp->napi[0].hw_status->status_tag = 0;
}
- tp->last_tag = 0;
- tp->last_irq_tag = 0;
+ tp->napi[0].last_tag = 0;
+ tp->napi[0].last_irq_tag = 0;
smp_mb();
synchronize_irq(tp->pdev->irq);
/* set status block DMA address */
tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
- ((u64) tp->status_mapping >> 32));
+ ((u64) tp->napi[0].status_mapping >> 32));
tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
- ((u64) tp->status_mapping & 0xffffffff));
+ ((u64) tp->napi[0].status_mapping & 0xffffffff));
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
/* Status/statistics block address. See tg3_timer,
tg3_write_mem(tp, i, 0);
udelay(40);
}
- memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+ memset(tp->napi[0].hw_status, 0, TG3_HW_STATUS_SIZE);
if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
+ tw32_mailbox_f(tp->napi[0].int_mbox, 0);
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
* IRQ status the mailbox/status_block protocol the chip
* uses with the cpu is race prone.
*/
- if (tp->hw_status->status & SD_STATUS_UPDATED) {
+ if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
tw32(GRC_LOCAL_CTRL,
tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
} else {
if (err)
return err;
- tp->hw_status->status &= ~SD_STATUS_UPDATED;
+ tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
tg3_enable_ints(tp);
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
for (i = 0; i < 5; i++) {
u32 int_mbox, misc_host_ctrl;
- int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
- TG3_64BIT_REG_LOW);
+ int_mbox = tr32_mailbox(tnapi->int_mbox);
misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
if ((int_mbox != 0) ||
u32 val32, val32_2, val32_3, val32_4, val32_5;
u16 val16;
int i;
+ struct tg3_hw_status *sblk = tp->napi[0].hw_status;
pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
val32, val32_2, val32_3, val32_4, val32_5);
/* SW status block */
- printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
- tp->hw_status->status,
- tp->hw_status->status_tag,
- tp->hw_status->rx_jumbo_consumer,
- tp->hw_status->rx_consumer,
- tp->hw_status->rx_mini_consumer,
- tp->hw_status->idx[0].rx_producer,
- tp->hw_status->idx[0].tx_consumer);
+ printk(KERN_DEBUG
+ "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+ sblk->status,
+ sblk->status_tag,
+ sblk->rx_jumbo_consumer,
+ sblk->rx_consumer,
+ sblk->rx_mini_consumer,
+ sblk->idx[0].rx_producer,
+ sblk->idx[0].tx_consumer);
/* SW statistics block */
printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
dma_addr_t map;
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
+ struct tg3_napi *tnapi, *rnapi;
struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
+
if (loopback_mode == TG3_MAC_LOOPBACK) {
/* HW errata - mac loopback fails in some cases on 5780.
* Normal traffic and PHY loopback are not affected by
udelay(10);
- rx_start_idx = tp->hw_status->idx[0].rx_producer;
+ rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
num_pkts = 0;
udelay(10);
- tx_idx = tp->hw_status->idx[0].tx_consumer;
- rx_idx = tp->hw_status->idx[0].rx_producer;
+ tx_idx = tnapi->hw_status->idx[0].tx_consumer;
+ rx_idx = rnapi->hw_status->idx[0].rx_producer;
if ((tx_idx == tp->tx_prod) &&
(rx_idx == (rx_start_idx + num_pkts)))
break;
tp->tx_pending = TG3_DEF_TX_RING_PENDING;
tp->napi[0].tp = tp;
+ tp->napi[0].int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
dev->ethtool_ops = &tg3_ethtool_ops;
dev->watchdog_timeo = TG3_TX_TIMEOUT;
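
Seeding napi[0].int_mbox in tg3_init_one is what makes the rest of the patch vector-agnostic; presumably later patches add further contexts. A speculative sketch of how that could look (the vector count and the 0x8 mailbox stride are assumptions, not from this patch):

/* Hypothetical multi-vector bring-up; interrupt mailboxes are assumed
 * to sit 0x8 apart starting at MAILBOX_INTERRUPT_0.
 */
for (i = 0; i < num_vecs; i++) {
	tp->napi[i].tp = tp;
	tp->napi[i].int_mbox = MAILBOX_INTERRUPT_0 +
			       TG3_64BIT_REG_LOW + i * 0x8;
	netif_napi_add(dev, &tp->napi[i].napi, tg3_poll, 64);
}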