BROCADE BNA 10 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rmody@brocade.com>
M: Debashis Dutt <ddutt@brocade.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/bna/
+F: drivers/net/ethernet/brocade/bna/
BSG (block layer generic sg v4 driver)
M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
--- /dev/null
+/*
+ * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
+ *
+ * 2005-2010 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
+ * available in the GRLIB VHDL IP core library.
+ *
+ * Full documentation of both cores can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * The Gigabit version supports scatter/gather DMA, any alignment of
+ * buffers and checksum offloading.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Kristoffer Glembo
+ * Daniel Hellstrom
+ * Marko Isomaki
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#endif
+
+#include "greth.h"
+
+#define GRETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
+module_param(greth_debug, int, 0);
+MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
+
+/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
+static int macaddr[6];
+module_param_array(macaddr, int, NULL, 0);
+MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
+
+static int greth_edcl = 1;
+module_param(greth_edcl, int, 0);
+MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
+
+static int greth_open(struct net_device *dev);
+static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
+ struct net_device *dev);
+static int greth_rx(struct net_device *dev, int limit);
+static int greth_rx_gbit(struct net_device *dev, int limit);
+static void greth_clean_tx(struct net_device *dev);
+static void greth_clean_tx_gbit(struct net_device *dev);
+static irqreturn_t greth_interrupt(int irq, void *dev_id);
+static int greth_close(struct net_device *dev);
+static int greth_set_mac_add(struct net_device *dev, void *p);
+static void greth_set_multicast_list(struct net_device *dev);
+
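+/* Register and buffer-descriptor accessors. The GRETH core is
+ * big-endian, so use the non-swapping __raw_* MMIO helpers and do the
+ * byte-order conversion explicitly with cpu_to_be32/be32_to_cpu.
+ */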
+#define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
+#define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
+#define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
+#define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
+
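+/* Ring index helpers: both descriptor rings are power-of-two sized,
+ * so wrapping an index is a simple mask instead of a modulo.
+ */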
+#define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
+#define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
+#define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
+
+static void greth_print_rx_packet(void *addr, int len)
+{
+ print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ addr, len, true);
+}
+
+static void greth_print_tx_packet(struct sk_buff *skb)
+{
+ int i;
+ int length;
+
+ if (skb_shinfo(skb)->nr_frags == 0)
+ length = skb->len;
+ else
+ length = skb_headlen(skb);
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, length, true);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb_frag_address(&skb_shinfo(skb)->frags[i]),
+ skb_shinfo(skb)->frags[i].size, true);
+ }
+}
+
+static inline void greth_enable_tx(struct greth_private *greth)
+{
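+ /* Order all descriptor writes before (re)enabling the transmitter. */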
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
+}
+
+static inline void greth_disable_tx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
+}
+
+static inline void greth_enable_rx(struct greth_private *greth)
+{
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
+}
+
+static inline void greth_disable_rx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
+}
+
+static inline void greth_enable_irqs(struct greth_private *greth)
+{
+ GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
+}
+
+static inline void greth_disable_irqs(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
+}
+
+static inline void greth_write_bd(u32 *bd, u32 val)
+{
+ __raw_writel(cpu_to_be32(val), bd);
+}
+
+static inline u32 greth_read_bd(u32 *bd)
+{
+ return be32_to_cpu(__raw_readl(bd));
+}
+
+static void greth_clean_rings(struct greth_private *greth)
+{
+ int i;
+ struct greth_bd *rx_bdp = greth->rx_bd_base;
+ struct greth_bd *tx_bdp = greth->tx_bd_base;
+
+ if (greth->gbit_mac) {
+
+ /* Free and unmap RX buffers */
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ if (greth->rx_skbuff[i] != NULL) {
+ dev_kfree_skb(greth->rx_skbuff[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* TX buffers */
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+
+
+ } else { /* 10/100 Mbps MAC */
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ kfree(greth->rx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
+ kfree(greth->tx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+static int greth_init_rings(struct greth_private *greth)
+{
+ struct sk_buff *skb;
+ struct greth_bd *rx_bd, *tx_bd;
+ u32 dma_addr;
+ int i;
+
+ rx_bd = greth->rx_bd_base;
+ tx_bd = greth->tx_bd_base;
+
+ /* Initialize descriptor rings and buffers */
+ if (greth->gbit_mac) {
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+ skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
+ if (skb == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(greth->dev,
+ skb->data,
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth->rx_skbuff[i] = skb;
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+
+ } else {
+
+ /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+
+ greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->rx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->rx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++) {
+
+ greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->tx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->tx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&tx_bd[i].addr, dma_addr);
+ greth_write_bd(&tx_bd[i].stat, 0);
+ }
+ }
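+ /* Mark the last RX descriptor with the wrap bit so the hardware
+ * returns to descriptor 0 after it.
+ */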
+ greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
+ greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
+
+ /* Initialize pointers. */
+ greth->rx_cur = 0;
+ greth->tx_next = 0;
+ greth->tx_last = 0;
+ greth->tx_free = GRETH_TXBD_NUM;
+
+ /* Initialize descriptor base address */
+ GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
+ GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
+
+ return 0;
+
+cleanup:
+ greth_clean_rings(greth);
+ return -ENOMEM;
+}
+
+static int greth_open(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ int err;
+
+ err = greth_init_rings(greth);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
+ return err;
+ }
+
+ err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
+ greth_clean_rings(greth);
+ return err;
+ }
+
+ if (netif_msg_ifup(greth))
+ dev_dbg(&dev->dev, " starting queue\n");
+ netif_start_queue(dev);
+
+ GRETH_REGSAVE(greth->regs->status, 0xFF);
+
+ napi_enable(&greth->napi);
+
+ greth_enable_irqs(greth);
+ greth_enable_tx(greth);
+ greth_enable_rx(greth);
+ return 0;
+
+}
+
+static int greth_close(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ napi_disable(&greth->napi);
+
+ greth_disable_irqs(greth);
+ greth_disable_tx(greth);
+ greth_disable_rx(greth);
+
+ netif_stop_queue(dev);
+
+ free_irq(greth->irq, (void *) dev);
+
+ greth_clean_rings(greth);
+
+ return 0;
+}
+
+static netdev_tx_t
+greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ int err = NETDEV_TX_OK;
+ u32 status, dma_addr, ctrl;
+ unsigned long flags;
+
+ /* Clean TX Ring */
+ greth_clean_tx(greth->netdev);
+
+ if (unlikely(greth->tx_free <= 0)) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+ dma_addr = greth_read_bd(&bdp->addr);
+
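+ /* The 10/100 MAC has no scatter/gather support, so the frame is
+ * copied into the descriptor's pre-allocated, pre-mapped bounce
+ * buffer.
+ */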
+ memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+
+ status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
+ greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
+
+ /* Wrap around descriptor ring */
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ greth->tx_next = NEXT_TX(greth->tx_next);
+ greth->tx_free--;
+
+ /* Write descriptor control word and enable transmission */
+ greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
+ greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+out:
+ dev_kfree_skb(skb);
+ return err;
+}
+
+
+static netdev_tx_t
+greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ u32 status = 0, dma_addr, ctrl;
+ int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+ unsigned long flags;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* Clean TX Ring */
+ greth_clean_tx_gbit(dev);
+
+ if (greth->tx_free < nr_frags + 1) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ err = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ /* Save skb pointer. */
+ greth->tx_skbuff[greth->tx_next] = skb;
+
+ /* Linear buf */
+ if (nr_frags != 0)
+ status = GRETH_TXBD_MORE;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
+ status |= skb_headlen(skb) & GRETH_BD_LEN;
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, status);
+ dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(greth->tx_next);
+
+ /* Frags */
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ greth->tx_skbuff[curr_tx] = NULL;
+ bdp = greth->tx_bd_base + curr_tx;
+
+ status = GRETH_BD_EN;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
+ status |= frag->size & GRETH_BD_LEN;
+
+ /* Wrap around descriptor ring */
+ if (curr_tx == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+ /* More fragments left */
+ if (i < nr_frags - 1)
+ status |= GRETH_TXBD_MORE;
+ else
+ status |= GRETH_BD_IE; /* enable IRQ on last fragment */
+
+ greth_write_bd(&bdp->stat, status);
+
+ dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto frag_map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(curr_tx);
+ }
+
+ wmb();
+
+ /* Enable the descriptor chain by enabling the first descriptor */
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+ greth->tx_next = curr_tx;
+ greth->tx_free -= nr_frags + 1;
+
+ wmb();
+
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
+ greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ return NETDEV_TX_OK;
+
+frag_map_error:
+ /* Unmap SKB mappings that succeeded and disable descriptor */
+ for (i = greth->tx_next; i != curr_tx; i = NEXT_TX(i)) {
+ bdp = greth->tx_bd_base + i;
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
+ DMA_TO_DEVICE);
+ greth_write_bd(&bdp->stat, 0);
+ }
+map_error:
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+ dev_kfree_skb(skb);
+out:
+ return err;
+}
+
+static irqreturn_t greth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct greth_private *greth;
+ u32 status, ctrl;
+ irqreturn_t retval = IRQ_NONE;
+
+ greth = netdev_priv(dev);
+
+ spin_lock(&greth->devlock);
+
+ /* Get the interrupt events that caused us to be here. */
+ status = GRETH_REGLOAD(greth->regs->status);
+
+ /* Also check whether interrupts are enabled: the INT_TX|INT_RX
+ * status flags may be set regardless of whether the corresponding
+ * IRQ is enabled, which matters especially with a shared IRQ.
+ */
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+
+ /* Handle rx and tx interrupts through poll */
+ if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+ ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
+ retval = IRQ_HANDLED;
+
+ /* Disable interrupts and schedule poll() */
+ greth_disable_irqs(greth);
+ napi_schedule(&greth->napi);
+ }
+
+ mmiowb();
+ spin_unlock(&greth->devlock);
+
+ return retval;
+}
+
+static void greth_clean_tx(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ u32 stat;
+
+ greth = netdev_priv(dev);
+
+ while (1) {
+ bdp = greth->tx_bd_base + greth->tx_last;
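+ /* Acknowledge the TX interrupt bits before inspecting the
+ * descriptor; a completion racing with this check then re-raises
+ * the interrupt instead of being lost.
+ */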
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
+ stat = greth_read_bd(&bdp->stat);
+
+ if (unlikely(stat & GRETH_BD_EN))
+ break;
+
+ if (greth->tx_free == GRETH_TXBD_NUM)
+ break;
+
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ }
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ greth->tx_free++;
+ }
+
+ if (greth->tx_free > 0) {
+ netif_wake_queue(dev);
+ }
+
+}
+
+static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
+{
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ if (stat & GRETH_TXBD_ERR_LC)
+ dev->stats.tx_aborted_errors++;
+ }
+ dev->stats.tx_packets++;
+}
+
+static void greth_clean_tx_gbit(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp, *bdp_last_frag;
+ struct sk_buff *skb;
+ u32 stat;
+ int nr_frags, i;
+
+ greth = netdev_priv(dev);
+
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ skb = greth->tx_skbuff[greth->tx_last];
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* We only clean fully completed SKBs */
+ bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
+ stat = greth_read_bd(&bdp_last_frag->stat);
+
+ if (stat & GRETH_BD_EN)
+ break;
+
+ greth->tx_skbuff[greth->tx_last] = NULL;
+
+ greth_update_tx_stats(dev, stat);
+ dev->stats.tx_bytes += skb->len;
+
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+
+ if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
+}
+
+static int greth_rx(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb;
+ int pkt_len;
+ int bad, count;
+ u32 status, dma_addr;
+ unsigned long flags;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
+ status = greth_read_bd(&bdp->stat);
+
+ if (unlikely(status & GRETH_BD_EN)) {
+ break;
+ }
+
+ dma_addr = greth_read_bd(&bdp->addr);
+ bad = 0;
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ }
+ if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ }
+ if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+ if (unlikely(bad)) {
+ dev->stats.rx_errors++;
+
+ } else {
+
+ pkt_len = status & GRETH_BD_LEN;
+
+ skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
+
+ if (unlikely(skb == NULL)) {
+
+ if (net_ratelimit())
+ dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
+
+ dev->stats.rx_dropped++;
+
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = dev;
+
+ dma_sync_single_for_cpu(greth->dev,
+ dma_addr,
+ pkt_len,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
+
+ memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ netif_receive_skb(skb);
+ }
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
+ greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+}
+
+static inline int hw_checksummed(u32 status)
+{
+
+ if (status & GRETH_RXBD_IP_FRAG)
+ return 0;
+
+ if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
+ return 0;
+
+ return 1;
+}
+
+static int greth_rx_gbit(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb, *newskb;
+ int pkt_len;
+ int bad, count = 0;
+ u32 status, dma_addr;
+ unsigned long flags;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ skb = greth->rx_skbuff[greth->rx_cur];
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
+ status = greth_read_bd(&bdp->stat);
+ bad = 0;
+
+ if (status & GRETH_BD_EN)
+ break;
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ } else if (status &
+ (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ } else if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+
+ /* Allocate new skb to replace current, not needed if the
+ * current skb can be reused */
+ if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
+ skb_reserve(newskb, NET_IP_ALIGN);
+
+ dma_addr = dma_map_single(greth->dev,
+ newskb->data,
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (!dma_mapping_error(greth->dev, dma_addr)) {
+ /* Process the incoming frame. */
+ pkt_len = status & GRETH_BD_LEN;
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
+
+ skb_put(skb, pkt_len);
+
+ if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ netif_receive_skb(skb);
+
+ greth->rx_skbuff[greth->rx_cur] = newskb;
+ greth_write_bd(&bdp->addr, dma_addr);
+ } else {
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
+ dev_kfree_skb(newskb);
+ /* reusing current skb, so it is a drop */
+ dev->stats.rx_dropped++;
+ }
+ } else if (bad) {
+ /* Bad Frame transfer, the skb is reused */
+ dev->stats.rx_dropped++;
+ } else {
+ /* Failed to allocate a new skb: reuse the current "filled"
+ * skb as if the transfer had failed. One could argue that RX
+ * descriptor table handling should be divided into cleaning
+ * and refilling stages, as in the TX part of the driver.
+ */
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+ /* reusing current skb, so it is a drop */
+ dev->stats.rx_dropped++;
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags);
+ greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+
+}
+
+static int greth_poll(struct napi_struct *napi, int budget)
+{
+ struct greth_private *greth;
+ int work_done = 0;
+ unsigned long flags;
+ u32 mask, ctrl;
+ greth = container_of(napi, struct greth_private, napi);
+
+restart_txrx_poll:
+ if (netif_queue_stopped(greth->netdev)) {
+ if (greth->gbit_mac)
+ greth_clean_tx_gbit(greth->netdev);
+ else
+ greth_clean_tx(greth->netdev);
+ }
+
+ if (greth->gbit_mac) {
+ work_done += greth_rx_gbit(greth->netdev, budget - work_done);
+ } else {
+ work_done += greth_rx(greth->netdev, budget - work_done);
+ }
+
+ if (work_done < budget) {
+
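+ /* Re-enable interrupts and then re-check the status register
+ * under the lock; if new work arrived in that window, back out
+ * and poll again rather than completing NAPI and losing the
+ * event.
+ */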
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ if (netif_queue_stopped(greth->netdev)) {
+ GRETH_REGSAVE(greth->regs->control,
+ ctrl | GRETH_TXI | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE |
+ GRETH_INT_TX | GRETH_INT_TE;
+ } else {
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE;
+ }
+
+ if (GRETH_REGLOAD(greth->regs->status) & mask) {
+ GRETH_REGSAVE(greth->regs->control, ctrl);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ goto restart_txrx_poll;
+ } else {
+ __napi_complete(napi);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ }
+ }
+
+ return work_done;
+}
+
+static int greth_set_mac_add(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ greth = netdev_priv(dev);
+ regs = (struct greth_regs *) greth->regs;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
+
+ return 0;
+}
+
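+/* The hardware multicast hash filter is 64 bits wide and is indexed
+ * with the low six bits of the Ethernet CRC of the address.
+ */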
+static u32 greth_hash_get_index(__u8 *addr)
+{
+ return (ether_crc(6, addr)) & 0x3F;
+}
+
+static void greth_set_hash_filter(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ u32 mc_filter[2];
+ unsigned int bitnr;
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = greth_hash_get_index(ha->addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
+ GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
+}
+
+static void greth_set_multicast_list(struct net_device *dev)
+{
+ int cfg;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+
+ cfg = GRETH_REGLOAD(regs->control);
+ if (dev->flags & IFF_PROMISC)
+ cfg |= GRETH_CTRL_PR;
+ else
+ cfg &= ~GRETH_CTRL_PR;
+
+ if (greth->multicast) {
+ if (dev->flags & IFF_ALLMULTI) {
+ GRETH_REGSAVE(regs->hash_msb, -1);
+ GRETH_REGSAVE(regs->hash_lsb, -1);
+ cfg |= GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ if (netdev_mc_empty(dev)) {
+ cfg &= ~GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ /* Setup multicast filter */
+ greth_set_hash_filter(dev);
+ cfg |= GRETH_CTRL_MCEN;
+ }
+ GRETH_REGSAVE(regs->control, cfg);
+}
+
+static u32 greth_get_msglevel(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ return greth->msg_enable;
+}
+
+static void greth_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ greth->msg_enable = value;
+}
+static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phy, cmd);
+}
+
+static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phy, cmd);
+}
+
+static int greth_get_regs_len(struct net_device *dev)
+{
+ return sizeof(struct greth_regs);
+}
+
+static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ strncpy(info->driver, dev_driver_string(greth->dev), 32);
+ strncpy(info->version, "revision: 1.0", 32);
+ strncpy(info->bus_info, greth->dev->bus->name, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ info->eedump_len = 0;
+ info->regdump_len = sizeof(struct greth_regs);
+}
+
+static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct greth_private *greth = netdev_priv(dev);
+ u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
+ u32 *buff = p;
+
+ for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
+ buff[i] = greth_read_bd(&greth_regs[i]);
+}
+
+static const struct ethtool_ops greth_ethtool_ops = {
+ .get_msglevel = greth_get_msglevel,
+ .set_msglevel = greth_set_msglevel,
+ .get_settings = greth_get_settings,
+ .set_settings = greth_set_settings,
+ .get_drvinfo = greth_get_drvinfo,
+ .get_regs_len = greth_get_regs_len,
+ .get_regs = greth_get_regs,
+ .get_link = ethtool_op_get_link,
+};
+
+static struct net_device_ops greth_netdev_ops = {
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+ .ndo_set_mac_address = greth_set_mac_add,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static inline int wait_for_mdio(struct greth_private *greth)
+{
+ unsigned long timeout = jiffies + 4*HZ/100;
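+ /* Busy-wait up to ~40 ms for the MDIO state machine to go idle. */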
+ while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
+ if (time_after(jiffies, timeout))
+ return 0;
+ }
+ return 1;
+}
+
+static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct greth_private *greth = bus->priv;
+ int data;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
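+ /* MDIO register layout: data in bits 31:16, PHY address in 15:11,
+ * register address in 10:6; bit 1 starts a read, bit 0 a write.
+ */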
+ GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
+ data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
+ return data;
+
+ } else {
+ return -1;
+ }
+}
+
+static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct greth_private *greth = bus->priv;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ GRETH_REGSAVE(greth->regs->mdio,
+ ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int greth_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void greth_link_change(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phydev = greth->phy;
+ unsigned long flags;
+ int status_change = 0;
+ u32 ctrl;
+
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ if (phydev->link) {
+
+ if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
+ ctrl = GRETH_REGLOAD(greth->regs->control) &
+ ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
+
+ if (phydev->duplex)
+ ctrl |= GRETH_CTRL_FD;
+
+ if (phydev->speed == SPEED_100)
+ ctrl |= GRETH_CTRL_SP;
+ else if (phydev->speed == SPEED_1000)
+ ctrl |= GRETH_CTRL_GB;
+
+ GRETH_REGSAVE(greth->regs->control, ctrl);
+ greth->speed = phydev->speed;
+ greth->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != greth->link) {
+ if (!phydev->link) {
+ greth->speed = 0;
+ greth->duplex = -1;
+ }
+ greth->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ pr_debug("%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ pr_debug("%s: link down\n", dev->name);
+ }
+}
+
+static int greth_mdio_probe(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = NULL;
+ int ret;
+
+ /* Find the first PHY */
+ phy = phy_find_first(greth->mdio);
+
+ if (!phy) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "no PHY found\n");
+ return -ENXIO;
+ }
+
+ ret = phy_connect_direct(dev, phy, &greth_link_change,
+ 0, greth->gbit_mac ?
+ PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return ret;
+ }
+
+ if (greth->gbit_mac)
+ phy->supported &= PHY_GBIT_FEATURES;
+ else
+ phy->supported &= PHY_BASIC_FEATURES;
+
+ phy->advertising = phy->supported;
+
+ greth->link = 0;
+ greth->speed = 0;
+ greth->duplex = -1;
+ greth->phy = phy;
+
+ return 0;
+}
+
+static inline int phy_aneg_done(struct phy_device *phydev)
+{
+ int retval;
+
+ retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+
+static int greth_mdio_init(struct greth_private *greth)
+{
+ int ret, phy;
+ unsigned long timeout;
+
+ greth->mdio = mdiobus_alloc();
+ if (!greth->mdio) {
+ return -ENOMEM;
+ }
+
+ greth->mdio->name = "greth-mdio";
+ snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
+ greth->mdio->read = greth_mdio_read;
+ greth->mdio->write = greth_mdio_write;
+ greth->mdio->reset = greth_mdio_reset;
+ greth->mdio->priv = greth;
+
+ greth->mdio->irq = greth->mdio_irqs;
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
+ greth->mdio->irq[phy] = PHY_POLL;
+
+ ret = mdiobus_register(greth->mdio);
+ if (ret) {
+ goto error;
+ }
+
+ ret = greth_mdio_probe(greth->netdev);
+ if (ret) {
+ if (netif_msg_probe(greth))
+ dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
+ goto unreg_mdio;
+ }
+
+ phy_start(greth->phy);
+
+ /* If the Ethernet debug communication link (EDCL) is in use, make
+ * autonegotiation complete right away.
+ */
+ if (greth->edcl && greth_edcl == 1) {
+ phy_start_aneg(greth->phy);
+ timeout = jiffies + 6*HZ;
+ while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
+ }
+ genphy_read_status(greth->phy);
+ greth_link_change(greth->netdev);
+ }
+
+ return 0;
+
+unreg_mdio:
+ mdiobus_unregister(greth->mdio);
+error:
+ mdiobus_free(greth->mdio);
+ return ret;
+}
+
+/* Initialize the GRETH MAC */
+static int __devinit greth_of_probe(struct platform_device *ofdev)
+{
+ struct net_device *dev;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ int i;
+ int err;
+ int tmp;
+ unsigned long timeout;
+
+ dev = alloc_etherdev(sizeof(struct greth_private));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ greth = netdev_priv(dev);
+ greth->netdev = dev;
+ greth->dev = &ofdev->dev;
+
+ if (greth_debug > 0)
+ greth->msg_enable = greth_debug;
+ else
+ greth->msg_enable = GRETH_DEF_MSG_ENABLE;
+
+ spin_lock_init(&greth->devlock);
+
+ greth->regs = of_ioremap(&ofdev->resource[0], 0,
+ resource_size(&ofdev->resource[0]),
+ "grlib-greth regs");
+
+ if (greth->regs == NULL) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "ioremap failure.\n");
+ err = -EIO;
+ goto error1;
+ }
+
+ regs = (struct greth_regs *) greth->regs;
+ greth->irq = ofdev->archdata.irqs[0];
+
+ dev_set_drvdata(greth->dev, dev);
+ SET_NETDEV_DEV(dev, greth->dev);
+
+ if (netif_msg_probe(greth))
+ dev_dbg(greth->dev, "reseting controller.\n");
+
+ /* Reset the controller. */
+ GRETH_REGSAVE(regs->control, GRETH_RESET);
+
+ /* Wait for MAC to reset itself */
+ timeout = jiffies + HZ/100;
+ while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
+ if (time_after(jiffies, timeout)) {
+ err = -EIO;
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "timeout when waiting for reset.\n");
+ goto error2;
+ }
+ }
+
+ /* Get default PHY address */
+ greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
+
+ /* Check if we have GBIT capable MAC */
+ tmp = GRETH_REGLOAD(regs->control);
+ greth->gbit_mac = (tmp >> 27) & 1;
+
+ /* Check for multicast capability */
+ greth->multicast = (tmp >> 25) & 1;
+
+ greth->edcl = (tmp >> 31) & 1;
+
+ /* If we have EDCL we disable the EDCL speed-duplex FSM so
+ * it doesn't interfere with the software */
+ if (greth->edcl != 0)
+ GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
+
+ /* Check if MAC can handle MDIO interrupts */
+ greth->mdio_int_en = (tmp >> 26) & 1;
+
+ err = greth_mdio_init(greth);
+ if (err) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "failed to register MDIO bus\n");
+ goto error2;
+ }
+
+ /* Allocate TX descriptor ring in coherent memory */
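+ /* 1024 bytes = GRETH_TXBD_NUM (128) descriptors of 8 bytes each. */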
+ greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->tx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error3;
+ }
+
+ memset(greth->tx_bd_base, 0, 1024);
+
+ /* Allocate RX descriptor ring in coherent memory */
+ greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->rx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error4;
+ }
+
+ memset(greth->rx_bd_base, 0, 1024);
+
+ /* Get MAC address from: module param, OF property or ID prom */
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i == 6) {
+ const unsigned char *addr;
+ int len;
+ addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
+ &len);
+ if (addr != NULL && len == 6) {
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) addr[i];
+ } else {
+#ifdef CONFIG_SPARC
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
+#endif
+ }
+ }
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+
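+ /* Bump the module-parameter address so a second GRETH instance
+ * probed later gets a distinct MAC.
+ */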
+ macaddr[5]++;
+
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "no valid ethernet address, aborting.\n");
+ err = -EINVAL;
+ goto error5;
+ }
+
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
+
+ /* Clear all pending interrupts except PHY irq */
+ GRETH_REGSAVE(regs->status, 0xFF);
+
+ if (greth->gbit_mac) {
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM;
+ dev->features = dev->hw_features | NETIF_F_HIGHDMA;
+ greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
+ }
+
+ if (greth->multicast) {
+ greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
+ dev->flags |= IFF_MULTICAST;
+ } else {
+ dev->flags &= ~IFF_MULTICAST;
+ }
+
+ dev->netdev_ops = &greth_netdev_ops;
+ dev->ethtool_ops = &greth_ethtool_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "netdevice registration failed.\n");
+ goto error5;
+ }
+
+ /* setup NAPI */
+ netif_napi_add(dev, &greth->napi, greth_poll, 64);
+
+ return 0;
+
+error5:
+ dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+error4:
+ dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+error3:
+ mdiobus_unregister(greth->mdio);
+error2:
+ of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
+error1:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit greth_of_remove(struct platform_device *of_dev)
+{
+ struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
+ struct greth_private *greth = netdev_priv(ndev);
+
+ /* Free descriptor areas */
+ dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+
+ dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+
+ dev_set_drvdata(&of_dev->dev, NULL);
+
+ if (greth->phy)
+ phy_stop(greth->phy);
+ mdiobus_unregister(greth->mdio);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
+
+ return 0;
+}
+
+static struct of_device_id greth_of_match[] = {
+ {
+ .name = "GAISLER_ETHMAC",
+ },
+ {
+ .name = "01_01d",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, greth_of_match);
+
+static struct platform_driver greth_of_driver = {
+ .driver = {
+ .name = "grlib-greth",
+ .owner = THIS_MODULE,
+ .of_match_table = greth_of_match,
+ },
+ .probe = greth_of_probe,
+ .remove = __devexit_p(greth_of_remove),
+};
+
+static int __init greth_init(void)
+{
+ return platform_driver_register(&greth_of_driver);
+}
+
+static void __exit greth_cleanup(void)
+{
+ platform_driver_unregister(&greth_of_driver);
+}
+
+module_init(greth_init);
+module_exit(greth_cleanup);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+#ifndef GRETH_H
+#define GRETH_H
+
+#include <linux/phy.h>
+
+/* Register bits and masks */
+#define GRETH_RESET 0x40
+#define GRETH_MII_BUSY 0x8
+#define GRETH_MII_NVALID 0x10
+
+#define GRETH_CTRL_FD 0x10
+#define GRETH_CTRL_PR 0x20
+#define GRETH_CTRL_SP 0x80
+#define GRETH_CTRL_GB 0x100
+#define GRETH_CTRL_PSTATIEN 0x400
+#define GRETH_CTRL_MCEN 0x800
+#define GRETH_CTRL_DISDUPLEX 0x1000
+#define GRETH_STATUS_PHYSTAT 0x100
+
+#define GRETH_BD_EN 0x800
+#define GRETH_BD_WR 0x1000
+#define GRETH_BD_IE 0x2000
+#define GRETH_BD_LEN 0x7FF
+
+#define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
+#define GRETH_INT_TX 0x8
+#define GRETH_TXI 0x4
+#define GRETH_TXBD_STATUS 0x0001C000
+#define GRETH_TXBD_MORE 0x20000
+#define GRETH_TXBD_IPCS 0x40000
+#define GRETH_TXBD_TCPCS 0x80000
+#define GRETH_TXBD_UDPCS 0x100000
+#define GRETH_TXBD_CSALL (GRETH_TXBD_IPCS | GRETH_TXBD_TCPCS | GRETH_TXBD_UDPCS)
+#define GRETH_TXBD_ERR_LC 0x10000
+#define GRETH_TXBD_ERR_UE 0x4000
+#define GRETH_TXBD_ERR_AL 0x8000
+
+#define GRETH_INT_RE 0x1
+#define GRETH_INT_RX 0x4
+#define GRETH_RXEN 0x2
+#define GRETH_RXI 0x8
+#define GRETH_RXBD_STATUS 0xFFFFC000
+#define GRETH_RXBD_ERR_AE 0x4000
+#define GRETH_RXBD_ERR_FT 0x8000
+#define GRETH_RXBD_ERR_CRC 0x10000
+#define GRETH_RXBD_ERR_OE 0x20000
+#define GRETH_RXBD_ERR_LE 0x40000
+#define GRETH_RXBD_IP 0x80000
+#define GRETH_RXBD_IP_CSERR 0x100000
+#define GRETH_RXBD_UDP 0x200000
+#define GRETH_RXBD_UDP_CSERR 0x400000
+#define GRETH_RXBD_TCP 0x800000
+#define GRETH_RXBD_TCP_CSERR 0x1000000
+#define GRETH_RXBD_IP_FRAG 0x2000000
+#define GRETH_RXBD_MCAST 0x4000000
+
+/* Descriptor parameters */
+#define GRETH_TXBD_NUM 128
+#define GRETH_TXBD_NUM_MASK (GRETH_TXBD_NUM-1)
+#define GRETH_TX_BUF_SIZE 2048
+#define GRETH_RXBD_NUM 128
+#define GRETH_RXBD_NUM_MASK (GRETH_RXBD_NUM-1)
+#define GRETH_RX_BUF_SIZE 2048
+
+/* Buffers per page */
+#define GRETH_RX_BUF_PPGAE (PAGE_SIZE/GRETH_RX_BUF_SIZE)
+#define GRETH_TX_BUF_PPGAE (PAGE_SIZE/GRETH_TX_BUF_SIZE)
+
+/* How many pages are needed for buffers */
+#define GRETH_RX_BUF_PAGE_NUM (GRETH_RXBD_NUM/GRETH_RX_BUF_PPGAE)
+#define GRETH_TX_BUF_PAGE_NUM (GRETH_TXBD_NUM/GRETH_TX_BUF_PPGAE)
+
+/* Buffer size.
+ * Gbit MAC uses tagged maximum frame size which is 1518 excluding CRC.
+ * Set to 1520 to make all buffers word aligned for non-gbit MAC.
+ */
+#define MAX_FRAME_SIZE 1520
+
+/* GRETH APB registers */
+struct greth_regs {
+ u32 control;
+ u32 status;
+ u32 esa_msb;
+ u32 esa_lsb;
+ u32 mdio;
+ u32 tx_desc_p;
+ u32 rx_desc_p;
+ u32 edclip;
+ u32 hash_msb;
+ u32 hash_lsb;
+};
+
+/* GRETH buffer descriptor */
+struct greth_bd {
+ u32 stat;
+ u32 addr;
+};
+
+struct greth_private {
+ struct sk_buff *rx_skbuff[GRETH_RXBD_NUM];
+ struct sk_buff *tx_skbuff[GRETH_TXBD_NUM];
+
+ unsigned char *tx_bufs[GRETH_TXBD_NUM];
+ unsigned char *rx_bufs[GRETH_RXBD_NUM];
+ u16 tx_bufs_length[GRETH_TXBD_NUM];
+
+ u16 tx_next;
+ u16 tx_last;
+ u16 tx_free;
+ u16 rx_cur;
+
+ struct greth_regs *regs; /* Address of controller registers. */
+ struct greth_bd *rx_bd_base; /* Address of Rx BDs. */
+ struct greth_bd *tx_bd_base; /* Address of Tx BDs. */
+ dma_addr_t rx_bd_base_phys;
+ dma_addr_t tx_bd_base_phys;
+
+ int irq;
+
+ struct device *dev; /* Pointer to platform_device->dev */
+ struct net_device *netdev;
+ struct napi_struct napi;
+ spinlock_t devlock;
+
+ struct phy_device *phy;
+ struct mii_bus *mdio;
+ int mdio_irqs[PHY_MAX_ADDR];
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ u32 msg_enable;
+
+ u8 phyaddr;
+ u8 multicast;
+ u8 gbit_mac;
+ u8 mdio_int_en;
+ u8 edcl;
+};
+
+#endif
--- /dev/null
+/*
+ * linux/drivers/net/am79c961.c
+ *
+ * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from various things including skeleton.c
+ *
+ * This is a special driver for the am79c961A Lance chip used in the
+ * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please
+ * note that this cannot be built as a module (it doesn't make sense).
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/system.h>
+
+#define TX_BUFFERS 15
+#define RX_BUFFERS 25
+
+#include "am79c961a.h"
+
+static irqreturn_t
+am79c961_interrupt (int irq, void *dev_id);
+
+static unsigned int net_debug = NET_DEBUG;
+
+static const char version[] =
+ "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
+
+/* --------------------------------------------------------------------------- */
+
+#ifdef __arm__
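+/* Chip register accessors: each access first writes the register
+ * number to the RAP (register address port) and then transfers data
+ * through the RDP (data port) or IDP (ISA-bus data port), all at
+ * fixed addresses in the EBSA110's ISA I/O space.
+ */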
+static void write_rreg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #-4] @ NET_RDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_rreg(u_long base_addr, u_int reg)
+{
+ unsigned short v;
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "ldr%?h %0, [%2, #-4] @ NET_RDP"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+static inline void write_ireg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #8] @ NET_IDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_ireg(u_long base_addr, u_int reg)
+{
+ u_short v;
+ asm volatile(
+ "str%?h %1, [%2] @ NAT_RAP\n\t"
+ "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
+#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
+
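+/* The 16-bit packet RAM is mapped on 32-bit boundaries, hence the
+ * "offset << 1" scaling and the halfword accesses striding by 4 bytes
+ * in the copy loops below.
+ */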
+static void
+am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ asm volatile("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3");
+ asm volatile(
+ "ldm%?ia %0!, {%1, %2}"
+ : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+ length -= 8;
+ asm volatile(
+ "str%?h %1, [%0], #4\n\t"
+ "mov%? %1, %1, lsr #16\n\t"
+ "str%?h %1, [%0], #4\n\t"
+ "str%?h %2, [%0], #4\n\t"
+ "mov%? %2, %2, lsr #16\n\t"
+ "str%?h %2, [%0], #4"
+ : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
+ }
+ while (length > 0) {
+ asm volatile("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+}
+
+static void
+am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ unsigned int tmp;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
+ "ldr%?h %3, [%0], #4\n\t"
+ "orr%? %2, %2, %4, lsl #16\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
+ "orr%? %3, %3, %4, lsl #16\n\t"
+ "stm%?ia %1!, {%2, %3}"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
+ : "0" (offset), "1" (buf));
+ length -= 8;
+ }
+ while (length > 0) {
+ unsigned int tmp;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
+ length -= 2;
+ }
+}
+#else
+#error Not compatible
+#endif
+
+static int
+am79c961_ramtest(struct net_device *dev, unsigned int val)
+{
+ unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
+ int i, error = 0, errorcount = 0;
+
+ if (!buffer)
+ return 0;
+ memset (buffer, val, 65536);
+ am_writebuffer(dev, 0, buffer, 65536);
+ memset (buffer, val ^ 255, 65536);
+ am_readbuffer(dev, 0, buffer, 65536);
+ for (i = 0; i < 65536; i++) {
+ if (buffer[i] != val && !error) {
+ printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
+ error = 1;
+ errorcount ++;
+ } else if (error && buffer[i] == val) {
+ printk ("%05X\n", i);
+ error = 0;
+ }
+ }
+ if (error)
+ printk ("10000\n");
+ kfree (buffer);
+ return errorcount;
+}
+
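+/* The Lance hash filter is 64 bits split across four 16-bit registers:
+ * the top two bits of the CRC select the register, the next four bits
+ * select the bit within it.
+ */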
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+ int idx, bit;
+ u32 crc;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
+
+ hash[idx] |= 1 << bit;
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+ unsigned int mode = MODE_PORT_10BT;
+
+ if (dev->flags & IFF_PROMISC) {
+ mode |= MODE_PROMISC;
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else if (dev->flags & IFF_ALLMULTI) {
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(hash, 0, 4 * sizeof(*hash));
+
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, hash);
+ }
+
+ return mode;
+}
+
+static void
+am79c961_init_for_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ unsigned char *p;
+ u_int hdr_addr, first_free_addr;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i;
+
+ /*
+ * Stop the chip.
+ */
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
+ write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
+ write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
+ write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
+
+ for (i = LADRL; i <= LADRH; i++)
+ write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
+
+ for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
+ write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
+
+ write_rreg (dev->base_addr, MODE, mode);
+ write_rreg (dev->base_addr, POLLINT, 0);
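+ /* The ring-size registers take the two's complement of the number
+ * of entries.
+ */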
+ write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
+ write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
+
+ first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
+ hdr_addr = 0;
+
+ priv->rxhead = 0;
+ priv->rxtail = 0;
+ priv->rxhdr = hdr_addr;
+
+ for (i = 0; i < RX_BUFFERS; i++) {
+ priv->rxbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, RMD_OWN);
+ am_writeword (dev, hdr_addr + 4, (-1600));
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+ priv->txhead = 0;
+ priv->txtail = 0;
+ priv->txhdr = hdr_addr;
+ for (i = 0; i < TX_BUFFERS; i++) {
+ priv->txbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
+ am_writeword (dev, hdr_addr + 4, 0xf000);
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+
+ write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
+ write_rreg (dev->base_addr, BASERXH, 0);
+ write_rreg (dev->base_addr, BASETXL, priv->txhdr);
+ write_rreg (dev->base_addr, BASETXH, 0);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
+ write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
+ write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
+}
+
+static void am79c961_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int lnkstat, carrier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ carrier = netif_carrier_ok(dev);
+
+ if (lnkstat && !carrier) {
+ netif_carrier_on(dev);
+ printk("%s: link up\n", dev->name);
+ } else if (!lnkstat && carrier) {
+ netif_carrier_off(dev);
+ printk("%s: link down\n", dev->name);
+ }
+
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
+}
+
+/*
+ * Open/initialize the board.
+ */
+static int
+am79c961_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ am79c961_init_for_open(dev);
+
+ netif_carrier_off(dev);
+
+ priv->timer.expires = jiffies;
+ add_timer(&priv->timer);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to am79c961_open().
+ */
+static int
+am79c961_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&priv->timer);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ free_irq (dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear promiscuous/multicast mode filter for this adapter.
+ */
+static void am79c961_setmulticastlist (struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i, stopped;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+
+ stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
+
+ if (!stopped) {
+ /*
+ * Put the chip into suspend mode
+ */
+ write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
+
+ /*
+ * Spin waiting for chip to report suspend mode
+ */
+ while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ nop();
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ }
+ }
+
+ /*
+ * Update the multicast hash table
+ */
+ for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
+ write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
+
+ /*
+ * Write the mode register
+ */
+ write_rreg(dev->base_addr, MODE, mode);
+
+ if (!stopped) {
+ /*
+ * Put the chip back into running mode
+ */
+ write_rreg(dev->base_addr, CTRL1, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+}
+
+static void am79c961_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
+ dev->name);
+
+ /*
+ * ought to do some setup of the tx side here
+ */
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Transmit a packet
+ */
+static int
+am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int hdraddr, bufaddr;
+ unsigned int head;
+ unsigned long flags;
+
+ head = priv->txhead;
+ hdraddr = priv->txhdr + (head << 3);
+ bufaddr = priv->txbuffer[head];
+ head += 1;
+ if (head >= TX_BUFFERS)
+ head = 0;
+
+ am_writebuffer (dev, bufaddr, skb->data, skb->len);
+ am_writeword (dev, hdraddr + 4, -skb->len);
+ am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
+ priv->txhead = head;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ /*
+ * If the next packet is owned by the ethernet device,
+ * then the tx ring is full and we can't add another
+ * packet.
+ */
+ if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
+ netif_stop_queue(dev);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
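+/*
+ * Illustrative note: each TX descriptor is 8 bytes (hence "head << 3"); the
+ * word at offset 4 holds the buffer length in two's complement and the word
+ * at offset 2 the ownership/start/end flags, so TMD_OWN is written last to
+ * hand the filled descriptor over to the chip.
+ */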
+
+/*
+ * If we have a good packet(s), get it/them out of the buffers.
+ */
+static void
+am79c961_rx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ struct sk_buff *skb;
+ u_int hdraddr;
+ u_int pktaddr;
+ u_int status;
+ int len;
+
+ hdraddr = priv->rxhdr + (priv->rxtail << 3);
+ pktaddr = priv->rxbuffer[priv->rxtail];
+
+ status = am_readword (dev, hdraddr + 2);
+ if (status & RMD_OWN) /* do we own it? */
+ break;
+
+ priv->rxtail ++;
+ if (priv->rxtail >= RX_BUFFERS)
+ priv->rxtail = 0;
+
+ if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ dev->stats.rx_errors++;
+ if (status & RMD_ERR) {
+ if (status & RMD_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RMD_CRC)
+ dev->stats.rx_crc_errors++;
+ } else if (status & RMD_STP)
+ dev->stats.rx_length_errors++;
+ continue;
+ }
+
+ len = am_readword(dev, hdraddr + 6);
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb) {
+ skb_reserve(skb, 2);
+
+ am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
+ am_writeword(dev, hdraddr + 2, RMD_OWN);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ } else {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ break;
+ }
+ } while (1);
+}
+
+/*
+ * Update stats for the transmitted packet
+ */
+static void
+am79c961_tx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ short len;
+ u_int hdraddr;
+ u_int status;
+
+ hdraddr = priv->txhdr + (priv->txtail << 3);
+ status = am_readword (dev, hdraddr + 2);
+ if (status & TMD_OWN)
+ break;
+
+ priv->txtail ++;
+ if (priv->txtail >= TX_BUFFERS)
+ priv->txtail = 0;
+
+ if (status & TMD_ERR) {
+ u_int status2;
+
+ dev->stats.tx_errors++;
+
+ status2 = am_readword (dev, hdraddr + 6);
+
+ /*
+ * Clear the error byte
+ */
+ am_writeword (dev, hdraddr + 6, 0);
+
+ if (status2 & TST_RTRY)
+ dev->stats.collisions += 16;
+ if (status2 & TST_LCOL)
+ dev->stats.tx_window_errors++;
+ if (status2 & TST_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (status2 & TST_UFLO)
+ dev->stats.tx_fifo_errors++;
+ continue;
+ }
+ dev->stats.tx_packets++;
+ len = am_readword (dev, hdraddr + 4);
+ dev->stats.tx_bytes += -len;
+ } while (priv->txtail != priv->txhead);
+
+ netif_wake_queue(dev);
+}
+
+static irqreturn_t
+am79c961_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ u_int status, n = 100;
+ int handled = 0;
+
+ do {
+ status = read_rreg(dev->base_addr, CSR0);
+ write_rreg(dev->base_addr, CSR0, status &
+ (CSR0_IENA|CSR0_TINT|CSR0_RINT|
+ CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
+
+ if (status & CSR0_RINT) {
+ handled = 1;
+ am79c961_rx(dev, priv);
+ }
+ if (status & CSR0_TINT) {
+ handled = 1;
+ am79c961_tx(dev, priv);
+ }
+ if (status & CSR0_MISS) {
+ handled = 1;
+ dev->stats.rx_dropped++;
+ }
+ if (status & CSR0_CERR) {
+ handled = 1;
+ mod_timer(&priv->timer, jiffies);
+ }
+ } while (--n && status & (CSR0_RINT | CSR0_TINT));
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void am79c961_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ am79c961_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Initialise the chip. Note that we always expect
+ * to be entered with interrupts enabled.
+ */
+static int
+am79c961_hw_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->chip_lock);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irq(&priv->chip_lock);
+
+ am79c961_ramtest(dev, 0x66);
+ am79c961_ramtest(dev, 0x99);
+
+ return 0;
+}
+
+static void __init am79c961_banner(void)
+{
+ static unsigned version_printed;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static const struct net_device_ops am79c961_netdev_ops = {
+ .ndo_open = am79c961_open,
+ .ndo_stop = am79c961_close,
+ .ndo_start_xmit = am79c961_sendpacket,
+ .ndo_set_rx_mode = am79c961_setmulticastlist,
+ .ndo_tx_timeout = am79c961_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = am79c961_poll_controller,
+#endif
+};
+
+static int __devinit am79c961_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *dev;
+ struct dev_priv *priv;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ ret = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ /*
+ * Fixed address and IRQ lines here.
+ * The PNP initialisation should have been
+ * done by the ether bootp loader.
+ */
+ dev->base_addr = res->start;
+ ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto nodev;
+ }
+ dev->irq = ret;
+
+ ret = -ENODEV;
+ if (!request_region(dev->base_addr, 0x18, dev->name))
+ goto nodev;
+
+ /*
+ * Reset the device.
+ */
+ inb(dev->base_addr + NET_RESET);
+ udelay(5);
+
+ /*
+ * Check the manufacturer part of the
+ * ether address.
+ */
+ if (inb(dev->base_addr) != 0x08 ||
+ inb(dev->base_addr + 2) != 0x00 ||
+ inb(dev->base_addr + 4) != 0x2b)
+ goto release;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
+
+ am79c961_banner();
+
+ spin_lock_init(&priv->chip_lock);
+ init_timer(&priv->timer);
+ priv->timer.data = (unsigned long)dev;
+ priv->timer.function = am79c961_timer;
+
+ if (am79c961_hw_init(dev))
+ goto release;
+
+ dev->netdev_ops = &am79c961_netdev_ops;
+
+ ret = register_netdev(dev);
+ if (ret == 0) {
+ printk(KERN_INFO "%s: ether address %pM\n",
+ dev->name, dev->dev_addr);
+ return 0;
+ }
+
+release:
+ release_region(dev->base_addr, 0x18);
+nodev:
+ free_netdev(dev);
+out:
+ return ret;
+}
+
+static struct platform_driver am79c961_driver = {
+ .probe = am79c961_probe,
+ .driver = {
+ .name = "am79c961",
+ },
+};
+
+static int __init am79c961_init(void)
+{
+ return platform_driver_register(&am79c961_driver);
+}
+
+__initcall(am79c961_init);
--- /dev/null
- #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
+/* bnx2x.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+#ifndef BNX2X_H
+#define BNX2X_H
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+/* compilation time flags */
+
+/* define this to make the driver freeze on error to allow getting debug info
+ * (you will need to reboot afterwards) */
+/* #define BNX2X_STOP_ON_ERROR */
+
+#define DRV_MODULE_VERSION "1.70.00-0"
+#define DRV_MODULE_RELDATE "2011/06/13"
+#define BNX2X_BC_VER 0x040200
+
+#if defined(CONFIG_DCB)
+#define BCM_DCBNL
+#endif
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "../cnic_if.h"
+#endif
+
+#ifdef BCM_CNIC
+#define BNX2X_MIN_MSIX_VEC_CNT 3
+#define BNX2X_MSIX_VEC_FP_START 2
+#else
+#define BNX2X_MIN_MSIX_VEC_CNT 2
+#define BNX2X_MSIX_VEC_FP_START 1
+#endif
+
+#include <linux/mdio.h>
+
+#include "bnx2x_reg.h"
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_hsi.h"
+#include "bnx2x_link.h"
+#include "bnx2x_sp.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_stats.h"
+
+/* error/debug prints */
+
+#define DRV_MODULE_NAME "bnx2x"
+
+/* for messages that are currently off */
+#define BNX2X_MSG_OFF 0
+#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
+#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
+
+/* regular debug print */
+#define DP(__mask, fmt, ...) \
+do { \
+ if (bp->msg_enable & (__mask)) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define DP_CONT(__mask, fmt, ...) \
+do { \
+ if (bp->msg_enable & (__mask)) \
+ pr_cont(fmt, ##__VA_ARGS__); \
+} while (0)
+
+/* errors debug print */
+#define BNX2X_DBG_ERR(fmt, ...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* for errors (never masked) */
+#define BNX2X_ERR(fmt, ...) \
+do { \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define BNX2X_ERROR(fmt, ...) \
+ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+
+/* before we have a dev->name use dev_info() */
+#define BNX2X_DEV_INFO(fmt, ...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#ifdef BNX2X_STOP_ON_ERROR
+void bnx2x_int_disable(struct bnx2x *bp);
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_int_disable(bp); \
+ bnx2x_panic_dump(bp); \
+} while (0)
+#else
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_panic_dump(bp); \
+} while (0)
+#endif
+
+#define bnx2x_mc_addr(ha) ((ha)->addr)
+#define bnx2x_uc_addr(ha) ((ha)->addr)
+
+#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
+#define U64_HI(x) (u32)(((u64)(x)) >> 32)
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
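+/*
+ * Illustrative example: for the 64-bit value 0x1122334455667788,
+ * U64_HI() yields 0x11223344 and U64_LO() yields 0x55667788;
+ * HILO_U64(0x11223344, 0x55667788) reassembles the original value. This is
+ * how 64-bit DMA addresses are split across pairs of 32-bit fields.
+ */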
+
+
+#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
+
+#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
+#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
+
+#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
+#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
+#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
+
+#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
+#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
+
+#define REG_RD_DMAE(bp, offset, valp, len32) \
+ do { \
+ bnx2x_read_dmae(bp, offset, len32);\
+ memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
+ } while (0)
+
+#define REG_WR_DMAE(bp, offset, valp, len32) \
+ do { \
+ memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
+ offset, len32); \
+ } while (0)
+
+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+ REG_WR_DMAE(bp, offset, valp, len32)
+
+#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
+ do { \
+ memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
+ bnx2x_write_big_buf_wb(bp, addr, len32); \
+ } while (0)
+
+#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
+ offsetof(struct shmem_region, field))
+#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
+#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
+
+#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
+ offsetof(struct shmem2_region, field))
+#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
+#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
+ offsetof(struct mf_cfg, field))
+#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
+ offsetof(struct mf2_cfg, field))
+
+#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
+ MF_CFG_ADDR(bp, field), (val))
+#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
+
+#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
+ (SHMEM2_RD((bp), size) > \
+ offsetof(struct shmem2_region, field)))
+
+#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+
+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS 3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS 7
+
+/* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+/* Special clients parameters */
+
+/* SB indices */
+/* FCoE L2 */
+#define BNX2X_FCOE_L2_RX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
+
+#define BNX2X_FCOE_L2_TX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
+
+/**
+ * CIDs and CLIDs:
+ * The CLIDs below are for func 0; the CLID for any other
+ * function is calculated by the formula:
+ *
+ * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+/* iSCSI L2 */
+#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
+#define BNX2X_ISCSI_ETH_CID 49
+
+/* FCoE L2 */
+#define BNX2X_FCOE_ETH_CL_ID_IDX 2
+#define BNX2X_FCOE_ETH_CID 50
+
+/** Additional rings budgeting */
+#ifdef BCM_CNIC
+#define CNIC_PRESENT 1
+#define FCOE_PRESENT 1
+#else
+#define CNIC_PRESENT 0
+#define FCOE_PRESENT 0
+#endif /* BCM_CNIC */
+#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
+
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
+#define SM_RX_ID 0
+#define SM_TX_ID 1
+
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX 1
+#define FIRST_TX_COS_INDEX 0
+
+/* defines for decoding the fastpath index and the cos index out of the
+ * transmission queue index
+ */
+#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
+
+#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
+#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
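+/*
+ * Worked example (illustrative): with MAX_TXQS_PER_COS == 16 (FP_SB_MAX_E1x),
+ * transmission queue index 20 decodes to fastpath index TXQ_TO_FP(20) == 4
+ * and class of service TXQ_TO_COS(20) == 1.
+ */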
+
+/* rules for calculating the cids of tx-only connections */
+#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
+#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+
+/* fp index inside class of service range */
+#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
+
+/*
+ * 0..15 eth cos0
+ * 16..31 eth cos1 if applicable
+ * 32..47 eth cos2 If applicable
+ * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+ */
+#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
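+/*
+ * For example (illustrative): with MAX_TXQS_PER_COS == 16 and max_cos == 2,
+ * eth txqs 0..15 carry cos0, txqs 16..31 carry cos1, and the FCoE queue
+ * takes index MAX_ETH_TXQ_IDX(bp) == 32, directly after the eth queues.
+ */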
+
+/* fast path */
+struct sw_rx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u16 first_bd;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD (1<<0)
+};
+
+struct sw_rx_page {
+ struct page *page;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+union db_prod {
+ struct doorbell_set_prod data;
+ u32 raw;
+};
+
++/* dropless fc FW/HW related params */
++#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
++#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
++ ETH_MAX_AGGREGATION_QUEUES_E1 :\
++ ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
++#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
++#define FW_PREFETCH_CNT 16
++#define DROPLESS_FC_HEADROOM 100
+
+/* MC hsi */
+#define BCM_PAGE_SHIFT 12
+#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
+
+#define PAGES_PER_SGE_SHIFT 0
+#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE PAGE_SIZE
+#define SGE_PAGE_SHIFT PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+
+/* SGE ring related macros */
+#define NUM_RX_SGE_PAGES 2
+#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
- (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
++#define NEXT_PAGE_SGE_DESC_CNT 2
++#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
+/* RX_SGE_CNT is promised to be a power of 2 */
+#define RX_SGE_MASK (RX_SGE_CNT - 1)
+#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE (NUM_RX_SGE - 1)
+#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
- #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
++ (MAX_RX_SGE_CNT - 1)) ? \
++ (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
++ (x) + 1)
+#define RX_SGE(x) ((x) & MAX_RX_SGE)
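+/*
+ * Worked example (illustrative; assumes an 8-byte struct eth_rx_sge, giving
+ * RX_SGE_CNT == 512 and MAX_RX_SGE_CNT == 510): entries 510 and 511 of each
+ * page are the "next page" link descriptors, so NEXT_SGE_IDX(509) == 512,
+ * skipping them, while NEXT_SGE_IDX(100) == 101. NEXT_TX_IDX, NEXT_RX_IDX
+ * and NEXT_RCQ_IDX below follow the same pattern with their own
+ * NEXT_PAGE_*_DESC_CNT values.
+ */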
+
++/*
++ * Number of required SGEs is the sum of two:
++ * 1. Number of possible open aggregations (the next packet for
++ *    these aggregations will probably consume an SGE immediately)
++ * 2. Rest of the BRB blocks divided by 2 (a block will consume a new SGE
++ *    only after placement on a BD for a new TPA aggregation)
++ *
++ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
++ */
++#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
++ (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
++#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
++ MAX_RX_SGE_CNT)
++#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
++ NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
++#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
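++/*
++ * Worked example (illustrative; assumes a non-E3 chip so BRB_SIZE == 512,
++ * ETH_MAX_AGGREGATION_QUEUES_E1H_E2 == 64 and MAX_RX_SGE_CNT == 510):
++ * NUM_SGE_REQ    = 64 + (512 - 64)/2 = 288
++ * NUM_SGE_PG_REQ = DIV_ROUND_UP(288, 510) = 1
++ * SGE_TH_LO      = 288 + 1 * 2 = 290
++ * SGE_TH_HI      = 290 + DROPLESS_FC_HEADROOM = 390
++ */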
+/* Manipulate a bit vector defined as an array of u64 */
+
+/* Number of bits in one sge_mask array element */
+#define BIT_VEC64_ELEM_SZ 64
+#define BIT_VEC64_ELEM_SHIFT 6
+#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
+
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+ do { \
+ el = ((el) | ((u64)0x1 << (bit))); \
+ } while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+ do { \
+ el = ((el) & (~((u64)0x1 << (bit)))); \
+ } while (0)
+
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+ __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+ (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+ ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
+
+/* Creates a bitmask of all ones in the less significant bits.
+   idx - index of the most significant bit in the created mask */
+#define BIT_VEC64_ONES_MASK(idx) \
+ (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0))
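+/*
+ * Worked example (illustrative): BIT_VEC64_SET_BIT(vec, 70) selects element
+ * 70 >> 6 == 1 and sets bit 70 & 63 == 6 in it, i.e. vec[1] |= 0x40.
+ * BIT_VEC64_ONES_MASK(70) yields ((u64)0x1 << 7) - 1 == 0x7f: all ones up
+ * to and including that bit position within the element.
+ */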
+
+/*******************************************************/
+
+
+
+/* Number of u64 elements in SGE mask array */
+#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
+ BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
+#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
+
+union host_hc_status_block {
+ /* pointer to fp status block e1x */
+ struct host_hc_status_block_e1x *e1x_sb;
+ /* pointer to fp status block e2 */
+ struct host_hc_status_block_e2 *e2_sb;
+};
+
+struct bnx2x_agg_info {
+ /*
+ * The first aggregation buffer is an skb; the following ones are pages.
+ * We will preallocate the skbs for each aggregation when
+ * we open the interface and will replace the BD at the consumer
+ * with this one when we receive the TPA_START CQE in order to
+ * keep the Rx BD ring consistent.
+ */
+ struct sw_rx_bd first_buf;
+ u8 tpa_state;
+#define BNX2X_TPA_START 1
+#define BNX2X_TPA_STOP 2
+#define BNX2X_TPA_ERROR 3
+ u8 placement_offset;
+ u16 parsing_flags;
+ u16 vlan_tag;
+ u16 len_on_bd;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct bnx2x_fp_txdata {
+
+ struct sw_tx_bd *tx_buf_ring;
+
+ union eth_tx_bd_types *tx_desc_ring;
+ dma_addr_t tx_desc_mapping;
+
+ u32 cid;
+
+ union db_prod tx_db;
+
+ u16 tx_pkt_prod;
+ u16 tx_pkt_cons;
+ u16 tx_bd_prod;
+ u16 tx_bd_cons;
+
+ unsigned long tx_pkt;
+
+ __le16 *tx_cons_sb;
+
+ int txq_index;
+};
+
+struct bnx2x_fastpath {
+ struct bnx2x *bp; /* parent */
+
+#define BNX2X_NAPI_WEIGHT 128
+ struct napi_struct napi;
+ union host_hc_status_block status_blk;
+ /* chip independent shortcuts into sb structure */
+ __le16 *sb_index_values;
+ __le16 *sb_running_index;
+ /* chip independent shortcut into rx_prods_offset memory */
+ u32 ustorm_rx_prods_offset;
+
+ u32 rx_buf_size;
+
+ dma_addr_t status_blk_mapping;
+
+ u8 max_cos; /* actual number of active tx coses */
+ struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
+
+ struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
+ struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
+
+ struct eth_rx_bd *rx_desc_ring;
+ dma_addr_t rx_desc_mapping;
+
+ union eth_rx_cqe *rx_comp_ring;
+ dma_addr_t rx_comp_mapping;
+
+ /* SGE ring */
+ struct eth_rx_sge *rx_sge_ring;
+ dma_addr_t rx_sge_mapping;
+
+ u64 sge_mask[RX_SGE_MASK_LEN];
+
+ u32 cid;
+
+ __le16 fp_hc_idx;
+
+ u8 index; /* number in fp array */
+ u8 cl_id; /* eth client id */
+ u8 cl_qzone_id;
+ u8 fw_sb_id; /* status block number in FW */
+ u8 igu_sb_id; /* status block number in HW */
+
+ u16 rx_bd_prod;
+ u16 rx_bd_cons;
+ u16 rx_comp_prod;
+ u16 rx_comp_cons;
+ u16 rx_sge_prod;
+ /* The last maximal completed SGE */
+ u16 last_max_sge;
+ __le16 *rx_cons_sb;
+ unsigned long rx_pkt,
+ rx_calls;
+
+ /* TPA related */
+ struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+ u8 disable_tpa;
+#ifdef BNX2X_STOP_ON_ERROR
+ u64 tpa_queue_used;
+#endif
+
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+
+ /* The size is calculated using the following:
+ sizeof name field from netdev structure +
+ 4 ('-Xx-' string) +
+ 4 (for the digits and to make it DWORD aligned) */
+#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+ char name[FP_NAME_SIZE];
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+
+};
+
+#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
+
+/* FCoE L2 `fastpath' entry is right after the eth entries */
+#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata[FIRST_TX_COS_INDEX].var)
+
+
+#define IS_ETH_FP(fp) (fp->index < \
+ BNX2X_NUM_ETH_QUEUES(fp->bp))
+#ifdef BCM_CNIC
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#else
+#define IS_FCOE_FP(fp) false
+#define IS_FCOE_IDX(idx) false
+#endif
+
+
+/* MC hsi */
+#define MAX_FETCH_BD 13 /* HW max BDs per packet */
+#define RX_COPY_THRESH 92
+
+#define NUM_TX_RINGS 16
+#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
- (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++#define NEXT_PAGE_TX_DESC_CNT 1
++#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
+#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD (NUM_TX_BD - 1)
+#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
- #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
++ (MAX_TX_DESC_CNT - 1)) ? \
++ (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
++ (x) + 1)
+#define TX_BD(x) ((x) & MAX_TX_BD)
+#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
+
+/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
+#define NUM_RX_RINGS 8
+#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
- #define MIN_RX_AVAIL 128
++#define NEXT_PAGE_RX_DESC_CNT 2
++#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
+#define RX_DESC_MASK (RX_DESC_CNT - 1)
+#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD (NUM_RX_BD - 1)
+#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
- (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
++
++/* dropless fc calculations for BDs
++ *
++ * The number of BDs should match the number of buffers in the BRB:
++ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
++ * "next" elements on each page
++ */
++#define NUM_BD_REQ BRB_SIZE(bp)
++#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
++ MAX_RX_DESC_CNT)
++#define BD_TH_LO(bp) (NUM_BD_REQ + \
++ NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
++ FW_DROP_LEVEL(bp))
++#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
++#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
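++/*
++ * Worked example (illustrative; assumes a non-E3 chip with BRB_SIZE == 512,
++ * MAX_SPQ_PENDING == 8, MAX_AGG_QS == 64 and an 8-byte struct eth_rx_bd,
++ * so MAX_RX_DESC_CNT == 510):
++ * FW_DROP_LEVEL = 3 + 8 + 64 = 75
++ * NUM_BD_PG_REQ = DIV_ROUND_UP(512, 510) = 2
++ * BD_TH_LO      = 512 + 2 * 2 + 75 = 591
++ * BD_TH_HI      = 591 + 100 = 691
++ * With dropless_fc enabled, MIN_RX_AVAIL = 691 + 128 = 819.
++ */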
+
+#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
+ ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+ ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+ MIN_RX_AVAIL))
+
+#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
- #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
++ (MAX_RX_DESC_CNT - 1)) ? \
++ (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
++ (x) + 1)
+#define RX_BD(x) ((x) & MAX_RX_BD)
+
+/*
+ * As long as a CQE is X times bigger than a BD entry we have to allocate X
+ * times more pages for the CQ ring in order to keep it balanced with the BD
+ * ring
+ */
+#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
+#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
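+/*
+ * For example (illustrative; assuming a 32-byte union eth_rx_cqe and an
+ * 8-byte struct eth_rx_bd): CQE_BD_REL == 4, so the NUM_RX_RINGS == 8 BD
+ * pages are balanced by NUM_RCQ_RINGS == 32 CQ pages of RCQ_DESC_CNT == 128
+ * entries each.
+ */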
- (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++#define NEXT_PAGE_RCQ_DESC_CNT 1
++#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
+#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
- #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */
- /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
- #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
- /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
- #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
- /* (HC_INDEX_U_ETH_RX_BD_CONS) */
-
- #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
- /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
++ (MAX_RCQ_DESC_CNT - 1)) ? \
++ (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
++ (x) + 1)
+#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
+
++/* dropless fc calculations for RCQs
++ *
++ * The number of RCQs should match the number of buffers in the BRB:
++ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
++ * "next" elements on each page
++ */
++#define NUM_RCQ_REQ BRB_SIZE(bp)
++#define NUM_RCQ_PG_REQ ((NUM_RCQ_REQ + MAX_RCQ_DESC_CNT - 1) / \
++ MAX_RCQ_DESC_CNT)
++#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
++ NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
++ FW_DROP_LEVEL(bp))
++#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
+
+/* This is needed for determining of last_max */
+#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
+
+
+#define BNX2X_SWCID_SHIFT 17
+#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
+
+/* used on a CID received from the HW */
+#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK)
+#define CQE_CMD(x) (le32_to_cpu(x) >> \
+ COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
+ le32_to_cpu((bd)->addr_lo))
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
+
+#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
+#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
+#define DPM_TRIGER_TYPE 0x40
+#define DOORBELL(bp, cid, val) \
+ do { \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
+ DPM_TRIGER_TYPE); \
+ } while (0)
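+/*
+ * Illustrative: assuming the 128-byte doorbell stride (BNX2X_DB_SHIFT == 7,
+ * so bp->db_size == 128), the doorbell for cid 3 is written at offset
+ * 3 * 128 + DPM_TRIGER_TYPE == 0x1c0 into the doorbell BAR.
+ */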
+
+
+/* TX CSUM helpers */
+#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
+ skb->csum_offset)
+#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
+ skb->csum_offset))
+
+#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
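+/*
+ * Illustrative: tcp_flag_word() is the 32-bit TCP header word holding data
+ * offset, flags and window; (ntohl(word) >> 16) & 0xff extracts the flags
+ * byte, e.g. 0x02 for SYN and 0x12 for SYN|ACK.
+ */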
+
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 0x1
+#define XMIT_CSUM_V6 0x2
+#define XMIT_CSUM_TCP 0x4
+#define XMIT_GSO_V4 0x8
+#define XMIT_GSO_V6 0x10
+
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+
+
+/* stuff added to make the code fit 80Col */
+#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
+
+#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_IP_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+
+#define BNX2X_L4_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+
+#define BNX2X_RX_CSUM_OK(cqe) \
+ (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
+
+#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+ (((le16_to_cpu(flags) & \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
+ == PRS_FLAG_OVERETH_IPV4)
+#define BNX2X_RX_SUM_FIX(cqe) \
+ BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
+
+
+#define FP_USB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_c, func)
+
- #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
++#define HC_INDEX_ETH_RX_CQ_CONS 1
+
- #define BP_E1HVN(bp) (bp->pfid >> 1)
- #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/
- #define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
- #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
- BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
++#define HC_INDEX_OOO_TX_CQ_CONS 4
+
++#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
++
++#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
++
++#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
++
++#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+
+#define BNX2X_RX_SB_INDEX \
+ (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
+
+#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
+
+#define BNX2X_TX_SB_INDEX_COS0 \
+ (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
+
+/* end of fast path */
+
+/* common */
+
+struct bnx2x_common {
+
+ u32 chip_id;
+/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
+
+#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
+#define CHIP_NUM_57710 0x164e
+#define CHIP_NUM_57711 0x164f
+#define CHIP_NUM_57711E 0x1650
+#define CHIP_NUM_57712 0x1662
+#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57713 0x1651
+#define CHIP_NUM_57713E 0x1652
+#define CHIP_NUM_57800 0x168a
+#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57810 0x168e
+#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57840 0x168d
+#define CHIP_NUM_57840_MF 0x16ab
+#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
+#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
+#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
+#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
+#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
+#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
+ CHIP_IS_57711E(bp))
+#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
+ CHIP_IS_57712_MF(bp))
+#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
+ CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57810(bp) || \
+ CHIP_IS_57810_MF(bp) || \
+ CHIP_IS_57840(bp) || \
+ CHIP_IS_57840_MF(bp))
+#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
+#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
+
+#define CHIP_REV_SHIFT 12
+#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT)
+/* assume maximum 5 revisions */
+#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000)
+/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
+#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
+ !(CHIP_REV_VAL(bp) & 0x00001000))
+/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
+#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
+ (CHIP_REV_VAL(bp) & 0x00001000))
+
+#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
+ ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
+
+#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
+#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
+#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+ (CHIP_REV_SHIFT + 1)) \
+ << CHIP_REV_SHIFT)
+#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \
+ CHIP_REV_SIM(bp) :\
+ CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Ax))
+
+ int flash_size;
+#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT 30000
+#define BNX2X_NVRAM_PAGE_SIZE 256
+
+ u32 shmem_base;
+ u32 shmem2_base;
+ u32 mf_cfg_base;
+ u32 mf2_cfg_base;
+
+ u32 hw_config;
+
+ u32 bc_ver;
+
+ u8 int_block;
+#define INT_BLOCK_HC 0
+#define INT_BLOCK_IGU 1
+#define INT_BLOCK_MODE_NORMAL 0
+#define INT_BLOCK_MODE_BW_COMP 2
+#define CHIP_INT_MODE_IS_NBC(bp) \
+ (!CHIP_IS_E1x(bp) && \
+ !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
+
+ u8 chip_port_mode;
+#define CHIP_4_PORT_MODE 0x0
+#define CHIP_2_PORT_MODE 0x1
+#define CHIP_PORT_MODE_NONE 0x2
+#define CHIP_MODE(bp) (bp->common.chip_port_mode)
+#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+};
+
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+
+/* end of common */
+
+/* port */
+
+struct bnx2x_port {
+ u32 pmf;
+
+ u32 link_config[LINK_CONFIG_SIZE];
+
+ u32 supported[LINK_CONFIG_SIZE];
+/* link settings - missing defines */
+#define SUPPORTED_2500baseX_Full (1 << 15)
+
+ u32 advertising[LINK_CONFIG_SIZE];
+/* link settings - missing defines */
+#define ADVERTISED_2500baseX_Full (1 << 15)
+
+ u32 phy_addr;
+
+ /* used to synchronize phy accesses */
+ struct mutex phy_mutex;
+ int need_hw_lock;
+
+ u32 port_stx;
+
+ struct nig_stats old_nig_stats;
+};
+
+/* end of port */
+
+#define STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+
+/* slow path */
+
+/* slow path work-queue */
+extern struct workqueue_struct *bnx2x_wq;
+
+#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_ID_INVALID 0xFF
+
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
+ * status block, represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However, special L2 queues such
+ * as the FCoE queue do not require an FP-SB, and other components like
+ * the CNIC may consume an FP-SB, reducing the number of possible L2 queues.
+ *
+ * If the maximum number of FP-SBs available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
+ *    regular L2 queues is Y = X - 1
+ * b. In MF mode the actual number of L2 queues is Y = (X - 1)/MF_factor
+ * c. If the FCoE L2 queue is supported, the actual number of L2 queues
+ *    is Y + 1
+ * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
+ *    slow-path interrupts) or Y + 2 if CNIC is supported (one additional
+ *    FP interrupt context for the CNIC).
+ * e. The number of HW contexts (CID count) is always X, or X + 1 if the
+ *    FCoE L2 queue is supported. The CID for the FCoE L2 queue is always X.
+ */
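+/*
+ * Worked example (illustrative): with X == 16 FP-SBs and CNIC supported,
+ * Y == 15 regular L2 queues remain; adding the FCoE L2 queue gives 16 L2
+ * queues in total, served by Y + 2 == 17 MSIX vectors (one for slow-path
+ * plus one for the CNIC), with X + 1 == 17 CIDs and the FCoE L2 queue
+ * using CID 16.
+ */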
+
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x 16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
+
+union cdu_context {
+ struct eth_context eth;
+ char pad[1024];
+};
+
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW 3
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#ifdef BCM_CNIC
+#define CNIC_ISCSI_CID_MAX 256
+#define CNIC_FCOE_CID_MAX 2048
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
+#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+#endif
+
+#define QM_ILT_PAGE_SZ_HW 0
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
+#define QM_CID_ROUND 1024
+
+#ifdef BCM_CNIC
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW 0
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
+/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM 1024
+#define TM_ILT_SZ (8 * TM_CONN_NUM)
+#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW 0
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
+#define SRC_HASH_BITS 10
+#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ SRC_ILT_SZ
+#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
+#endif
+
+#define MAX_DMAE_C 8
+
+/* DMA memory not used in fastpath */
+struct bnx2x_slowpath {
+ union {
+ struct mac_configuration_cmd e1x;
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+
+ union {
+ struct tstorm_eth_mac_filter_config e1x;
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct mac_configuration_cmd e1;
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ struct eth_rss_update_ramrod_data rss_rdata;
+
+ /* Queue State related ramrods are always sent under rtnl_lock */
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_rdata;
+
+ union {
+ struct function_start_data func_start;
+ /* pfc configuration for DCBX ramrod */
+ struct flow_control_configuration pfc_config;
+ } func_rdata;
+
+ /* used by dmae command executer */
+ struct dmae_command dmae[MAX_DMAE_C];
+
+ u32 stats_comp;
+ union mac_stats mac_stats;
+ struct nig_stats nig_stats;
+ struct host_port_stats port_stats;
+ struct host_func_stats func_stats;
+ struct host_func_stats func_stats_base;
+
+ u32 wb_comp;
+ u32 wb_data[4];
+};
+
+#define bnx2x_sp(bp, var) (&bp->slowpath->var)
+#define bnx2x_sp_mapping(bp, var) \
+ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
+
+
+/* attn group wiring */
+#define MAX_DYNAMIC_ATTN_GRPS 8
+
+struct attn_route {
+ u32 sig[5];
+};
+
+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+struct hw_context {
+ union cdu_context *vcxt;
+ dma_addr_t cxt_mapping;
+ size_t size;
+};
+
+/* forward */
+struct bnx2x_ilt;
+
+
+enum bnx2x_recovery_state {
+ BNX2X_RECOVERY_DONE,
+ BNX2X_RECOVERY_INIT,
+ BNX2X_RECOVERY_WAIT,
+ BNX2X_RECOVERY_FAILED
+};
+
+/*
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
+ */
+#define NUM_EQ_PAGES 1
+#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
+ (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
+
+#define BNX2X_EQ_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_EQ_CONS])
+
+/* This is the data that will be used to create a link report message.
+ * We will keep the data used for the last link report in order
+ * to prevent reporting the same link parameters twice.
+ */
+struct bnx2x_link_report_data {
+ u16 line_speed; /* Effective line speed */
+ unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+ BNX2X_LINK_REPORT_FD, /* Full DUPLEX */
+ BNX2X_LINK_REPORT_LINK_DOWN,
+ BNX2X_LINK_REPORT_RX_FC_ON,
+ BNX2X_LINK_REPORT_TX_FC_ON,
+};
+
+enum {
+ BNX2X_PORT_QUERY_IDX,
+ BNX2X_PF_QUERY_IDX,
+ BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+ struct stats_query_header hdr;
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+struct bnx2x_fw_stats_data {
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
+ struct per_queue_stats queue_stats[1];
+};
+
+/* Public slow path states */
+enum {
+ BNX2X_SP_RTNL_SETUP_TC,
+ BNX2X_SP_RTNL_TX_TIMEOUT,
+};
+
+
+struct bnx2x {
+ /* Fields used in the tx and intr/napi performance paths
+ * are grouped together in the beginning of the structure
+ */
+ struct bnx2x_fastpath *fp;
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u16 db_size;
+
+ u8 pf_num; /* absolute PF number */
+ u8 pfid; /* per-path PF number */
+ int base_fw_ndsb;
+#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp) (bp->pfid & 1)
+#define BP_FUNC(bp) (bp->pfid)
+#define BP_ABS_FUNC(bp) (bp->pf_num)
- BP_E1HVN(bp))
++#define BP_VN(bp) ((bp)->pfid >> 1)
++#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
++#define BP_L_ID(bp) (BP_VN(bp) << 2)
++#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
++ (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
++#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
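++/*
++ * Worked example (illustrative): for pfid == 5, BP_PORT(bp) == 1 and
++ * BP_VN(bp) == 2. On an E2 chip in 2-port mode the per-VN stride is 1,
++ * so BP_FW_MB_IDX == 1 + 2 * 1 == 3; on an E1x chip the stride is 2,
++ * giving 1 + 2 * 2 == 5.
++ */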
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ const struct iro *iro_arr;
+#define IRO (bp->iro_arr)
+
+ enum bnx2x_recovery_state recovery_state;
+ int is_leader;
+ struct msix_entry *msix_table;
+
+ int tx_ring_size;
+
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE 60
+#define ETH_MAX_PACKET_SIZE 1500
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+
+ /* Max supported alignment is 256 (8 shift) */
+#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
+ L1_CACHE_SHIFT : 8)
+ /* FW uses a 2 cache line alignment for packet start and size */
+#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
+
+ struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID 16
+#define DEF_SB_ID HC_SP_SB_ID
+ __le16 def_idx;
+ __le16 def_att_idx;
+ u32 attn_state;
+ struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
+
+ /* slow path ring */
+ struct eth_spe *spq;
+ dma_addr_t spq_mapping;
+ u16 spq_prod_idx;
+ struct eth_spe *spq_prod_bd;
+ struct eth_spe *spq_last_bd;
+ __le16 *dsb_sp_prod;
+ atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
+ /* used to synchronize spq accesses */
+ spinlock_t spq_lock;
+
+ /* event queue */
+ union event_ring_elem *eq_ring;
+ dma_addr_t eq_mapping;
+ u16 eq_prod;
+ u16 eq_cons;
+ __le16 *eq_cons_sb;
+ atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
+
+
+
+ /* Counter for marking that there is a STAT_QUERY ramrod pending */
+ u16 stats_pending;
+ /* Counter for completed statistics ramrods */
+ u16 stats_comp;
+
+ /* End of fields used in the performance code paths */
+
+ int panic;
+ int msg_enable;
+
+ u32 flags;
+#define PCIX_FLAG (1 << 0)
+#define PCI_32BIT_FLAG (1 << 1)
+#define ONE_PORT_FLAG (1 << 2)
+#define NO_WOL_FLAG (1 << 3)
+#define USING_DAC_FLAG (1 << 4)
+#define USING_MSIX_FLAG (1 << 5)
+#define USING_MSI_FLAG (1 << 6)
+#define DISABLE_MSI_FLAG (1 << 7)
+#define TPA_ENABLE_FLAG (1 << 8)
+#define NO_MCP_FLAG (1 << 9)
+
+#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
+#define MF_FUNC_DIS (1 << 11)
+#define OWN_CNIC_IRQ (1 << 12)
+#define NO_ISCSI_OOO_FLAG (1 << 13)
+#define NO_ISCSI_FLAG (1 << 14)
+#define NO_FCOE_FLAG (1 << 15)
+
+#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
+#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+
+ int pm_cap;
+ int mrrs;
+
+ struct delayed_work sp_task;
+ struct delayed_work sp_rtnl_task;
+
+ struct delayed_work period_task;
+ struct timer_list timer;
+ int current_interval;
+
+ u16 fw_seq;
+ u16 fw_drv_pulse_wr_seq;
+ u32 func_stx;
+
+ struct link_params link_params;
+ struct link_vars link_vars;
+ u32 link_cnt;
+ struct bnx2x_link_report_data last_reported_link;
+
+ struct mdio_if_info mdio;
+
+ struct bnx2x_common common;
+ struct bnx2x_port port;
+
+ struct cmng_struct_per_port cmng;
+ u32 vn_weight_sum;
+ u32 mf_config[E1HVN_MAX];
+ u32 mf2_config[E2_FUNC_MAX];
+ u32 path_has_ovlan; /* E3 */
+ u16 mf_ov;
+ u8 mf_mode;
+#define IS_MF(bp) (bp->mf_mode != 0)
+#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
+
+ u8 wol;
+
+ int rx_ring_size;
+
+ u16 tx_quick_cons_trip_int;
+ u16 tx_quick_cons_trip;
+ u16 tx_ticks_int;
+ u16 tx_ticks;
+
+ u16 rx_quick_cons_trip_int;
+ u16 rx_quick_cons_trip;
+ u16 rx_ticks_int;
+ u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
+
+ u32 lin_cnt;
+
+ u16 state;
+#define BNX2X_STATE_CLOSED 0
+#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
+#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
+#define BNX2X_STATE_OPEN 0x3000
+#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
+#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
+
+#define BNX2X_STATE_DIAG 0xe000
+#define BNX2X_STATE_ERROR 0xf000
+
+ int multi_mode;
+#define BNX2X_MAX_PRIORITY 8
+#define BNX2X_MAX_ENTRIES_PER_PRI 16
+#define BNX2X_MAX_COS 3
+#define BNX2X_MAX_TX_COS 2
+ int num_queues;
+ int disable_tpa;
+
+ u32 rx_mode;
+#define BNX2X_RX_MODE_NONE 0
+#define BNX2X_RX_MODE_NORMAL 1
+#define BNX2X_RX_MODE_ALLMULTI 2
+#define BNX2X_RX_MODE_PROMISC 3
+#define BNX2X_MAX_MULTICAST 64
+
+ u8 igu_dsb_id;
+ u8 igu_base_sb;
+ u8 igu_sb_cnt;
+ dma_addr_t def_status_blk_mapping;
+
+ struct bnx2x_slowpath *slowpath;
+ dma_addr_t slowpath_mapping;
+
+ /* Total number of FW statistics requests */
+ u8 fw_stats_num;
+
+ /*
+ * This is a memory buffer that will contain both statistics
+ * ramrod request and data.
+ */
+ void *fw_stats;
+ dma_addr_t fw_stats_mapping;
+
+ /*
+ * FW statistics request shortcut (points at the
+ * beginning of fw_stats buffer).
+ */
+ struct bnx2x_fw_stats_req *fw_stats_req;
+ dma_addr_t fw_stats_req_mapping;
+ int fw_stats_req_sz;
+
+ /*
+ * FW statistics data shortcut (points at the beginning of
+ * fw_stats buffer + fw_stats_req_sz).
+ */
+ struct bnx2x_fw_stats_data *fw_stats_data;
+ dma_addr_t fw_stats_data_mapping;
+ int fw_stats_data_sz;
+
+ struct hw_context context;
+
+ struct bnx2x_ilt *ilt;
+#define BP_ILT(bp) ((bp)->ilt)
+#define ILT_MAX_LINES 256
+/*
+ * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
+ * to CNIC.
+ */
+#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT)
+
+/*
+ * Maximum CID count that might be required by the bnx2x:
+ * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ */
+#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
+ NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
+ ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
+
+ int qm_cid_count;
+
+ int dropless_fc;
+
+#ifdef BCM_CNIC
+ u32 cnic_flags;
+#define BNX2X_CNIC_FLAG_MAC_SET 1
+ void *t2;
+ dma_addr_t t2_mapping;
+ struct cnic_ops __rcu *cnic_ops;
+ void *cnic_data;
+ u32 cnic_tag;
+ struct cnic_eth_dev cnic_eth_dev;
+ union host_hc_status_block cnic_sb;
+ dma_addr_t cnic_sb_mapping;
+ struct eth_spe *cnic_kwq;
+ struct eth_spe *cnic_kwq_prod;
+ struct eth_spe *cnic_kwq_cons;
+ struct eth_spe *cnic_kwq_last;
+ u16 cnic_kwq_pending;
+ u16 cnic_spq_pending;
+ u8 fip_mac[ETH_ALEN];
+ struct mutex cnic_mutex;
+ struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+ /* Start index of the "special" (CNIC related) L2 clients */
+ u8 cnic_base_cl_id;
+#endif
+
+ int dmae_ready;
+ /* used to synchronize dmae accesses */
+ spinlock_t dmae_lock;
+
+ /* used to protect the FW mail box */
+ struct mutex fw_mb_mutex;
+
+ /* used to synchronize stats collecting */
+ int stats_state;
+
+ /* used for synchronization of concurrent threads statistics handling */
+ spinlock_t stats_lock;
+
+ /* used by dmae command loader */
+ struct dmae_command stats_dmae;
+ int executer_idx;
+
+ u16 stats_counter;
+ struct bnx2x_eth_stats eth_stats;
+
+ struct z_stream_s *strm;
+ void *gunzip_buf;
+ dma_addr_t gunzip_mapping;
+ int gunzip_outlen;
+#define FW_BUF_SIZE 0x8000
+#define GUNZIP_BUF(bp) (bp->gunzip_buf)
+#define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
+#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
+
+ struct raw_op *init_ops;
+ /* Init blocks offsets inside init_ops */
+ u16 *init_ops_offsets;
+ /* Data blob - has 32 bit granularity */
+ u32 *init_data;
+ u32 init_mode_flags;
+#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags)
+ /* Zipped PRAM blobs - raw data */
+ const u8 *tsem_int_table_data;
+ const u8 *tsem_pram_data;
+ const u8 *usem_int_table_data;
+ const u8 *usem_pram_data;
+ const u8 *xsem_int_table_data;
+ const u8 *xsem_pram_data;
+ const u8 *csem_int_table_data;
+ const u8 *csem_pram_data;
+#define INIT_OPS(bp) (bp->init_ops)
+#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
+#define INIT_DATA(bp) (bp->init_data)
+#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
+#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
+#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
+#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
+#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
+#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
+#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
+#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+
+#define PHY_FW_VER_LEN 20
+ char fw_ver[32];
+ const struct firmware *firmware;
+
+ /* DCB support on/off */
+ u16 dcb_state;
+#define BNX2X_DCB_STATE_OFF 0
+#define BNX2X_DCB_STATE_ON 1
+
+ /* DCBX engine mode */
+ int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF 0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
+#define BNX2X_DCBX_ENABLED_INVALID (-1)
+
+ bool dcbx_mode_uset;
+
+ struct bnx2x_config_dcbx_params dcbx_config_params;
+ struct bnx2x_dcbx_port_params dcbx_port_params;
+ int dcb_version;
+
+ /* CAM credit pools */
+ struct bnx2x_credit_pool_obj macs_pool;
+
+ /* RX_MODE object */
+ struct bnx2x_rx_mode_obj rx_mode_obj;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* Function State controlling object */
+ struct bnx2x_func_sp_obj func_obj;
+
+ unsigned long sp_state;
+
+ /* operation indication for the sp_rtnl task */
+ unsigned long sp_rtnl_state;
+
+ /* DCBX Negotiation results */
+ struct dcbx_features dcbx_local_feat;
+ u32 dcbx_error;
+
+#ifdef BCM_DCBNL
+ struct dcbx_features dcbx_remote_feat;
+ u32 dcbx_remote_flags;
+#endif
+ u32 pending_max;
+
+ /* multiple tx classes of service */
+ u8 max_cos;
+
+ /* priority to cos mapping */
+ u8 prio_to_cos[8];
+};
+
+/* Tx queues may be fewer than or equal to Rx queues */
+extern int num_queues;
+#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
+
+#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
+
+#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp)
+/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
+
+#define RSS_IPV4_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+/* func init flags */
+#define FUNC_FLG_RSS 0x0001
+#define FUNC_FLG_STATS 0x0002
+/* removed FUNC_FLG_UNMATCHED 0x0004 */
+#define FUNC_FLG_TPA 0x0008
+#define FUNC_FLG_SPQ 0x0010
+#define FUNC_FLG_LEADING 0x0020 /* PF only */
+
+
+struct bnx2x_func_init_params {
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
+ dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
+
+ u16 func_flgs;
+ u16 func_id; /* abs fid */
+ u16 pf_id;
+ u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
+};
+
+#define for_each_eth_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_nondefault_eth_queue(bp, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+/* Skip forwarding FP */
+#define for_each_rx_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+/* Skip OOO FP */
+#define for_each_tx_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_nondefault_queue(bp, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_cos_in_tx_queue(fp, var) \
+ for ((var) = 0; (var) < (fp)->max_cos; (var)++)
+
+/* skip rx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/* skip tx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+
+
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp: driver handle
+ * @mac: MAC to configure
+ * @obj: MAC object handle
+ * @set: if 'true' add a new MAC, otherwise - delete
+ * @mac_type: the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if the operation completed successfully, a positive value
+ * if it was successfully scheduled, and a negative value if the requested
+ * operation failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags);
+
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp: driver handle
+ * @mac_obj: MAC object handle
+ * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp: if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation has completed successfully, a positive
+ * value if the operation has been successfully scheduled and a negative
+ * value if the requested operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp);
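+
+/* Usage sketch (illustrative only): drop every unicast-list MAC and
+ * wait for completion:
+ *
+ *	rc = bnx2x_del_all_macs(bp, &bp->fp->mac_obj,
+ *				BNX2X_UC_LIST_MAC, true);
+ */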
+
+/* Init Function API */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+void bnx2x_read_mf_cfg(struct bnx2x *bp);
+
+
+/* dmae */
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32);
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type);
+
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type);
+void bnx2x_update_coalesce(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
+
+static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
+ int wait)
+{
+ u32 val;
+
+ do {
+ val = REG_RD(bp, reg);
+ if (val == expected)
+ break;
+ ms -= wait;
+ msleep(wait);
+
+ } while (ms > 0);
+
+ return val;
+}
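+
+/* Usage sketch (illustrative; SOME_STATUS_REG is a placeholder name):
+ * poll for up to 200 ms in 10 ms steps and test the final value:
+ *
+ *	if (reg_poll(bp, SOME_STATUS_REG, 1, 200, 10) != 1)
+ *		BNX2X_ERR("polling timed out\n");
+ */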
+
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ memset(x, 0, size); \
+ } while (0)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define ILOG2(x) (ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES (3072)
+/* In 57710/11 we use the whole table since we have 8 functions.
+ * In 57712 we have only 4 functions, but use the same size per function,
+ * so only half of the table is in use.
+ */
+#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
+/*
+ * The physical address is shifted right 12 bits and has a '1' (valid)
+ * bit added at the 53rd bit; then, since this is a wide register(TM),
+ * we split it into two 32-bit writes.
+ */
+#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
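+
+/* Illustrative expansion: for a 64-bit address 'a', ONCHIP_ADDR1(a)
+ * carries bits [43:12] of 'a' and ONCHIP_ADDR2(a) carries bits [63:44]
+ * together with the valid bit (1 << 20), matching the 12-bit shift
+ * described above.
+ */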
+
+/* load/unload mode */
+#define LOAD_NORMAL 0
+#define LOAD_OPEN 1
+#define LOAD_DIAG 2
+#define UNLOAD_NORMAL 0
+#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
+
+
+/* DMAE command defines */
+#define DMAE_TIMEOUT -1
+#define DMAE_PCI_ERROR -2 /* E2 and onward */
+#define DMAE_NOT_RDY -3
+#define DMAE_PCI_ERR_FLAG 0x80000000
+
+#define DMAE_SRC_PCI 0
+#define DMAE_SRC_GRC 1
+
+#define DMAE_DST_NONE 0
+#define DMAE_DST_PCI 1
+#define DMAE_DST_GRC 2
+
+#define DMAE_COMP_PCI 0
+#define DMAE_COMP_GRC 1
+
+/* E2 and onward - PCI error handling in the completion */
+
+#define DMAE_COMP_REGULAR 0
+#define DMAE_COM_SET_ERR 1
+
+#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
+ DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
+ DMAE_COMMAND_SRC_SHIFT)
+
+#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
+ DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
+ DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
+ DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
+ DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0 0
+#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
+
+#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
+#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
+#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
+
+#define DMAE_SRC_PF 0
+#define DMAE_SRC_VF 1
+
+#define DMAE_DST_PF 0
+#define DMAE_DST_VF 1
+
+#define DMAE_C_SRC 0
+#define DMAE_C_DST 1
+
+#define DMAE_LEN32_RD_MAX 0x80
+#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
+
+#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
+ indicates error */
+
+#define MAX_DMAE_C_PER_PORT 8
+#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+ BP_VN(bp))
+#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+ E1HVN_MAX)
+
+/* PCIE link and speed */
+#define PCICFG_LINK_WIDTH 0x1f00000
+#define PCICFG_LINK_WIDTH_SHIFT 20
+#define PCICFG_LINK_SPEED 0xf0000
+#define PCICFG_LINK_SPEED_SHIFT 16
+
+
+#define BNX2X_NUM_TESTS 7
+
+#define BNX2X_PHY_LOOPBACK 0
+#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_PHY_LOOPBACK_FAILED 1
+#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
+ BNX2X_PHY_LOOPBACK_FAILED)
+
+
+#define STROM_ASSERT_ARRAY_SIZE 50
+
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
+ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
+ (x))
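+
+/* Illustrative expansion: port 1, vn 2 and software cid 5 pack to
+ * (1 << 23) | (2 << BNX2X_SWCID_SHIFT) | 5.
+ */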
+
+#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
+
+
+#define BNX2X_BTR 4
+#define MAX_SPQ_PENDING 8
+
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+
+
+#define ATTN_NIG_FOR_FUNC (1L << 8)
+#define ATTN_SW_TIMER_4_FUNC (1L << 9)
+#define GPIO_2_FUNC (1L << 10)
+#define GPIO_3_FUNC (1L << 11)
+#define GPIO_4_FUNC (1L << 12)
+#define ATTN_GENERAL_ATTN_1 (1L << 13)
+#define ATTN_GENERAL_ATTN_2 (1L << 14)
+#define ATTN_GENERAL_ATTN_3 (1L << 15)
+#define ATTN_GENERAL_ATTN_4 (1L << 13)
+#define ATTN_GENERAL_ATTN_5 (1L << 14)
+#define ATTN_GENERAL_ATTN_6 (1L << 15)
+
+#define ATTN_HARD_WIRED_MASK 0xff00
+#define ATTENTION_ID 4
+
+
+/* stuff added to make the code fit 80Col */
+
+#define BNX2X_PMF_LINK_ASSERT \
+ GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
+
+#define BNX2X_MC_ASSERT_BITS \
+ (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+
+#define BNX2X_MCP_ASSERT \
+ GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+
+#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
+#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
+
+#define HW_INTERRUT_ASSERT_SET_0 \
+ (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_1 \
+ (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_2 \
+ (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+
+#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+
+#define RSS_FLAGS(bp) \
+ (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
+ (bp->multi_mode << \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
+#define MULTI_MASK 0x7f
+
+
+#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+ offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+ offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+ offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+ offsetof(struct tstorm_def_status_block, segment)
+
+#define BNX2X_SP_DSB_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_DEF_CONS])
+
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
+
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
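+
+/* Illustrative expansion (FOO_MASK/FOO_SHIFT are hypothetical): with
+ * FOO_MASK 0x00f0 and FOO_SHIFT 4, GET_FIELD(0x0250, FOO) evaluates to
+ * (0x0250 & 0x00f0) >> 4 == 0x5; SET_FLAG() performs the inverse
+ * read-modify-write on 'value'.
+ */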
+
+#define CAM_IS_INVALID(x) \
+ (GET_FLAG(x.flags, \
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+ (T_ETH_MAC_COMMAND_INVALIDATE))
+
+/* Number of u32 elements in MC hash array */
+#define MC_HASH_SIZE 8
+#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
+ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
+
+
+#ifndef PXP2_REG_PXP2_INT_STS
+#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
+#endif
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define BNX2X_VPD_LEN 128
+#define VENDOR_ID_LEN 4
+
+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
+
+#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
+#define HC_SEG_ACCESS_ATTN 4
+#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
+
+static const u32 dmae_reg_go_c[] = {
+ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
+#endif /* bnx2x.h */
--- /dev/null
+/* bnx2x_cmn.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/firmware.h>
+#include <linux/prefetch.h>
+#include "bnx2x_cmn.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+
+
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @index: fastpath index to be zeroed
+ *
+ * Makes sure the content of bp->fp[index].napi is kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct napi_struct orig_napi = fp->napi;
+ /* bzero bnx2x_fastpath contents */
+ memset(fp, 0, sizeof(*fp));
+
+ /* Restore the NAPI object as it has been already initialized */
+ fp->napi = orig_napi;
+
+ fp->bp = bp;
+ fp->index = index;
+ if (IS_ETH_FP(fp))
+ fp->max_cos = bp->max_cos;
+ else
+ /* Special queues support only one CoS */
+ fp->max_cos = 1;
+
+ /*
+ * set the tpa flag for each queue. The tpa flag determines the queue
+ * minimal size so it must be set prior to queue memory allocation
+ */
+ fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+ /* We don't want TPA on an FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ fp->disable_tpa = 1;
+#endif
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @from: source FP index
+ * @to: destination FP index
+ *
+ * Makes sure the content of bp->fp[to].napi is kept
+ * intact.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+ struct bnx2x_fastpath *from_fp = &bp->fp[from];
+ struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct napi_struct orig_napi = to_fp->napi;
+ /* Move bnx2x_fastpath contents */
+ memcpy(to_fp, from_fp, sizeof(*to_fp));
+ to_fp->index = to;
+
+ /* Restore the NAPI object as it has been already initialized */
+ to_fp->napi = orig_napi;
+}
+
+int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
+/* free skb in the packet ring at pos idx
+ * return idx of last bd freed
+ */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ u16 idx)
+{
+ struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
+ struct eth_tx_start_bd *tx_start_bd;
+ struct eth_tx_bd *tx_data_bd;
+ struct sk_buff *skb = tx_buf->skb;
+ u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+ int nbd;
+
+ /* prefetch skb end pointer to speedup dev_kfree_skb() */
+ prefetch(&skb->end);
+
+ DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ txdata->txq_index, idx, tx_buf, skb);
+
+ /* unmap first bd */
+ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
+
+ nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+#ifdef BNX2X_STOP_ON_ERROR
+ if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
+ BNX2X_ERR("BAD nbd!\n");
+ bnx2x_panic();
+ }
+#endif
+ new_cons = nbd + tx_buf->first_bd;
+
+ /* Get the next bd */
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+ /* Skip a parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+ /* ...and the TSO split header bd since they have no mapping */
+ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* now free frags */
+ while (nbd > 0) {
+
+ DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ if (--nbd)
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* release skb */
+ WARN_ON(!skb);
+ dev_kfree_skb_any(skb);
+ tx_buf->first_bd = 0;
+ tx_buf->skb = NULL;
+
+ return new_cons;
+}
+
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+{
+ struct netdev_queue *txq;
+ u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -1;
+#endif
+
+ txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ sw_cons = txdata->tx_pkt_cons;
+
+ while (sw_cons != hw_cons) {
+ u16 pkt_cons;
+
+ pkt_cons = TX_BD(sw_cons);
+
+ DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
+ " pkt_cons %u\n",
+ txdata->txq_index, hw_cons, sw_cons, pkt_cons);
+
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+ sw_cons++;
+ }
+
+ txdata->tx_pkt_cons = sw_cons;
+ txdata->tx_bd_cons = bd_cons;
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq))) {
+ /* Taking tx_lock() is needed to prevent reenabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in bnx2x_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
+ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+ }
+ return 0;
+}
+
+static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
+ u16 idx)
+{
+ u16 last_max = fp->last_max_sge;
+
+ if (SUB_S16(idx, last_max) > 0)
+ fp->last_max_sge = idx;
+}
+
+static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+ struct eth_fast_path_rx_cqe *fp_cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+ le16_to_cpu(fp_cqe->len_on_bd)) >>
+ SGE_PAGE_SHIFT;
+ u16 last_max, last_elem, first_elem;
+ u16 delta = 0;
+ u16 i;
+
+ if (!sge_len)
+ return;
+
+ /* First mark all used pages */
+ for (i = 0; i < sge_len; i++)
+ BIT_VEC64_CLEAR_BIT(fp->sge_mask,
+ RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+
+ DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
+ sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+ /* Here we assume that the last SGE index is the biggest */
+ prefetch((void *)(fp->sge_mask));
+ bnx2x_update_last_max_sge(fp,
+ le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+ last_max = RX_SGE(fp->last_max_sge);
+ last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+ first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
+
+ /* If ring is not full */
+ if (last_elem + 1 != first_elem)
+ last_elem++;
+
+ /* Now update the prod */
+ for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
+ if (likely(fp->sge_mask[i]))
+ break;
+
+ fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+ delta += BIT_VEC64_ELEM_SZ;
+ }
+
+ if (delta > 0) {
+ fp->rx_sge_prod += delta;
+ /* clear page-end entries */
+ bnx2x_clear_sge_mask_next_elems(fp);
+ }
+
+ DP(NETIF_MSG_RX_STATUS,
+ "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
+ fp->last_max_sge, fp->rx_sge_prod);
+}
+
+static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
+ struct sk_buff *skb, u16 cons, u16 prod,
+ struct eth_fast_path_rx_cqe *cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+ struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+ struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+ dma_addr_t mapping;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+
+ /* print error if current state != stop */
+ if (tpa_info->tpa_state != BNX2X_TPA_STOP)
+ BNX2X_ERR("start of bin not in stop [%d]\n", queue);
+
+ /* Try to map an empty skb from the aggregation info */
+ mapping = dma_map_single(&bp->pdev->dev,
+ first_buf->skb->data,
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+ /*
+ * ...if it fails - move the skb from the consumer to the producer
+ * and set the current aggregation state as ERROR to drop it
+ * when TPA_STOP arrives.
+ */
+
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ /* Move the BD from the consumer to the producer */
+ bnx2x_reuse_rx_skb(fp, cons, prod);
+ tpa_info->tpa_state = BNX2X_TPA_ERROR;
+ return;
+ }
+
+ /* move empty skb from pool to prod */
+ prod_rx_buf->skb = first_buf->skb;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ /* point prod_bd to new skb */
+ prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+ /* move partial skb from cons to pool (don't unmap yet) */
+ *first_buf = *cons_rx_buf;
+
+ /* mark bin state as START */
+ tpa_info->parsing_flags =
+ le16_to_cpu(cqe->pars_flags.flags);
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ tpa_info->tpa_state = BNX2X_TPA_START;
+ tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+ tpa_info->placement_offset = cqe->placement_offset;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ fp->tpa_queue_used |= (1 << queue);
+#ifdef _ASM_GENERIC_INT_L64_H
+ DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
+#else
+ DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
+#endif
+ fp->tpa_queue_used);
+#endif
+}
+
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
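+/* The 12 bytes above break down as: 2 NOPs + kind + length + 4-byte
+ * TSval + 4-byte TSecr ("echo val") - per the comment, the only TCP
+ * option FW allows inside an aggregation.
+ */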
+/**
+ * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ *
+ * @bp: driver handle
+ * @parsing_flags: parsing flags from the START CQE
+ * @len_on_bd: total length of the first packet for the
+ * aggregation.
+ *
+ * Approximate value of the MSS for this aggregation, calculated from
+ * its first packet.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+ u16 len_on_bd)
+{
+ /*
+ * A TPA aggregation won't have either IP options or TCP options
+ * other than timestamp or IPv6 extension headers.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+ if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6)
+ hdrs_len += sizeof(struct ipv6hdr);
+ else /* IPv4 */
+ hdrs_len += sizeof(struct iphdr);
+
+
+ /* Check if there was a TCP timestamp; if there is one, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ return len_on_bd - hdrs_len;
+}
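+
+/* Worked example (illustrative): an IPv4 aggregation whose first
+ * packet has len_on_bd = 1514 and carries a TCP timestamp gives
+ * hdrs_len = 14 + 20 + 20 + 12 = 66, hence an estimated MSS of
+ * 1514 - 66 = 1448.
+ */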
+
+static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 queue, struct sk_buff *skb,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
+{
+ struct sw_rx_page *rx_pg, old_rx_pg;
+ u32 i, frag_len, frag_size, pages;
+ int err;
+ int j;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ u16 len_on_bd = tpa_info->len_on_bd;
+
+ frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
+ pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+ /* This is needed in order to enable forwarding support */
+ if (frag_size)
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
+ tpa_info->parsing_flags, len_on_bd);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+ BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
+ pages, cqe_idx);
+ BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
+ bnx2x_panic();
+ return -EINVAL;
+ }
+#endif
+
+ /* Run through the SGL and compose the fragmented skb */
+ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
+ u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
+
+ /* FW gives the indices of the SGE as if the ring is an array
+ (meaning that "next" element will consume 2 indices) */
+ frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+ rx_pg = &fp->rx_page_ring[sge_idx];
+ old_rx_pg = *rx_pg;
+
+ /* If we fail to allocate a substitute page, we simply stop
+ where we are and drop the whole packet */
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+ if (unlikely(err)) {
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ return err;
+ }
+
+ /* Unmap the page as we are going to pass it to the stack */
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+
+ /* Add one frag and update the appropriate fields in the skb */
+ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ skb->len += frag_len;
+
+ frag_size -= frag_len;
+ }
+
+ return 0;
+}
+
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 queue, struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
+{
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+ u8 pad = tpa_info->placement_offset;
+ u16 len = tpa_info->len_on_bd;
+ struct sk_buff *skb = rx_buf->skb;
+ /* alloc new skb */
+ struct sk_buff *new_skb;
+ u8 old_tpa_state = tpa_info->tpa_state;
+
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+ /* If there was an error during the handling of the TPA_START -
+ * drop this aggregation.
+ */
+ if (old_tpa_state == BNX2X_TPA_ERROR)
+ goto drop;
+
+ /* Try to allocate the new skb */
+ new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+
+ /* Unmap skb in the pool anyway, as we are going to change
+ pool entry status to BNX2X_TPA_STOP even if new skb allocation
+ fails. */
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+
+ if (likely(new_skb)) {
+ prefetch(skb);
+ prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (pad + len > fp->rx_buf_size) {
+ BNX2X_ERR("skb_put is about to fail... "
+ "pad %d len %d rx_buf_size %d\n",
+ pad, len, fp->rx_buf_size);
+ bnx2x_panic();
+ return;
+ }
+#endif
+
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
+ napi_gro_receive(&fp->napi, skb);
+ } else {
+ DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+ " - dropping packet!\n");
+ dev_kfree_skb_any(skb);
+ }
+
+
+ /* put new skb in bin */
+ rx_buf->skb = new_skb;
+
+ return;
+ }
+
+drop:
+ /* drop the packet and keep the buffer in the bin */
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate or map a new skb - dropping packet!\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+}
+
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+ struct sk_buff *skb)
+{
+ /* Set Toeplitz hash from CQE */
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe->fast_path_cqe.status_flags &
+ ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ skb->rxhash =
+ le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+}
+
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+ u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+ int rx_pkt = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return 0;
+#endif
+
+ /* CQ "next element" is of the size of the regular element,
+ that's why it's ok here */
+ hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
+ if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+ hw_comp_cons++;
+
+ bd_cons = fp->rx_bd_cons;
+ bd_prod = fp->rx_bd_prod;
+ bd_prod_fw = bd_prod;
+ sw_comp_cons = fp->rx_comp_cons;
+ sw_comp_prod = fp->rx_comp_prod;
+
+ /* Memory barrier necessary as speculative reads of the rx
+ * buffer can be ahead of the index in the status block
+ */
+ rmb();
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
+ fp->index, hw_comp_cons, sw_comp_cons);
+
+ while (sw_comp_cons != hw_comp_cons) {
+ struct sw_rx_bd *rx_buf = NULL;
+ struct sk_buff *skb;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
+ u8 cqe_fp_flags;
+ enum eth_rx_cqe_type cqe_fp_type;
+ u16 len, pad;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return 0;
+#endif
+
+ comp_ring_cons = RCQ_BD(sw_comp_cons);
+ bd_prod = RX_BD(bd_prod);
+ bd_cons = RX_BD(bd_cons);
+
+ /* Prefetch the page containing the BD descriptor
+ at producer's index. It will be needed when new skb is
+ allocated */
+ prefetch((void *)(PAGE_ALIGN((unsigned long)
+ (&fp->rx_desc_ring[bd_prod])) -
+ PAGE_SIZE + 1));
+
+ cqe = &fp->rx_comp_ring[comp_ring_cons];
+ cqe_fp = &cqe->fast_path_cqe;
+ cqe_fp_flags = cqe_fp->type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+
+ DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
+ " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
+ cqe_fp_flags, cqe_fp->status_flags,
+ le32_to_cpu(cqe_fp->rss_hash_result),
+ le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+
+ /* is this a slowpath msg? */
+ if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
+ bnx2x_sp_event(fp, cqe);
+ goto next_cqe;
+
+ /* this is an rx packet */
+ } else {
+ rx_buf = &fp->rx_buf_ring[bd_cons];
+ skb = rx_buf->skb;
+ prefetch(skb);
+
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ /* sanity check */
+ if (fp->disable_tpa &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while "
+ "disable_tpa type %x\n",
+ CQE_TYPE(cqe_fp_type));
+#endif
+
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_start on queue %d\n",
+ queue);
+
+ bnx2x_tpa_start(fp, queue, skb,
+ bd_cons, bd_prod,
+ cqe_fp);
+
+ /* Set Toeplitz hash for LRO skb */
+ bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+ goto next_rx;
+
+ } else {
+ u16 queue =
+ cqe->end_agg_cqe.queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ bnx2x_tpa_stop(bp, fp, queue,
+ &cqe->end_agg_cqe,
+ comp_ring_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (bp->panic)
+ return 0;
+#endif
+
+ bnx2x_update_sge_prod(fp, cqe_fp);
+ goto next_cqe;
+ }
+ }
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
+ prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR flags %x rx packet %u\n",
+ cqe_fp_flags, sw_comp_cons);
+ fp->eth_q_stats.rx_err_discard_pkt++;
+ goto reuse_rx;
+ }
+
+ /* Since we don't have a jumbo ring
+ * copy small packets if mtu > 1500
+ */
+ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+ (len <= RX_COPY_THRESH)) {
+ struct sk_buff *new_skb;
+
+ new_skb = netdev_alloc_skb(bp->dev, len + pad);
+ if (new_skb == NULL) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR packet dropped "
+ "because of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ goto reuse_rx;
+ }
+
+ /* aligned copy */
+ skb_copy_from_linear_data_offset(skb, pad,
+ new_skb->data + pad, len);
+ skb_reserve(new_skb, pad);
+ skb_put(new_skb, len);
+
+ bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+
+ skb = new_skb;
+
+ } else
+ if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ } else {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR packet dropped because "
+ "of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+reuse_rx:
+ bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+ goto next_rx;
+ }
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ /* Set Toeplitz hash for a non-LRO skb */
+ bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+ skb_checksum_none_assert(skb);
+
+ if (bp->dev->features & NETIF_F_RXCSUM) {
+
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ fp->eth_q_stats.hw_csum_err++;
+ }
+ }
+
+ skb_record_rx_queue(skb, fp->index);
+
+ if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+ PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb,
+ le16_to_cpu(cqe_fp->vlan_tag));
+ napi_gro_receive(&fp->napi, skb);
+
+
+next_rx:
+ rx_buf->skb = NULL;
+
+ bd_cons = NEXT_RX_IDX(bd_cons);
+ bd_prod = NEXT_RX_IDX(bd_prod);
+ bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
+ rx_pkt++;
+next_cqe:
+ sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+ sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+
+ if (rx_pkt == budget)
+ break;
+ } /* while */
+
+ fp->rx_bd_cons = bd_cons;
+ fp->rx_bd_prod = bd_prod_fw;
+ fp->rx_comp_cons = sw_comp_cons;
+ fp->rx_comp_prod = sw_comp_prod;
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+ fp->rx_sge_prod);
+
+ fp->rx_pkt += rx_pkt;
+ fp->rx_calls++;
+
+ return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct bnx2x_fastpath *fp = fp_cookie;
+ struct bnx2x *bp = fp->bp;
+ u8 cos;
+
+ DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
+ "[fp %d fw_sd %d igusb %d]\n",
+ fp->index, fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ /* Handle Rx and Tx according to MSI-X vector */
+ prefetch(fp->rx_cons_sb);
+
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+
+ return IRQ_HANDLED;
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+ mutex_lock(&bp->port.phy_mutex);
+
+ if (bp->port.need_hw_lock)
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+ if (bp->port.need_hw_lock)
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+ mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
+ */
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ }
+ }
+
+ return line_speed;
+}
+
+/**
+ * bnx2x_fill_report_data - fill the link report data
+ *
+ * @bp: driver handle
+ * @data: link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static inline void bnx2x_fill_report_data(struct bnx2x *bp,
+ struct bnx2x_link_report_data *data)
+{
+ u16 line_speed = bnx2x_get_mf_speed(bp);
+
+ memset(data, 0, sizeof(*data));
+
+ /* Fill the report data: effective line speed */
+ data->line_speed = line_speed;
+
+ /* Link is down */
+ if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+
+ /* Full DUPLEX */
+ if (bp->link_vars.duplex == DUPLEX_FULL)
+ __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
+
+ /* Rx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+
+ /* Tx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+}
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Calls the __bnx2x_link_report() under the same locking scheme
+ * as the link/PHY state managing code to ensure consistent link
+ * reporting.
+ */
+
+void bnx2x_link_report(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ __bnx2x_link_report(bp);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Non-atomic implementation.
+ * Should be called under the phy_lock.
+ */
+void __bnx2x_link_report(struct bnx2x *bp)
+{
+ struct bnx2x_link_report_data cur_data;
+
+ /* reread mf_cfg */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_read_mf_cfg(bp);
+
+ /* Read the current link report info */
+ bnx2x_fill_report_data(bp, &cur_data);
+
+ /* Don't report link down or exactly the same link status twice */
+ if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
+ (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags) &&
+ test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)))
+ return;
+
+ bp->link_cnt++;
+
+ /* We are going to report new link parameters now -
+ * remember the current data for the next time.
+ */
+ memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
+
+ if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)) {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ return;
+ } else {
+ const char *duplex;
+ const char *flow;
+
+ netif_carrier_on(bp->dev);
+
+ if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
+ &cur_data.link_report_flags))
+ duplex = "full";
+ else
+ duplex = "half";
+
+ /* Handle the FC at the end so that only these flags could
+ * possibly be set. This way we may easily check if there is no FC
+ * enabled.
+ */
+ if (cur_data.link_report_flags) {
+ if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags)) {
+ if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags))
+ flow = "ON - receive & transmit";
+ else
+ flow = "ON - receive";
+ } else {
+ flow = "ON - transmit";
+ }
+ } else {
+ flow = "none";
+ }
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ cur_data.line_speed, duplex, flow);
+ }
+}
+
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ u16 ring_prod;
+ int i, j;
+
+ /* Allocate TPA resources */
+ for_each_rx_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ DP(NETIF_MSG_IFUP,
+ "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
+ if (!fp->disable_tpa) {
+ /* Fill the per-aggregation pool */
+ for (i = 0; i < MAX_AGG_QS(bp); i++) {
+ struct bnx2x_agg_info *tpa_info =
+ &fp->tpa_info[i];
+ struct sw_rx_bd *first_buf =
+ &tpa_info->first_buf;
+
+ first_buf->skb = netdev_alloc_skb(bp->dev,
+ fp->rx_buf_size);
+ if (!first_buf->skb) {
+ BNX2X_ERR("Failed to allocate TPA "
+ "skb pool for queue[%d] - "
+ "disabling TPA on this "
+ "queue!\n", j);
+ bnx2x_free_tpa_pool(bp, fp, i);
+ fp->disable_tpa = 1;
+ break;
+ }
+ dma_unmap_addr_set(first_buf, mapping, 0);
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
+ }
+
+ /* "next page" elements initialization */
+ bnx2x_set_next_page_sgl(fp);
+
+ /* set SGEs bit mask */
+ bnx2x_init_sge_ring_bit_mask(fp);
+
+ /* Allocate SGEs and initialize the ring elements */
+ for (i = 0, ring_prod = 0;
+ i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ BNX2X_ERR("was only able to allocate "
+ "%d rx sges\n", i);
+ BNX2X_ERR("disabling TPA for "
+ "queue[%d]\n", j);
+ /* Cleanup already allocated elements */
+ bnx2x_free_rx_sge_range(bp, fp,
+ ring_prod);
+ bnx2x_free_tpa_pool(bp, fp,
+ MAX_AGG_QS(bp));
+ fp->disable_tpa = 1;
+ ring_prod = 0;
+ break;
+ }
+ ring_prod = NEXT_SGE_IDX(ring_prod);
+ }
+
+ fp->rx_sge_prod = ring_prod;
+ }
+ }
+
+ for_each_rx_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ fp->rx_bd_cons = 0;
+
+ /* Activate BD ring */
+ /* Warning!
+ * this will generate an interrupt (to the TSTORM)
+ * must only be done after chip is initialized
+ */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
+
+ if (j != 0)
+ continue;
+
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+ U64_LO(fp->rx_comp_mapping));
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+ U64_HI(fp->rx_comp_mapping));
+ }
+ }
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+ int i;
+ u8 cos;
+
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ u16 bd_cons = txdata->tx_bd_cons;
+ u16 sw_prod = txdata->tx_pkt_prod;
+ u16 sw_cons = txdata->tx_pkt_cons;
+
+ while (sw_cons != sw_prod) {
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata,
+ TX_BD(sw_cons));
+ sw_cons++;
+ }
+ }
+ }
+}
+
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ int i;
+
+ /* ring wasn't allocated */
+ if (fp->rx_buf_ring == NULL)
+ return;
+
+ for (i = 0; i < NUM_RX_BD; i++) {
+ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+ struct sk_buff *skb = rx_buf->skb;
+
+ if (skb == NULL)
+ continue;
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->skb = NULL;
+ dev_kfree_skb(skb);
+ }
+}
+
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ bnx2x_free_rx_bds(fp);
+
+ if (!fp->disable_tpa)
+ bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
+ }
+}
+
+void bnx2x_free_skbs(struct bnx2x *bp)
+{
+ bnx2x_free_tx_skbs(bp);
+ bnx2x_free_rx_skbs(bp);
+}
+
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+ /* load old values */
+ u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+ if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+ /* leave all but MAX value */
+ mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+ /* set new MAX value */
+ mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+ & FUNC_MF_CFG_MAX_BW_MASK;
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+ }
+}
+
+/**
+ * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
+ *
+ * @bp: driver handle
+ * @nvecs: number of vectors to be released
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
+{
+ int i, offset = 0;
+
+ if (nvecs == offset)
+ return;
+ free_irq(bp->msix_table[offset].vector, bp->dev);
+ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+ bp->msix_table[offset].vector);
+ offset++;
+#ifdef BCM_CNIC
+ if (nvecs == offset)
+ return;
+ offset++;
+#endif
+
+ for_each_eth_queue(bp, i) {
+ if (nvecs == offset)
+ return;
+ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
+ "irq\n", i, bp->msix_table[offset].vector);
+
+ free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
+ }
+}
+
+void bnx2x_free_irq(struct bnx2x *bp)
+{
+ if (bp->flags & USING_MSIX_FLAG)
+ bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
+ CNIC_PRESENT + 1);
+ else if (bp->flags & USING_MSI_FLAG)
+ free_irq(bp->pdev->irq, bp->dev);
+ else
+ free_irq(bp->pdev->irq, bp->dev);
+}
+
+int bnx2x_enable_msix(struct bnx2x *bp)
+{
+ int msix_vec = 0, i, rc, req_cnt;
+
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
+
+#ifdef BCM_CNIC
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+ bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
+ msix_vec++;
+#endif
+ /* We need separate vectors for ETH queues only (not FCoE) */
+ for_each_eth_queue(bp, i) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
+ "(fastpath #%u)\n", msix_vec, msix_vec, i);
+ msix_vec++;
+ }
+
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+
+ /*
+ * reconfigure number of tx/rx queues according to available
+ * MSI-X vectors
+ */
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ /* how many fewer vectors will we have? */
+ int diff = req_cnt - rc;
+
+ DP(NETIF_MSG_IFUP,
+ "Trying to use less MSI-X vectors: %d\n", rc);
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+ if (rc) {
+ DP(NETIF_MSG_IFUP,
+ "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_queues -= diff;
+
+ DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ bp->num_queues);
+ } else if (rc) {
+ /* fall to INTx if not enough memory */
+ if (rc == -ENOMEM)
+ bp->flags |= DISABLE_MSI_FLAG;
+ DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+
+ bp->flags |= USING_MSIX_FLAG;
+
+ return 0;
+}
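+
+/* Worked example (illustrative): with 8 ETH queues and CNIC_PRESENT
+ * of 1, req_cnt = 8 + 1 + 1 = 10; if pci_enable_msix() reports only 6
+ * vectors available, diff = 4 and bp->num_queues is reduced by 4
+ * before the second enable attempt.
+ */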
+
+static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+{
+ int i, rc, offset = 0;
+
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
+ bp->dev->name, bp->dev);
+ if (rc) {
+ BNX2X_ERR("request sp irq failed\n");
+ return -EBUSY;
+ }
+
+#ifdef BCM_CNIC
+ offset++;
+#endif
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ bp->dev->name, i);
+
+ rc = request_irq(bp->msix_table[offset].vector,
+ bnx2x_msix_fp_int, 0, fp->name, fp);
+ if (rc) {
+ BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
+ bp->msix_table[offset].vector, rc);
+ bnx2x_free_msix_irqs(bp, offset);
+ return -EBUSY;
+ }
+
+ offset++;
+ }
+
+ i = BNX2X_NUM_ETH_QUEUES(bp);
+ offset = 1 + CNIC_PRESENT;
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
+ " ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+
+ return 0;
+}
+
+int bnx2x_enable_msi(struct bnx2x *bp)
+{
+ int rc;
+
+ rc = pci_enable_msi(bp->pdev);
+ if (rc) {
+ DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+ return -1;
+ }
+ bp->flags |= USING_MSI_FLAG;
+
+ return 0;
+}
+
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+ unsigned long flags;
+ int rc;
+
+ if (bp->flags & USING_MSI_FLAG)
+ flags = 0;
+ else
+ flags = IRQF_SHARED;
+
+ rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
+ bp->dev->name, bp->dev);
+ return rc;
+}
+
+static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+ int rc = 0;
+ if (bp->flags & USING_MSIX_FLAG) {
+ rc = bnx2x_req_msix_irqs(bp);
+ if (rc)
+ return rc;
+ } else {
+ bnx2x_ack_int(bp);
+ rc = bnx2x_req_irq(bp);
+ if (rc) {
+ BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
+ return rc;
+ }
+ if (bp->flags & USING_MSI_FLAG) {
+ bp->dev->irq = bp->pdev->irq;
+ netdev_info(bp->dev, "using MSI IRQ %d\n",
+ bp->pdev->irq);
+ }
+ }
+
+ return 0;
+}
+
+static inline void bnx2x_napi_enable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue(bp, i)
+ napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static inline void bnx2x_napi_disable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue(bp, i)
+ napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+void bnx2x_netif_start(struct bnx2x *bp)
+{
+ if (netif_running(bp->dev)) {
+ bnx2x_napi_enable(bp);
+ bnx2x_int_enable(bp);
+ if (bp->state == BNX2X_STATE_OPEN)
+ netif_tx_wake_all_queues(bp->dev);
+ }
+}
+
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
+{
+ bnx2x_int_disable_sync(bp, disable_hw);
+ bnx2x_napi_disable(bp);
+}
+
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ /* Skip VLAN tag if present */
+ if (ether_type == ETH_P_8021Q) {
+ struct vlan_ethhdr *vhdr =
+ (struct vlan_ethhdr *)skb->data;
+
+ ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ }
+
+ /* If ethertype is FCoE or FIP - use FCoE ring */
+ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+ return bnx2x_fcoe_tx(bp, txq_index);
+ }
+#endif
+ /* select a non-FCoE queue */
+ return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
+}
+
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ bp->num_queues = 1;
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ bp->num_queues = bnx2x_calc_num_queues(bp);
+ break;
+
+ default:
+ bp->num_queues = 1;
+ break;
+ }
+
+ /* Add special queues */
+ bp->num_queues += NON_ETH_CONTEXT_USE;
+}
+
+/**
+ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
+ *
+ * @bp: Driver handle
+ *
+ * We currently support at most 16 Tx queues for each CoS, thus we will
+ * allocate a multiple of 16 for ETH L2 rings according to the value of the
+ * bp->max_cos.
+ *
+ * If there is an FCoE L2 queue the appropriate Tx queue will have the next
+ * index after all ETH L2 indices.
+ *
+ * If the actual number of Tx queues (for each CoS) is less than 16 then there
+ * will be holes at the end of each group of 16 ETH L2 indices (0..15,
+ * 16..31,...) with indices that are not coupled with any real Tx queue.
+ *
+ * The proper configuration of skb->queue_mapping is handled by
+ * bnx2x_select_queue() and __skb_tx_hash().
+ *
+ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
+ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
+ */
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+ int rc, tx, rx;
+
+ tx = MAX_TXQS_PER_COS * bp->max_cos;
+ rx = BNX2X_NUM_ETH_QUEUES(bp);
+
+/* account for fcoe queue */
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ rx += FCOE_PRESENT;
+ tx += FCOE_PRESENT;
+ }
+#endif
+
+ rc = netif_set_real_num_tx_queues(bp->dev, tx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(bp->dev, rx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
+ return rc;
+ }
+
+ DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ tx, rx);
+
+ return rc;
+}
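+
+/* Worked example (illustrative): with bp->max_cos = 2 and 4 ETH L2
+ * rings (no FCoE), tx = 32 and rx = 4; Tx indices 0..3 and 16..19 map
+ * to real queues while the rest of each group of 16 are the holes
+ * described above.
+ */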
+
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+ if (IS_FCOE_IDX(i))
+ /*
+ * Although no IP frames are expected to arrive on
+ * this ring, we still want to add an
+ * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+ * overrun attack.
+ */
+ fp->rx_buf_size =
+ BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ else
+ fp->rx_buf_size =
+ bp->dev->mtu + ETH_OVREHEAD +
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ }
+}
+
+static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+{
+ int i;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /*
+ * Prepare the initial contents of the indirection table if RSS is
+ * enabled
+ */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ for (i = 0; i < sizeof(ind_table); i++)
+ ind_table[i] =
+ bp->fp->cl_id + (i % num_eth_queues);
+ }
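+ /* Illustrative example: with 4 ETH queues and a base cl_id of 16
+ * (hypothetical values), the table cycles 16, 17, 18, 19, 16, ...
+ * across all T_ETH_INDIRECTION_TABLE_SIZE entries.
+ */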
+
+ /*
+ * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+ * per-port, so if explicit configuration is needed, do it only
+ * for a PMF.
+ *
+ * For 57712 and newer on the other hand it's a per-function
+ * configuration.
+ */
+ return bnx2x_config_rss_pf(bp, ind_table,
+ bp->port.pmf || !CHIP_IS_E1x(bp));
+}
+
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+{
+ struct bnx2x_config_rss_params params = {0};
+ int i;
+
+ /* Although RSS is meaningless when there is a single HW queue we
+ * still need it enabled in order to have HW Rx hash generated.
+ *
+ * if (!is_eth_multi(bp))
+ * bp->multi_mode = ETH_RSS_MODE_DISABLED;
+ */
+
+ params.rss_obj = &bp->rss_conf_obj;
+
+ __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
+
+ /* RSS mode */
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
+ break;
+ case ETH_RSS_MODE_VLAN_PRI:
+ __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags);
+ break;
+ case ETH_RSS_MODE_E1HOV_PRI:
+ __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags);
+ break;
+ case ETH_RSS_MODE_IP_DSCP:
+ __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags);
+ break;
+ default:
+ BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
+ return -EINVAL;
+ }
+
+ /* If RSS is enabled */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
+
+ /* Hash bits */
+ params.rss_result_mask = MULTI_MASK;
+
+ memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+
+ if (config_hash) {
+ /* RSS keys */
+ for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+ params.rss_key[i] = random32();
+
+ __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
+ }
+ }
+
+ return bnx2x_config_rss(bp, ¶ms);
+}
+
+static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+
+ /***************** Cleanup MACs' object first *************************/
+
+ /* Wait for completion of the requested commands */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Perform a dry cleanup */
+ __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+ /* Clean ETH primary MAC */
+ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+ /* Cleanup UC list */
+ vlan_mac_flags = 0;
+ __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+ /***************** Now clean mcast object *****************************/
+ rparam.mcast_obj = &bp->mcast_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+ /* Add a DEL command... */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
+ "object: %d\n", rc);
+
+ /* ...and wait until all pending commands are cleared */
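+	/* (a positive return from the CONT command below means more
+	 * commands are still pending; zero means the list is drained)
+	 */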
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ while (rc != 0) {
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+ rc);
+ return;
+ }
+
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ }
+}
+
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ goto label; \
+ } while (0)
+#else
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ (bp)->panic = 1; \
+ return -EBUSY; \
+ } while (0)
+#endif
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+ int port = BP_PORT(bp);
+ u32 load_code;
+ int i, rc;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EPERM;
+#endif
+
+ bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+ /* Set the initial link reported state to link down */
+ bnx2x_acquire_phy_lock(bp);
+ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags);
+ bnx2x_release_phy_lock(bp);
+
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
+
+	/*
+	 * Zero fastpath structures preserving invariants like napi (which is
+	 * allocated only once), fp index, max_cos and the bp pointer.
+	 * Also set fp->disable_tpa.
+	 */
+ for_each_queue(bp, i)
+ bnx2x_bz_fp(bp, i);
+
+
+ /* Set the receive queues buffer size */
+ bnx2x_set_rx_buf_size(bp);
+
+ if (bnx2x_alloc_mem(bp))
+ return -ENOMEM;
+
+ /* As long as bnx2x_alloc_mem() may possibly update
+ * bp->num_queues, bnx2x_set_real_num_queues() should always
+ * come after it.
+ */
+ rc = bnx2x_set_real_num_queues(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to set real_num_queues\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+	/* Configure multi cos mappings in kernel.
+	 * This configuration may be overridden by a multi class queue
+	 * discipline or by a dcbx negotiation result.
+	 */
+ bnx2x_setup_tc(bp->dev, bp->max_cos);
+
+ bnx2x_napi_enable(bp);
+
+	/* Send LOAD_REQUEST command to MCP.
+	 * Returns the type of LOAD command: if it is the first port to be
+	 * initialized, common blocks should be initialized, otherwise - not.
+	 */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error1);
+ }
+ if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ rc = -EBUSY; /* other port in diagnostic mode */
+ LOAD_ERROR_EXIT(bp, load_error1);
+ }
+
+ } else {
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]++;
+ load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 1)
+ load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (load_count[path][1 + port] == 1)
+ load_code = FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+ }
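+
+	/*
+	 * Illustration of the NO-MCP bookkeeping above: the first function
+	 * loaded on a path gets LOAD_COMMON, the first one on each port gets
+	 * LOAD_PORT, and every later function gets LOAD_FUNCTION - mirroring
+	 * the answers the MCP would have given.
+	 */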
+
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ bp->port.pmf = 1;
+ /*
+ * We need the barrier to ensure the ordering between the
+ * writing to bp->port.pmf here and reading it from the
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ } else
+ bp->port.pmf = 0;
+
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
+
+ /* Initialize HW */
+ rc = bnx2x_init_hw(bp, load_code);
+ if (rc) {
+ BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+
+ /* Connect to IRQs */
+ rc = bnx2x_setup_irqs(bp);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+
+ /* Setup NIC internals and enable interrupts */
+ bnx2x_nic_init(bp, load_code);
+
+ /* Init per-function objects */
+ bnx2x_init_bp_objs(bp);
+
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ }
+
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+ /* Send LOAD_DONE command to MCP */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
+
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+#ifdef BCM_CNIC
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+#endif
+
+ for_each_nondefault_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
+ }
+
+ rc = bnx2x_init_rss_pf(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
+
+	/* Now that the clients are configured we are ready to work */
+ bp->state = BNX2X_STATE_OPEN;
+
+ /* Configure a ucast MAC */
+ rc = bnx2x_set_eth_mac(bp, true);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
+
+ if (bp->pending_max) {
+ bnx2x_update_max_mf_config(bp, bp->pending_max);
+ bp->pending_max = 0;
+ }
+
+ if (bp->port.pmf)
+ bnx2x_initial_phy_init(bp, load_mode);
+
+ /* Start fast path */
+
+ /* Initialize Rx filter. */
+ netif_addr_lock_bh(bp->dev);
+ bnx2x_set_rx_mode(bp->dev);
+ netif_addr_unlock_bh(bp->dev);
+
+ /* Start the Tx */
+ switch (load_mode) {
+ case LOAD_NORMAL:
+		/* Tx queues should only be re-enabled */
+ netif_tx_wake_all_queues(bp->dev);
+ break;
+
+ case LOAD_OPEN:
+ netif_tx_start_all_queues(bp->dev);
+ smp_mb__after_clear_bit();
+ break;
+
+ case LOAD_DIAG:
+ bp->state = BNX2X_STATE_DIAG;
+ break;
+
+ default:
+ break;
+ }
+
+ if (!bp->port.pmf)
+ bnx2x__link_status_update(bp);
+
+ /* start the timer */
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+#ifdef BCM_CNIC
+ bnx2x_setup_cnic_irq_info(bp);
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+#endif
+ bnx2x_inc_load_cnt(bp);
+
+ /* Wait for all pending SP commands to complete */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ return -EBUSY;
+ }
+
+ bnx2x_dcbx_init(bp);
+ return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error4:
+#ifdef BCM_CNIC
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+#endif
+load_error3:
+ bnx2x_int_disable_sync(bp, 1);
+
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+load_error2:
+ if (!BP_NOMCP(bp)) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
+
+ bp->port.pmf = 0;
+load_error1:
+ bnx2x_napi_disable(bp);
+load_error0:
+ bnx2x_free_mem(bp);
+
+ return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+ int i;
+ bool global = false;
+
+ if ((bp->state == BNX2X_STATE_CLOSED) ||
+ (bp->state == BNX2X_STATE_ERROR)) {
+ /* We can get here if the driver has been unloaded
+ * during parity error recovery and is either waiting for a
+ * leader to complete or for other functions to unload and
+ * then ifdown has been issued. In this case we want to
+		 * unload and let other functions complete a recovery
+ * process.
+ */
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ bp->is_leader = 0;
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+
+ DP(NETIF_MSG_HW, "Releasing a leadership...\n");
+
+ return -EINVAL;
+ }
+
+ /*
+	 * It's important to set bp->state to a value different from
+ * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+ * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+ */
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+ smp_mb();
+
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+ del_timer_sync(&bp->timer);
+
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+ bnx2x_drv_pulse(bp);
+
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Cleanup the chip if needed */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_chip_cleanup(bp, unload_mode);
+ else {
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * Prevent transactions to host from the functions on the
+ * engine that doesn't reset global blocks in case of global
+		 * attention once global blocks are reset and gates are opened
+		 * (the engine whose leader will perform the recovery
+		 * last).
+ */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
+ }
+
+ /*
+	 * At this stage no more interrupts will arrive so we may safely clean
+ * the queueable objects here in case they failed to get cleaned so far.
+ */
+ bnx2x_squeeze_objects(bp);
+
+ /* There should be no more pending SP commands at this stage */
+ bp->sp_state = 0;
+
+ bp->port.pmf = 0;
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+
+ /* Check if there are pending parity attentions. If there are - set
+ * RECOVERY_IN_PROGRESS.
+ */
+ if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Set RESET_IS_GLOBAL if needed */
+ if (global)
+ bnx2x_set_reset_global(bp);
+ }
+
+
+	/* The last driver must disable the "close the gate" functionality if
+	 * there is no parity attention or "process kill" pending.
+	 */
+ if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ bnx2x_disable_close_the_gate(bp);
+
+ return 0;
+}
+
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+ u16 pmcsr;
+
+ /* If there is no power capability, silently succeed */
+ if (!bp->pm_cap) {
+ DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+ return 0;
+ }
+
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ switch (state) {
+ case PCI_D0:
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+ PCI_PM_CTRL_PME_STATUS));
+
+ if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+ /* delay required during transition out of D3hot */
+ msleep(20);
+ break;
+
+ case PCI_D3hot:
+		/* If there are other clients above, don't
+		   shut down the power */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
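+		/* 3 is the D3hot encoding of the PM_CTRL power state field */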
+ pmcsr |= 3;
+
+ if (bp->wol)
+ pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pmcsr);
+
+ /* No more memory access after this point until
+ * device is brought back to D0.
+ */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * net_device service functions
+ */
+int bnx2x_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+ u8 cos;
+ struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+ napi);
+ struct bnx2x *bp = fp->bp;
+
+ while (1) {
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic)) {
+ napi_complete(napi);
+ return 0;
+ }
+#endif
+
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ bnx2x_tx_int(bp, &fp->txdata[cos]);
+
+
+ if (bnx2x_has_rx_work(fp)) {
+ work_done += bnx2x_rx_int(fp, budget - work_done);
+
+ /* must not complete if we consumed full budget */
+ if (work_done >= budget)
+ break;
+ }
+
+ /* Fall out from the NAPI loop if needed */
+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+#ifdef BCM_CNIC
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+ * has been updated when NAPI was scheduled.
+ */
+ if (IS_FCOE_FP(fp)) {
+ napi_complete(napi);
+ break;
+ }
+#endif
+
+ bnx2x_update_fpsb_idx(fp);
+ /* bnx2x_has_rx_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (bnx2x_update_fpsb_idx)
+ * prior to this check (bnx2x_has_rx_work) so that
+ * we won't write the "newer" value of the status block
+ * to IGU (if there was a DMA right after
+ * bnx2x_has_rx_work and if there is no rmb, the memory
+ * reading (bnx2x_update_fpsb_idx) may be postponed
+ * to right before bnx2x_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
+
+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ napi_complete(napi);
+ /* Re-enable interrupts */
+ DP(NETIF_MSG_HW,
+ "Update index to %d\n", fp->fp_hc_idx);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+ le16_to_cpu(fp->fp_hc_idx),
+ IGU_INT_ENABLE, 1);
+ break;
+ }
+ }
+ }
+
+ return work_done;
+}
+
+/* We split the first BD into header and data BDs
+ * to ease the pain of our fellow microcode engineers;
+ * we use one mapping for both BDs.
+ * So far this has only been observed to happen
+ * in Other Operating Systems(TM).
+ */
+static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod, int nbd)
+{
+ struct eth_tx_start_bd *h_tx_bd = *tx_bd;
+ struct eth_tx_bd *d_tx_bd;
+ dma_addr_t mapping;
+ int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+ /* first fix first BD */
+ h_tx_bd->nbd = cpu_to_le16(nbd);
+ h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
+ "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
+ h_tx_bd->addr_lo, h_tx_bd->nbd);
+
+ /* now get a new data BD
+ * (after the pbd) and fill it */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+ d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+ mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+ le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+ d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+
+ /* this marks the BD as one that has no individual mapping */
+ tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "TSO split data size is %d (%x:%x)\n",
+ d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+ /* update tx_bd */
+ *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+
+ return bd_prod;
+}
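+
+/*
+ * Example of the split (sizes assumed for illustration): for a TSO
+ * packet whose linear part holds, say, 66 bytes with hlen == 54, the
+ * start BD is trimmed to the 54 header bytes and the new data BD picks
+ * up the remaining 12 bytes at mapping + hlen - one DMA mapping shared
+ * by two BDs.
+ */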
+
+static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+ if (fix > 0)
+ csum = (u16) ~csum_fold(csum_sub(csum,
+ csum_partial(t_header - fix, fix, 0)));
+
+ else if (fix < 0)
+ csum = (u16) ~csum_fold(csum_add(csum,
+ csum_partial(t_header, -fix, 0)));
+
+ return swab16(csum);
+}
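+
+/*
+ * The fixup above compensates for a checksum computed from a start
+ * offset other than the transport header: a positive fix subtracts the
+ * partial sum of the |fix| bytes preceding the transport header back
+ * out, a negative fix adds the bytes between the transport header and
+ * the checksum start, and the result is re-folded and byte-swapped for
+ * the parse BD.
+ */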
+
+static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+ u32 rc;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ rc = XMIT_PLAIN;
+
+ else {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+
+ } else {
+ rc = XMIT_CSUM_V4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+ }
+ }
+
+ if (skb_is_gso_v6(skb))
+ rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+ else if (skb_is_gso(skb))
+ rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+
+ return rc;
+}
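+
+/*
+ * For instance, a CHECKSUM_PARTIAL TCP/IPv4 skb with GSO enabled comes
+ * out of the above as XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while
+ * a packet with no checksum offload requested is simply XMIT_PLAIN.
+ */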
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* Check if the packet requires linearization (i.e. it is too fragmented).
+   No need to check fragmentation if page size > 8K (there will be no
+   violation of FW restrictions) */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+ u32 xmit_type)
+{
+ int to_copy = 0;
+ int hlen = 0;
+ int first_bd_sz = 0;
+
+ /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+ if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+
+ if (xmit_type & XMIT_GSO) {
+ unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+ /* Check if LSO packet needs to be copied:
+ 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
+ int wnd_size = MAX_FETCH_BD - 3;
+ /* Number of windows to check */
+ int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+ int wnd_idx = 0;
+ int frag_idx = 0;
+ u32 wnd_sum = 0;
+
+ /* Headers length */
+ hlen = (int)(skb_transport_header(skb) - skb->data) +
+ tcp_hdrlen(skb);
+
+ /* Amount of data (w/o headers) on linear part of SKB*/
+ first_bd_sz = skb_headlen(skb) - hlen;
+
+ wnd_sum = first_bd_sz;
+
+ /* Calculate the first sum - it's special */
+ for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+ wnd_sum +=
+ skb_shinfo(skb)->frags[frag_idx].size;
+
+ /* If there was data on linear skb data - check it */
+ if (first_bd_sz > 0) {
+ if (unlikely(wnd_sum < lso_mss)) {
+ to_copy = 1;
+ goto exit_lbl;
+ }
+
+ wnd_sum -= first_bd_sz;
+ }
+
+ /* Others are easier: run through the frag list and
+ check all windows */
+ for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+ wnd_sum +=
+ skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+
+ if (unlikely(wnd_sum < lso_mss)) {
+ to_copy = 1;
+ break;
+ }
+ wnd_sum -=
+ skb_shinfo(skb)->frags[wnd_idx].size;
+ }
+ } else {
+			/* in the non-LSO case a too fragmented packet
+			   should always be linearized */
+ to_copy = 1;
+ }
+ }
+
+exit_lbl:
+ if (unlikely(to_copy))
+ DP(NETIF_MSG_TX_QUEUED,
+ "Linearization IS REQUIRED for %s packet. "
+ "num_frags %d hlen %d first_bd_sz %d\n",
+ (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+ skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+ return to_copy;
+}
+#endif
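+
+/*
+ * Sketch of the window check above (MAX_FETCH_BD value assumed): with
+ * MAX_FETCH_BD == 13, wnd_size is 10, and every run of 10 consecutive
+ * BDs (linear part plus frags) must carry at least gso_size bytes; a
+ * window that falls short would spread an MSS-sized chunk over too many
+ * BDs for the FW, so the skb is linearized instead.
+ */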
+
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
+{
+ *parsing_data |= (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
+ if ((xmit_type & XMIT_GSO_V6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
+/**
+ * bnx2x_set_pbd_gso - update PBD in GSO case.
+ *
+ * @skb: packet skb
+ * @pbd: parse BD
+ * @xmit_type: xmit flags
+ */
+static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+ pbd->tcp_flags = pbd_tcp_flags(skb);
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd->ip_id = swab16(ip_hdr(skb)->id);
+ pbd->tcp_pseudo_csum =
+ swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ } else
+ pbd->tcp_pseudo_csum =
+ swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+}
+
+/**
+ * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712 related
+ */
+static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ } else
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
+}
+
+static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+{
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+ if (xmit_type & XMIT_CSUM_V4)
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_IP_CSUM;
+ else
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_IPV6;
+
+ if (!(xmit_type & XMIT_CSUM_TCP))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
+}
+
+/**
+ * bnx2x_set_pbd_csum - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @pbd: parse BD to be updated
+ * @xmit_type: xmit flags
+ */
+static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+
+ /* for now NS flag is not used in Linux */
+ pbd->global_data =
+ (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+
+ pbd->ip_hlen_w = (skb_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ hlen += pbd->ip_hlen_w;
+
+ /* We support checksum offload for TCP and UDP only */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen += tcp_hdrlen(skb) / 2;
+ else
+ hlen += sizeof(struct udphdr) / 2;
+
+ pbd->total_hlen_w = cpu_to_le16(hlen);
+ hlen = hlen*2;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+
+ } else {
+ s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "hlen %d fix %d csum before fix %x\n",
+ le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
+
+ /* HW bug: fixup the CSUM */
+ pbd->tcp_pseudo_csum =
+ bnx2x_csum_fix(skb_transport_header(skb),
+ SKB_CS(skb), fix);
+
+ DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+ pbd->tcp_pseudo_csum);
+ }
+
+ return hlen;
+}
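+
+/*
+ * Worked example of the word arithmetic above: a standard TCP/IPv4
+ * frame has a 14-byte MAC header (7 words), a 20-byte IP header
+ * (ip_hlen_w == 10) and a 20-byte TCP header (10 words), so
+ * total_hlen_w comes out as 27 and the function returns 54 bytes.
+ */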
+
+/* called with netif_tx_lock
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue()
+ */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ struct bnx2x_fastpath *fp;
+ struct netdev_queue *txq;
+ struct bnx2x_fp_txdata *txdata;
+ struct sw_tx_bd *tx_buf;
+ struct eth_tx_start_bd *tx_start_bd, *first_bd;
+ struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
+ struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+ struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
+ u16 pkt_prod, bd_prod;
+ int nbd, txq_index, fp_index, txdata_index;
+ dma_addr_t mapping;
+ u32 xmit_type = bnx2x_xmit_type(bp, skb);
+ int i;
+ u8 hlen = 0;
+ __le16 pkt_size = 0;
+ struct ethhdr *eth;
+ u8 mac_type = UNICAST_ADDRESS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return NETDEV_TX_BUSY;
+#endif
+
+ txq_index = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, txq_index);
+
+ BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+
+ /* decode the fastpath index and the cos index from the txq */
+ fp_index = TXQ_TO_FP(txq_index);
+ txdata_index = TXQ_TO_COS(txq_index);
+
+#ifdef BCM_CNIC
+ /*
+ * Override the above for the FCoE queue:
+ * - FCoE fp entry is right after the ETH entries.
+ * - FCoE L2 queue uses bp->txdata[0] only.
+ */
+ if (unlikely(!NO_FCOE(bp) && (txq_index ==
+ bnx2x_fcoe_tx(bp, txq_index)))) {
+ fp_index = FCOE_IDX;
+ txdata_index = 0;
+ }
+#endif
+
+ /* enable this debug print to view the transmission queue being used
+ DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
+ txq_index, fp_index, txdata_index); */
+
+ /* locate the fastpath and the txdata */
+ fp = &bp->fp[fp_index];
+ txdata = &fp->txdata[txdata_index];
+
+	/* enable this debug print to view the transmission details
+ DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
+ " tx_data ptr %p fp pointer %p\n",
+ txdata->cid, fp_index, txdata_index, txdata, fp); */
+
+ if (unlikely(bnx2x_tx_avail(bp, txdata) <
+ (skb_shinfo(skb)->nr_frags + 3))) {
+ fp->eth_q_stats.driver_xoff++;
+ netif_tx_stop_queue(txq);
+ BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
+ "protocol(%x,%x) gso type %x xmit_type %x\n",
+ txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+
+ eth = (struct ethhdr *)skb->data;
+
+	/* set flag according to packet type (UNICAST_ADDRESS is default) */
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (is_broadcast_ether_addr(eth->h_dest))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+ /* First, check if we need to linearize the skb (due to FW
+ restrictions). No need to check fragmentation if page size > 8K
+	   (there will be no violation of FW restrictions) */
+ if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+ /* Statistics of linearization */
+ bp->lin_cnt++;
+ if (skb_linearize(skb) != 0) {
+ DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
+ "silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+ /* Map skb linear data for DMA */
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
+ "silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ /*
+ Please read carefully. First we use one BD which we mark as start,
+ then we have a parsing info BD (used for TSO or xsum),
+ and only then we have the rest of the TSO BDs.
+ (don't forget to mark the last one as last,
+ and to unmap only AFTER you write to the BD ...)
+	   And above all, all pbd sizes are in words - NOT DWORDS!
+ */
+
+ /* get current pkt produced now - advance it just before sending packet
+ * since mapping of pages may fail and cause packet to be dropped
+ */
+ pkt_prod = txdata->tx_pkt_prod;
+ bd_prod = TX_BD(txdata->tx_bd_prod);
+
+ /* get a tx_buf and first BD
+ * tx_start_bd may be changed during SPLIT,
+ * but first_bd will always stay first
+ */
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+ first_bd = tx_start_bd;
+
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
+ mac_type);
+
+ /* header nbd */
+ SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+
+ /* remember the first BD of the packet */
+ tx_buf->first_bd = txdata->tx_bd_prod;
+ tx_buf->skb = skb;
+ tx_buf->flags = 0;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "sending pkt %u @%p next_idx %u bd %u @%p\n",
+ pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tx_tag_get(skb));
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ } else
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+
+ /* turn on parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ if (xmit_type & XMIT_CSUM)
+ bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
+
+ if (!CHIP_IS_E1x(bp)) {
+ pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ /* Set PBD in checksum offload case */
+ if (xmit_type & XMIT_CSUM)
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+ if (IS_MF_SI(bp)) {
+ /*
+ * fill in the MAC addresses in the PBD - for local
+ * switching
+ */
+ bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
+ &pbd_e2->src_mac_addr_mid,
+ &pbd_e2->src_mac_addr_lo,
+ eth->h_source);
+ bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
+ &pbd_e2->dst_mac_addr_mid,
+ &pbd_e2->dst_mac_addr_lo,
+ eth->h_dest);
+ }
+ } else {
+ pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+ /* Set PBD in checksum offload case */
+ if (xmit_type & XMIT_CSUM)
+ hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
+
+ }
+
+ /* Setup the data pointer of the first BD of the packet */
+ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+ pkt_size = tx_start_bd->nbytes;
+
+ DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
+ " nbytes %d flags %x vlan %x\n",
+ tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
+ le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ tx_start_bd->bd_flags.as_bitfield,
+ le16_to_cpu(tx_start_bd->vlan_or_ethertype));
+
+ if (xmit_type & XMIT_GSO) {
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "TSO packet len %d hlen %d total len %d tso size %d\n",
+ skb->len, hlen, skb_headlen(skb),
+ skb_shinfo(skb)->gso_size);
+
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
+ if (unlikely(skb_headlen(skb) > hlen))
+ bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
+ &tx_start_bd, hlen,
+ bd_prod, ++nbd);
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+ xmit_type);
+ else
+ bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+ }
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
+ tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+
+ /* Handle fragmented skb */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+
+ DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
+ "dropping packet...\n");
+
+			/* we need to unmap all buffers already mapped
+			 * for this SKB;
+			 * first_bd->nbd needs to be properly updated
+			 * before the call to bnx2x_free_tx_pkt
+			 */
+ first_bd->nbd = cpu_to_le16(nbd);
+ bnx2x_free_tx_pkt(bp, txdata,
+ TX_BD(txdata->tx_pkt_prod));
+ return NETDEV_TX_OK;
+ }
+
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+ tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+ if (total_pkt_bd == NULL)
+ total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+ tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ tx_data_bd->nbytes = cpu_to_le16(frag->size);
+ le16_add_cpu(&pkt_size, frag->size);
+ nbd++;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "frag %d bd @%p addr (%x:%x) nbytes %d\n",
+ i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
+ le16_to_cpu(tx_data_bd->nbytes));
+ }
+
+ DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+
+ /* update with actual num BDs */
+ first_bd->nbd = cpu_to_le16(nbd);
+
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ /* now send a tx doorbell, counting the next BD
+ * if the packet contains or ends with it
+ */
+ if (TX_BD_POFF(bd_prod) < nbd)
+ nbd++;
+
+ /* total_pkt_bytes should be set on the first data BD if
+ * it's not an LSO packet and there is more than one
+ * data BD. In this case pkt_size is limited by an MTU value.
+ * However we prefer to set it for an LSO packet (while we don't
+	 * have to) in order to save some CPU cycles in the non-LSO
+	 * case, where we care much more about them.
+ */
+ if (total_pkt_bd != NULL)
+ total_pkt_bd->total_pkt_bytes = pkt_size;
+
+ if (pbd_e1x)
+ DP(NETIF_MSG_TX_QUEUED,
+ "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
+ " tcp_flags %x xsum %x seq %u hlen %u\n",
+ pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
+ pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
+ pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
+ le16_to_cpu(pbd_e1x->total_hlen_w));
+ if (pbd_e2)
+ DP(NETIF_MSG_TX_QUEUED,
+ "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
+ pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
+ pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
+ pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2->parsing_data);
+ DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+
+ txdata->tx_pkt_prod++;
+ /*
+ * Make sure that the BD data is updated before updating the producer
+ * since FW might read the BD right after the producer is updated.
+ * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW
+	 * assumes packets must have BDs.
+ */
+ wmb();
+
+ txdata->tx_db.data.prod += nbd;
+ barrier();
+
+ DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+ mmiowb();
+
+ txdata->tx_bd_prod += nbd;
+
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+ netif_tx_stop_queue(txq);
+
+ /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons */
+ smp_mb();
+
+ fp->eth_q_stats.driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+ netif_tx_wake_queue(txq);
+ }
+ txdata->tx_pkt++;
+
+ return NETDEV_TX_OK;
+}
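+
+/*
+ * A rough picture of the BD chain built above for a two-frag
+ * checksummed packet:
+ *
+ *	start BD (linear data) -> parse BD -> data BD -> data BD
+ *
+ * nbd starts at 2 (start BD + parse BD), grows by one per mapped frag
+ * (and per TSO split), first_bd->nbd is fixed up once all frags are
+ * mapped, and only then is the doorbell rung.
+ */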
+
+/**
+ * bnx2x_setup_tc - routine to configure net_device for multi tc
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ *
+ * callback connected to the ndo_setup_tc function pointer
+ */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
+{
+ int cos, prio, count, offset;
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* setup tc must be called under rtnl lock */
+ ASSERT_RTNL();
+
+ /* no traffic classes requested. aborting */
+ if (!num_tc) {
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+ /* requested to support too many traffic classes */
+ if (num_tc > bp->max_cos) {
+ DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
+ " requested: %d. max supported is %d\n",
+ num_tc, bp->max_cos);
+ return -EINVAL;
+ }
+
+ /* declare amount of supported traffic classes */
+ if (netdev_set_num_tc(dev, num_tc)) {
+ DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
+ num_tc);
+ return -EINVAL;
+ }
+
+ /* configure priority to traffic class mapping */
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
+ prio, bp->prio_to_cos[prio]);
+ }
+
+
+	/* Use this configuration to differentiate tc0 from other COSes
+	   This can be used for ets or pfc, and save the effort of setting
+	   up a multi class queue disc or negotiating DCBX with a switch
+ netdev_set_prio_tc_map(dev, 0, 0);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
+ for (prio = 1; prio < 16; prio++) {
+ netdev_set_prio_tc_map(dev, prio, 1);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
+ } */
+
+ /* configure traffic class to transmission queue mapping */
+ for (cos = 0; cos < bp->max_cos; cos++) {
+ count = BNX2X_NUM_ETH_QUEUES(bp);
+ offset = cos * MAX_TXQS_PER_COS;
+ netdev_set_tc_queue(dev, cos, count, offset);
+ DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
+ cos, offset, count);
+ }
+
+ return 0;
+}
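+
+/*
+ * Illustration of the resulting mapping (queue counts assumed): with,
+ * say, max_cos == 3 and 4 ETH queues, tc0 maps to the txq window at
+ * offset 0, tc1 to the window at MAX_TXQS_PER_COS and tc2 to the next
+ * one - each traffic class owning BNX2X_NUM_ETH_QUEUES(bp) queues.
+ */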
+
+/* called with rtnl_lock */
+int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
+
+ if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ rc = bnx2x_set_eth_mac(bp, false);
+ if (rc)
+ return rc;
+ }
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ if (netif_running(dev))
+ rc = bnx2x_set_eth_mac(bp, true);
+
+ return rc;
+}
+
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+ union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+ struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+ u8 cos;
+
+ /* Common */
+#ifdef BCM_CNIC
+ if (IS_FCOE_IDX(fp_index)) {
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ fp->status_blk_mapping = 0;
+
+ } else {
+#endif
+ /* status blocks */
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(sb->e2_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(sb->e1x_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ /* Rx */
+ if (!skip_rx_queue(bp, fp_index)) {
+ bnx2x_free_rx_bds(fp);
+
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+ bnx2x_fp(bp, fp_index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+ bnx2x_fp(bp, fp_index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+ bnx2x_fp(bp, fp_index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ }
+
+ /* Tx */
+ if (!skip_tx_queue(bp, fp_index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ DP(BNX2X_MSG_SP,
+ "freeing tx memory of fp %d cos %d cid %d\n",
+ fp_index, cos, txdata->cid);
+
+ BNX2X_FREE(txdata->tx_buf_ring);
+ BNX2X_PCI_FREE(txdata->tx_desc_ring,
+ txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ }
+ /* end of fastpath */
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+ int i;
+ for_each_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+ if (!CHIP_IS_E1x(bp)) {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e2_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e2_sb->sb.running_index;
+ } else {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
+ }
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block *sb;
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ int ring_size = 0;
+ u8 cos;
+	int rx_ring_size = 0;
+
+	/* if rx_ring_size specified - use it */
+	if (!bp->rx_ring_size) {
+
+		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+		/* allocate at least number of buffers required by FW */
+		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+				     MIN_RX_SIZE_TPA, rx_ring_size);
+
+		bp->rx_ring_size = rx_ring_size;
+	} else
+		rx_ring_size = bp->rx_ring_size;
+
+ /* Common */
+ sb = &bnx2x_fp(bp, index, status_blk);
+#ifdef BCM_CNIC
+ if (!IS_FCOE_IDX(index)) {
+#endif
+ /* status blocks */
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+
+ /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+ * set shortcuts for it.
+ */
+ if (!IS_FCOE_IDX(index))
+ set_sb_shortcuts(bp, index);
+
+ /* Tx */
+ if (!skip_tx_queue(bp, index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ DP(BNX2X_MSG_SP, "allocating tx memory of "
+ "fp %d cos %d\n",
+ index, cos);
+
+ BNX2X_ALLOC(txdata->tx_buf_ring,
+ sizeof(struct sw_tx_bd) * NUM_TX_BD);
+ BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
+ &txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ }
+
+ /* Rx */
+ if (!skip_rx_queue(bp, index)) {
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
+ sizeof(struct sw_rx_bd) * NUM_RX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
+ &bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+ &bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
+ sizeof(struct sw_rx_page) * NUM_RX_SGE);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
+ &bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ /* RX BD ring */
+ bnx2x_set_next_page_rx_bd(fp);
+
+ /* CQ ring */
+ bnx2x_set_next_page_rx_cq(fp);
+
+ /* BDs */
+ ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+ if (ring_size < rx_ring_size)
+ goto alloc_mem_err;
+ }
+
+ return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+ BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+ index, ring_size);
+	/* FW will drop all packets if the queue is not big enough.
+	 * In these cases we disable the queue.
+	 * Min size is different for OOO, TPA and non-TPA queues.
+	 */
+ if (ring_size < (fp->disable_tpa ?
+ MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
+ /* release memory allocated for this queue */
+ bnx2x_free_fp_mem_at(bp, index);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+ int i;
+
+ /**
+ * 1. Allocate FP for leading - fatal if error
+ * 2. {CNIC} Allocate FCoE FP - fatal if error
+ * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
+ * 4. Allocate RSS - fix number of queues if error
+ */
+
+ /* leading */
+ if (bnx2x_alloc_fp_mem_at(bp, 0))
+ return -ENOMEM;
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+			/* we will fail the load process instead of
+			 * marking NO_FCOE_FLAG
+			 */
+ return -ENOMEM;
+#endif
+
+ /* RSS */
+ for_each_nondefault_eth_queue(bp, i)
+ if (bnx2x_alloc_fp_mem_at(bp, i))
+ break;
+
+ /* handle memory failures */
+ if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+ int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+ WARN_ON(delta < 0);
+#ifdef BCM_CNIC
+ /**
+ * move non eth FPs next to last eth FP
+ * must be done in that order
+ * FCOE_IDX < FWD_IDX < OOO_IDX
+ */
+
+		/* move the FCoE fp even if NO_FCOE_FLAG is on */
+ bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+#endif
+ bp->num_queues -= delta;
+ BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+ bp->num_queues + delta, bp->num_queues);
+ }
+
+ return 0;
+}
+
+void bnx2x_free_mem_bp(struct bnx2x *bp)
+{
+ kfree(bp->fp);
+ kfree(bp->msix_table);
+ kfree(bp->ilt);
+}
+
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp;
+ struct msix_entry *tbl;
+ struct bnx2x_ilt *ilt;
+ int msix_table_size = 0;
+
+	/*
+	 * The biggest MSI-X table we might need is the maximum number of fast
+	 * path IGU SBs plus the default SB (for PF).
+	 */
+ msix_table_size = bp->igu_sb_cnt + 1;
+
+ /* fp array: RSS plus CNIC related L2 queues */
+ fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+ sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ goto alloc_err;
+ bp->fp = fp;
+
+ /* msix table */
+ tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ goto alloc_err;
+ bp->msix_table = tbl;
+
+ /* ilt */
+ ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
+ if (!ilt)
+ goto alloc_err;
+ bp->ilt = ilt;
+
+ return 0;
+alloc_err:
+ bnx2x_free_mem_bp(bp);
+ return -ENOMEM;
+
+}
+
+int bnx2x_reload_if_running(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (unlikely(!netif_running(dev)))
+ return 0;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = 0;
+ if (bp->link_params.num_phys <= 1)
+ return INT_PHY;
+
+ if (bp->link_vars.link_up) {
+ sel_phy_idx = EXT_PHY1;
+ /* In case link is SERDES, check if the EXT_PHY2 is the one */
+ if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+ (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+ sel_phy_idx = EXT_PHY2;
+ } else {
+
+ switch (bnx2x_phy_selection(&bp->link_params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY2;
+ break;
+ }
+ }
+
+ return sel_phy_idx;
+
+}
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+	/*
+	 * The selected active PHY is always the one after swapping (in case
+	 * PHY swapping is enabled), so when swapping is enabled we need to
+	 * reverse the configuration.
+	 */
+
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ if (sel_phy_idx == EXT_PHY1)
+ sel_phy_idx = EXT_PHY2;
+ else if (sel_phy_idx == EXT_PHY2)
+ sel_phy_idx = EXT_PHY1;
+ }
+ return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ switch (type) {
+ case NETDEV_FCOE_WWNN:
+ *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
+ cp->fcoe_wwn_node_name_lo);
+ break;
+ case NETDEV_FCOE_WWPN:
+ *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
+ cp->fcoe_wwn_port_name_lo);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+/* called with rtnl_lock */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+ return -EINVAL;
+
+ /* This does not race with packet allocation
+ * because the actual alloc size is
+ * only updated as part of load
+ */
+ dev->mtu = new_mtu;
+
+ return bnx2x_reload_if_running(dev);
+}
+
+u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* TPA requires Rx CSUM offloading */
+ if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+int bnx2x_set_features(struct net_device *dev, u32 features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 flags = bp->flags;
+ bool bnx2x_reload = false;
+
+ if (features & NETIF_F_LRO)
+ flags |= TPA_ENABLE_FLAG;
+ else
+ flags &= ~TPA_ENABLE_FLAG;
+
+ if (features & NETIF_F_LOOPBACK) {
+ if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+ bnx2x_reload = true;
+ }
+ } else {
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+ bnx2x_reload = true;
+ }
+ }
+
+ if (flags ^ bp->flags) {
+ bp->flags = flags;
+ bnx2x_reload = true;
+ }
+
+ if (bnx2x_reload) {
+ if (bp->recovery_state == BNX2X_RECOVERY_DONE)
+ return bnx2x_reload_if_running(dev);
+ /* else: bnx2x_nic_load() will be called at end of recovery */
+ }
+
+ return 0;
+}
+
+void bnx2x_tx_timeout(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (!bp->panic)
+ bnx2x_panic();
+#endif
+
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+
+ /* This allows the netif to be shutdown gracefully before resetting */
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return -ENODEV;
+ }
+ bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ pci_save_state(pdev);
+
+ if (!netif_running(dev)) {
+ rtnl_unlock();
+ return 0;
+ }
+
+ netif_device_detach(dev);
+
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+ bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+int bnx2x_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+ int rc;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return -ENODEV;
+ }
+ bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ rtnl_lock();
+
+ pci_restore_state(pdev);
+
+ if (!netif_running(dev)) {
+ rtnl_unlock();
+ return 0;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D0);
+ netif_device_attach(dev);
+
+ /* Since the chip was reset, clear the FW sequence number */
+ bp->fw_seq = 0;
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+ u32 cid)
+{
+ /* ustorm cxt validation */
+ cxt->ustorm_ag_context.cdu_usage =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+ /* xcontext validation */
+ cxt->xstorm_ag_context.cdu_reserved =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+ u8 fw_sb_id, u8 sb_index,
+ u8 ticks)
+{
+
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+ REG_WR8(bp, addr, ticks);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
+}
+
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 disable)
+{
+ u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+ u16 flags = REG_RD16(bp, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= enable_flag;
+ REG_WR16(bp, addr, flags);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec)
+{
+ int port = BP_PORT(bp);
+ u8 ticks = usec / BNX2X_BTR;
+
+ storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
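+	/* a usec value of zero also forces the index into the disabled state */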
+ disable = disable ? 1 : (usec ? 0 : 1);
+ storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
--- /dev/null
+/* bnx2x_ethtool.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+/* Note: in the format strings below %s is replaced by the queue-name which is
+ * either its index or 'fcoe' for the fcoe queue. Make sure the format string
+ * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
+ */
+#define MAX_QUEUE_NAME_LEN 4
+static const struct {
+ long offset;
+ int size;
+ char string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[] = {
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+ { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, "[%s]: rx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, "[%s]: rx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, "[%s]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ { Q_STATS_OFFSET32(rx_err_discard_pkt),
+ 4, "[%s]: rx_phy_ip_err_discards"},
+ { Q_STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, "[%s]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
+
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, "[%s]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%s]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%s]: tx_bcast_packets" },
+ { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, "[%s]: tpa_aggregations" },
+ { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, "[%s]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
+};
+
+#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
+
+static const struct {
+ long offset;
+ int size;
+ u32 flags;
+#define STATS_FLAGS_PORT 1
+#define STATS_FLAGS_FUNC 2
+#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+ char string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[] = {
+/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bytes" },
+ { STATS_OFFSET32(error_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+ { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_crc_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_align_errors" },
+ { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+ { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+ 8, STATS_FLAGS_PORT, "rx_fragments" },
+ { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+ 8, STATS_FLAGS_PORT, "rx_jabbers" },
+ { STATS_OFFSET32(no_buff_discard_hi),
+ 8, STATS_FLAGS_BOTH, "rx_discards" },
+ { STATS_OFFSET32(mac_filter_discard),
+ 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+ { STATS_OFFSET32(mf_tag_discard),
+ 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+ { STATS_OFFSET32(brb_drop_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_discard" },
+ { STATS_OFFSET32(brb_truncate_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+ { STATS_OFFSET32(pause_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "rx_pause_frames" },
+ { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+ 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+ { STATS_OFFSET32(nig_timer_max),
+ 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+ 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+ { STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+ { STATS_OFFSET32(hw_csum_err),
+ 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+ { STATS_OFFSET32(total_bytes_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bytes" },
+ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+ 8, STATS_FLAGS_PORT, "tx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+ { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_mac_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_single_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ 8, STATS_FLAGS_PORT, "tx_deferred" },
+ { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_late_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_total_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+ { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+ { STATS_OFFSET32(pause_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "tx_pause_frames" },
+ { STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
+ { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
+ { STATS_OFFSET32(total_tpa_bytes_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_bytes"}
+};
+
+#define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)
+
+static int bnx2x_get_port_type(struct bnx2x *bp)
+{
+ int port_type;
+ u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
+ switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_XFP_FIBER:
+ case ETH_PHY_KR:
+ case ETH_PHY_CX4:
+ port_type = PORT_FIBRE;
+ break;
+ case ETH_PHY_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case ETH_PHY_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case ETH_PHY_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case ETH_PHY_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+ /* Dual Media boards present all available port types */
+ cmd->supported = bp->port.supported[cfg_idx] |
+ (bp->port.supported[cfg_idx ^ 1] &
+ (SUPPORTED_TP | SUPPORTED_FIBRE));
+ cmd->advertising = bp->port.advertising[cfg_idx];
+
+ if ((bp->state == BNX2X_STATE_OPEN) &&
+ !(bp->flags & MF_FUNC_DIS) &&
+ (bp->link_vars.link_up)) {
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->duplex = bp->link_vars.duplex;
+ } else {
+ ethtool_cmd_speed_set(
+ cmd, bp->link_params.req_line_speed[cfg_idx]);
+ cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ }
+
+ if (IS_MF(bp))
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+
+ cmd->port = bnx2x_get_port_type(bp);
+
+ cmd->phy_address = bp->mdio.prtad;
+ cmd->transceiver = XCVR_INTERNAL;
+
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
+ cmd->autoneg = AUTONEG_ENABLE;
+ else
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d transceiver %d\n"
+ " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
+ cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+ cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+ return 0;
+}
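
A note on how this getter is reached: userspace issues an ETHTOOL_GSET ioctl, which the ethtool core routes to the driver's .get_settings. A minimal standalone sketch of such a caller (outside the patch; the interface name "eth0" is a placeholder):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("speed %u Mb/s, %s duplex, autoneg %u\n",
		       ethtool_cmd_speed(&ecmd),
		       ecmd.duplex == DUPLEX_FULL ? "full" : "half",
		       ecmd.autoneg);
	if (fd >= 0)
		close(fd);
	return 0;
}
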
+
+static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+ u32 speed;
+
+ if (IS_MF_SD(bp))
+ return 0;
+
+ DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d transceiver %d\n"
+ " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
+ cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+ cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+ speed = ethtool_cmd_speed(cmd);
+
+ if (IS_MF_SI(bp)) {
+ u32 part;
+ u32 line_speed = bp->link_vars.line_speed;
+
+ /* use 10G if no link detected */
+ if (!line_speed)
+ line_speed = 10000;
+
+ if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+ BNX2X_DEV_INFO("To set speed BC %X or higher "
+ "is required, please upgrade BC\n",
+ REQ_BC_VER_4_SET_MF_BW);
+ return -EINVAL;
+ }
+
+ part = (speed * 100) / line_speed;
+
+ if (line_speed < speed || !part) {
+ BNX2X_DEV_INFO("Speed setting should be in a range "
+ "from 1%% to 100%% "
+ "of actual line speed\n");
+ return -EINVAL;
+ }
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ /* store value for following "load" */
+ bp->pending_max = part;
+ else
+ bnx2x_update_max_mf_config(bp, part);
+
+ return 0;
+ }
+
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ old_multi_phy_config = bp->link_params.multi_phy_config;
+ switch (cmd->port) {
+ case PORT_TP:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+	/* Save new config in case the command completes successfully */
+ new_multi_phy_config = bp->link_params.multi_phy_config;
+ /* Get the new cfg_idx */
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ /* Restore old config in case command failed */
+ bp->link_params.multi_phy_config = old_multi_phy_config;
+ DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+ DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+ return -EINVAL;
+ }
+
+ /* advertise the requested speed and duplex if supported */
+		if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+			DP(NETIF_MSG_LINK, "Advertisement parameters "
+					   "are not supported\n");
+			return -EINVAL;
+		}
+
+ bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
+		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
+ cmd->advertising);
+		if (cmd->advertising) {
+
+			bp->link_params.speed_cap_mask[cfg_idx] = 0;
+			if (cmd->advertising & ADVERTISED_10baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+			}
+			if (cmd->advertising & ADVERTISED_10baseT_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+
+			if (cmd->advertising & ADVERTISED_100baseT_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+			if (cmd->advertising & ADVERTISED_100baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+			}
+			if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+			}
+			if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+						ADVERTISED_1000baseKX_Full))
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+			if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+						ADVERTISED_10000baseKX4_Full |
+						ADVERTISED_10000baseKR_Full))
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+		}
+ } else { /* forced speed */
+ /* advertise the requested speed and duplex if supported */
+ switch (speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_FULL) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_10baseT_Full)) {
+ DP(NETIF_MSG_LINK,
+ "10M full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_10baseT_Half)) {
+ DP(NETIF_MSG_LINK,
+ "10M half not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ }
+ break;
+
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_FULL) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_100baseT_Full)) {
+ DP(NETIF_MSG_LINK,
+ "100M full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_100baseT_Half)) {
+ DP(NETIF_MSG_LINK,
+ "100M half not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ }
+ break;
+
+ case SPEED_1000:
+ if (cmd->duplex != DUPLEX_FULL) {
+ DP(NETIF_MSG_LINK, "1G half not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseT_Full)) {
+ DP(NETIF_MSG_LINK, "1G full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ break;
+
+ case SPEED_2500:
+ if (cmd->duplex != DUPLEX_FULL) {
+ DP(NETIF_MSG_LINK,
+ "2.5G half not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bp->port.supported[cfg_idx]
+ & SUPPORTED_2500baseX_Full)) {
+ DP(NETIF_MSG_LINK,
+ "2.5G full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_2500baseX_Full |
+ ADVERTISED_TP);
+ break;
+
+ case SPEED_10000:
+ if (cmd->duplex != DUPLEX_FULL) {
+ DP(NETIF_MSG_LINK, "10G half not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bp->port.supported[cfg_idx]
+ & SUPPORTED_10000baseT_Full)) {
+ DP(NETIF_MSG_LINK, "10G full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ break;
+
+ default:
+ DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+
+ bp->link_params.req_line_speed[cfg_idx] = speed;
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = advertising;
+ }
+
+ DP(NETIF_MSG_LINK, "req_line_speed %d\n"
+ " req_duplex %d advertising 0x%x\n",
+ bp->link_params.req_line_speed[cfg_idx],
+ bp->link_params.req_duplex[cfg_idx],
+ bp->port.advertising[cfg_idx]);
+
+ /* Set new config */
+ bp->link_params.multi_phy_config = new_multi_phy_config;
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
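
The IS_MF_SI branch above maps the requested ethtool "speed" onto a percentage of the current line speed, falling back to 10G when no link is up. A standalone sketch of that arithmetic; mf_bw_part() is an illustrative name, not a driver symbol:

#include <assert.h>

static unsigned int mf_bw_part(unsigned int speed, unsigned int line_speed)
{
	if (!line_speed)		/* use 10G if no link detected */
		line_speed = 10000;
	return speed * 100 / line_speed;
}

int main(void)
{
	assert(mf_bw_part(2500, 10000) == 25);	/* 2.5G of a 10G link */
	assert(mf_bw_part(10000, 0) == 100);	/* no link: 10G assumed */
	assert(mf_bw_part(12000, 10000) > 100);	/* the driver rejects this */
	return 0;
}
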
+
+#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
+#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
+#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
+#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
+#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+
+static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_ONLINE(reg_info->info);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_ONLINE(reg_info->info);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_ONLINE(reg_info->info);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3_ONLINE(reg_info->info);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_ONLINE(reg_info->info);
+ else
+ return false;
+}
+
+/******* Paged registers info selectors ********/
+static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_vals_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_vals_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_MODE_VALUES_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_MODE_VALUES_E3;
+ else
+ return 0;
+}
+
+static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_write_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_write_regs_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_WRITE_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_WRITE_REGS_E3;
+ else
+ return 0;
+}
+
+static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_read_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_read_regs_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_READ_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_READ_REGS_E3;
+ else
+ return 0;
+}
+
+static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ int num_pages = __bnx2x_get_page_reg_num(bp);
+ int page_write_num = __bnx2x_get_page_write_num(bp);
+ const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
+ int page_read_num = __bnx2x_get_page_read_num(bp);
+ int regdump_len = 0;
+ int i, j, k;
+
+ for (i = 0; i < REGS_COUNT; i++)
+		if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ regdump_len += reg_addrs[i].size;
+
+ for (i = 0; i < num_pages; i++)
+ for (j = 0; j < page_write_num; j++)
+ for (k = 0; k < page_read_num; k++)
+ if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
+ regdump_len += page_read_addr[k].size;
+
+ return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_regs_len(bp);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_hdr);
+
+ return regdump_len;
+}
+
+/**
+ * bnx2x_read_pages_regs - read "paged" registers
+ *
+ * @bp device handle
+ * @p output buffer
+ *
+ * Reads "paged" memories: memories that may only be read by first writing to a
+ * specific address ("write address") and then reading from a specific address
+ * ("read address"). There may be more than one write address per "page" and
+ * more than one read address per write address.
+ */
+static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 i, j, k, n;
+ /* addresses of the paged registers */
+ const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
+ /* number of paged registers */
+ int num_pages = __bnx2x_get_page_reg_num(bp);
+ /* write addresses */
+ const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
+ /* number of write addresses */
+ int write_num = __bnx2x_get_page_write_num(bp);
+ /* read addresses info */
+ const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
+ /* number of read addresses */
+ int read_num = __bnx2x_get_page_read_num(bp);
+
+ for (i = 0; i < num_pages; i++) {
+ for (j = 0; j < write_num; j++) {
+ REG_WR(bp, write_addr[j], page_addr[i]);
+ for (k = 0; k < read_num; k++)
+ if (bnx2x_is_reg_online(bp, &read_addr[k]))
+ for (n = 0; n <
+ read_addr[k].size; n++)
+ *p++ = REG_RD(bp,
+ read_addr[k].addr + n*4);
+ }
+ }
+}
+
+static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 i, j;
+
+ /* Read the regular registers */
+ for (i = 0; i < REGS_COUNT; i++)
+		if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ for (j = 0; j < reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+
+ /* Read "paged" registes */
+ bnx2x_read_pages_regs(bp, p);
+}
+
+static void bnx2x_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *_p)
+{
+ u32 *p = _p;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_hdr dump_hdr = {0};
+
+ regs->version = 0;
+ memset(p, 0, regs->len);
+
+ if (!netif_running(bp->dev))
+ return;
+
+	/* Disable parity attentions while the dump runs, since reading
+	 * never-written registers may cause false alarms. We will
+	 * re-enable parity attentions right after the dump.
+ */
+ bnx2x_disable_blocks_parity(bp);
+
+ dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
+ dump_hdr.dump_sign = dump_sign_all;
+ dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
+ dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
+ dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
+ dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+
+ if (CHIP_IS_E1(bp))
+ dump_hdr.info = RI_E1_ONLINE;
+ else if (CHIP_IS_E1H(bp))
+ dump_hdr.info = RI_E1H_ONLINE;
+ else if (!CHIP_IS_E1x(bp))
+ dump_hdr.info = RI_E2_ONLINE |
+ (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
+ p += dump_hdr.hdr_size + 1;
+
+ /* Actually read the registers */
+ __bnx2x_get_regs(bp, p);
+
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+}
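
The "p += dump_hdr.hdr_size + 1" step above works because hdr_size is recorded as the header length in dwords minus one. A tiny standalone check of the pointer arithmetic; demo_hdr is a made-up stand-in for struct dump_hdr:

#include <assert.h>
#include <stdint.h>

struct demo_hdr { uint32_t a, b, c, d, e; };	/* 5 dwords */

int main(void)
{
	uint32_t buf[16], *p = buf;
	uint32_t hdr_size = (sizeof(struct demo_hdr) / 4) - 1;

	p += hdr_size + 1;	/* same arithmetic as bnx2x_get_regs() */
	assert(p == buf + sizeof(struct demo_hdr) / 4);
	return 0;
}
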
+
+static void bnx2x_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+
+ phy_fw_ver[0] = '\0';
+ if (bp->port.pmf) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ (bp->state != BNX2X_STATE_CLOSED),
+ phy_fw_ver, PHY_FW_VER_LEN);
+ bnx2x_release_phy_lock(bp);
+ }
+
+ strncpy(info->fw_version, bp->fw_ver, 32);
+ snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ strcpy(info->bus_info, pci_name(bp->pdev));
+ info->n_stats = BNX2X_NUM_STATS;
+ info->testinfo_len = BNX2X_NUM_TESTS;
+ info->eedump_len = bp->common.flash_size;
+ info->regdump_len = bnx2x_get_regs_len(dev);
+}
+
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & NO_WOL_FLAG) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ if (bp->wol)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ }
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ if (bp->flags & NO_WOL_FLAG)
+ return -EINVAL;
+
+ bp->wol = 1;
+ } else
+ bp->wol = 0;
+
+ return 0;
+}
+
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->msg_enable;
+}
+
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (capable(CAP_NET_ADMIN)) {
+ /* dump MCP trace */
+ if (level & BNX2X_MSG_MCP)
+ bnx2x_fw_dump_lvl(bp, KERN_INFO);
+ bp->msg_enable = level;
+ }
+}
+
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bp->port.pmf)
+ return 0;
+
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
+ return 0;
+
+ return bp->link_vars.link_up;
+}
+
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->common.flash_size;
+}
+
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int count, i;
+ u32 val = 0;
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* request access to nvram interface */
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+ for (i = 0; i < count*10; i++) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+ break;
+
+ udelay(5);
+ }
+
+ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+ DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int count, i;
+ u32 val = 0;
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* relinquish nvram interface */
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+ for (i = 0; i < count*10; i++) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+ DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+ /* enable both bits, even on read */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+ (val | MCPR_NVM_ACCESS_ENABLE_EN |
+ MCPR_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+ /* disable both bits, even after read */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+ (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+ MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+}
+
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
+ u32 cmd_flags)
+{
+ int count, i, rc;
+ u32 val;
+
+ /* build the command word */
+ cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+ /* need to clear DONE bit separately */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+ /* address of the NVRAM to read from */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+ /* issue a read command */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* wait for completion */
+ *ret_val = 0;
+ rc = -EBUSY;
+ for (i = 0; i < count; i++) {
+ udelay(5);
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+
+ if (val & MCPR_NVM_COMMAND_DONE) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+			/* we read nvram data in cpu order
+			 * but ethtool sees it as an array of bytes;
+			 * converting to big-endian does the work */
+ *ret_val = cpu_to_be32(val);
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ __be32 val;
+
+ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+ DP(BNX2X_MSG_NVM,
+ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+ offset, buf_size);
+ return -EINVAL;
+ }
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+ " buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ /* read the first word(s) */
+ cmd_flags = MCPR_NVM_COMMAND_FIRST;
+ while ((buf_size > sizeof(u32)) && (rc == 0)) {
+ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+ memcpy(ret_buf, &val, 4);
+
+ /* advance to the next dword */
+ offset += sizeof(u32);
+ ret_buf += sizeof(u32);
+ buf_size -= sizeof(u32);
+ cmd_flags = 0;
+ }
+
+ if (rc == 0) {
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+ memcpy(ret_buf, &val, 4);
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
+
+static int bnx2x_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+ eeprom->len, eeprom->len);
+
+ /* parameters already validated in ethtool_get_eeprom */
+
+ rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+ return rc;
+}
+
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+ u32 cmd_flags)
+{
+ int count, i, rc;
+
+ /* build the command word */
+ cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+ /* need to clear DONE bit separately */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+ /* write the data */
+ REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+
+ /* address of the NVRAM to write to */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+ /* issue the write command */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* wait for completion */
+ rc = -EBUSY;
+ for (i = 0; i < count; i++) {
+ udelay(5);
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+ if (val & MCPR_NVM_COMMAND_DONE) {
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
+
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ u32 align_offset;
+ __be32 val;
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+ " buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+ align_offset = (offset & ~0x03);
+ rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+
+ if (rc == 0) {
+ val &= ~(0xff << BYTE_OFFSET(offset));
+ val |= (*data_buf << BYTE_OFFSET(offset));
+
+		/* nvram data is returned as an array of bytes;
+		 * convert it back to cpu order */
+ val = be32_to_cpu(val);
+
+ rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+ cmd_flags);
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
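
bnx2x_nvram_write1() above patches a single byte inside its naturally aligned dword via BYTE_OFFSET(). A standalone sketch of that read-modify-write, assuming the dword has already been converted to CPU order as in the function:

#include <assert.h>

#define BYTE_OFFSET(offset)	(8 * ((offset) & 0x03))

int main(void)
{
	unsigned int val = 0xaabbccdd;
	unsigned int offset = 0x102;	/* byte 2 of its aligned dword */

	val &= ~(0xffu << BYTE_OFFSET(offset));	/* clear the old byte */
	val |= 0x11u << BYTE_OFFSET(offset);	/* splice in the new one */

	assert(BYTE_OFFSET(offset) == 16);
	assert(val == 0xaa11ccdd);
	return 0;
}
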
+
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ u32 val;
+ u32 written_so_far;
+
+ if (buf_size == 1) /* ethtool */
+ return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+
+ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+ DP(BNX2X_MSG_NVM,
+ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+ offset, buf_size);
+ return -EINVAL;
+ }
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+ " buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ written_so_far = 0;
+ cmd_flags = MCPR_NVM_COMMAND_FIRST;
+ while ((written_so_far < buf_size) && (rc == 0)) {
+ if (written_so_far == (buf_size - sizeof(u32)))
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
+ cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+ memcpy(&val, data_buf, 4);
+
+ rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+ /* advance to the next dword */
+ offset += sizeof(u32);
+ data_buf += sizeof(u32);
+ written_so_far += sizeof(u32);
+ cmd_flags = 0;
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
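
A standalone sketch of the FIRST/LAST command-flag sequencing used in the write loop above: a burst is closed at the end of the buffer or at a page boundary, and reopened on the next page. PAGE_SIZE_DEMO stands in for the driver's BNX2X_NVRAM_PAGE_SIZE (assumed 256 here) and the flag values are illustrative:

#include <stdio.h>

#define PAGE_SIZE_DEMO	256
#define FLAG_FIRST	0x1
#define FLAG_LAST	0x2

int main(void)
{
	unsigned int offset = 0xf8, buf_size = 16, written = 0;
	unsigned int flags = FLAG_FIRST;

	while (written < buf_size) {
		if (written == buf_size - 4 ||
		    ((offset + 4) % PAGE_SIZE_DEMO) == 0)
			flags |= FLAG_LAST;	/* close the burst */
		else if ((offset % PAGE_SIZE_DEMO) == 0)
			flags |= FLAG_FIRST;	/* reopen on a new page */
		printf("dword at 0x%03x: flags 0x%x\n", offset, flags);
		offset += 4;
		written += 4;
		flags = 0;
	}
	return 0;
}
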
+
+static int bnx2x_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int port = BP_PORT(bp);
+ int rc = 0;
+ u32 ext_phy_config;
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+ eeprom->len, eeprom->len);
+
+ /* parameters already validated in ethtool_set_eeprom */
+
+ /* PHY eeprom can be accessed only by the PMF */
+ if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
+ !bp->port.pmf)
+ return -EINVAL;
+
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ if (eeprom->magic == 0x50485950) {
+ /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ bnx2x_acquire_phy_lock(bp);
+ rc |= bnx2x_link_reset(&bp->link_params,
+ &bp->link_vars, 0);
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH, port);
+ bnx2x_release_phy_lock(bp);
+ bnx2x_link_report(bp);
+
+ } else if (eeprom->magic == 0x50485952) {
+ /* 'PHYR' (0x50485952): re-init link after FW upgrade */
+ if (bp->state == BNX2X_STATE_OPEN) {
+ bnx2x_acquire_phy_lock(bp);
+ rc |= bnx2x_link_reset(&bp->link_params,
+ &bp->link_vars, 1);
+
+ rc |= bnx2x_phy_init(&bp->link_params,
+ &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ bnx2x_calc_fc_adv(bp);
+ }
+ } else if (eeprom->magic == 0x53985943) {
+ /* 'PHYC' (0x53985943): PHY FW upgrade completed */
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
+
+ /* DSP Remove Download Mode */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_LOW, port);
+
+ bnx2x_acquire_phy_lock(bp);
+
+ bnx2x_sfx7101_sp_sw_reset(bp,
+ &bp->link_params.phy[EXT_PHY1]);
+
+ /* wait 0.5 sec to allow it to run */
+ msleep(500);
+ bnx2x_ext_phy_hw_reset(bp, port);
+ msleep(500);
+ bnx2x_release_phy_lock(bp);
+ }
+ } else
+ rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+ return rc;
+}
+
+static int bnx2x_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ coal->rx_coalesce_usecs = bp->rx_ticks;
+ coal->tx_coalesce_usecs = bp->tx_ticks;
+
+ return 0;
+}
+
+static int bnx2x_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+ bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+ if (netif_running(dev))
+ bnx2x_update_coalesce(bp);
+
+ return 0;
+}
+
+static void bnx2x_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = MAX_RX_AVAIL;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+
+ if (bp->rx_ring_size)
+ ering->rx_pending = bp->rx_ring_size;
+ else
+		ering->rx_pending = MAX_RX_AVAIL;
+
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+
+ ering->tx_max_pending = MAX_TX_AVAIL;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnx2x_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ if ((ering->rx_pending > MAX_RX_AVAIL) ||
+ (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA)) ||
+ (ering->tx_pending > MAX_TX_AVAIL) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+ return -EINVAL;
+
+ bp->rx_ring_size = ering->rx_pending;
+ bp->tx_ring_size = ering->tx_pending;
+
+ return bnx2x_reload_if_running(dev);
+}
+
+static void bnx2x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
+ BNX2X_FLOW_CTRL_AUTO);
+
+ epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
+ BNX2X_FLOW_CTRL_RX);
+ epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
+ BNX2X_FLOW_CTRL_TX);
+
+ DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ " autoneg %d rx_pause %d tx_pause %d\n",
+ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
+static int bnx2x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (IS_MF(bp))
+ return 0;
+
+ DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ " autoneg %d rx_pause %d tx_pause %d\n",
+ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
+
+ if (epause->rx_pause)
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
+
+ if (epause->tx_pause)
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
+
+ if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
+
+ if (epause->autoneg) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+ DP(NETIF_MSG_LINK, "autoneg not supported\n");
+ return -EINVAL;
+ }
+
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
+ bp->link_params.req_flow_ctrl[cfg_idx] =
+ BNX2X_FLOW_CTRL_AUTO;
+ }
+ }
+
+ DP(NETIF_MSG_LINK,
+ "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
+
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+static const struct {
+ char string[ETH_GSTRING_LEN];
+} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
+ { "register_test (offline)" },
+ { "memory_test (offline)" },
+ { "loopback_test (offline)" },
+ { "nvram_test (online)" },
+ { "interrupt_test (online)" },
+ { "link_test (online)" },
+ { "idle check (online)" }
+};
+
+enum {
+ BNX2X_CHIP_E1_OFST = 0,
+ BNX2X_CHIP_E1H_OFST,
+ BNX2X_CHIP_E2_OFST,
+ BNX2X_CHIP_E3_OFST,
+ BNX2X_CHIP_E3B0_OFST,
+ BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
+static int bnx2x_test_registers(struct bnx2x *bp)
+{
+ int idx, i, rc = -ENODEV;
+ u32 wr_val = 0, hw;
+ int port = BP_PORT(bp);
+ static const struct {
+ u32 hw;
+ u32 offset0;
+ u32 offset1;
+ u32 mask;
+ } reg_tbl[] = {
+/* 0 */ { BNX2X_CHIP_MASK_ALL,
+ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
+ { BNX2X_CHIP_MASK_E1X,
+ HC_REG_AGG_INT_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_E3B0,
+ PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
+/* 10 */ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ QM_REG_CONNNUM_0, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
+/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
+/* 30 */ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
+
+ { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
+ };
+
+ if (!netif_running(bp->dev))
+ return rc;
+
+ if (CHIP_IS_E1(bp))
+ hw = BNX2X_CHIP_MASK_E1;
+ else if (CHIP_IS_E1H(bp))
+ hw = BNX2X_CHIP_MASK_E1H;
+ else if (CHIP_IS_E2(bp))
+ hw = BNX2X_CHIP_MASK_E2;
+ else if (CHIP_IS_E3B0(bp))
+ hw = BNX2X_CHIP_MASK_E3B0;
+ else /* e3 A0 */
+ hw = BNX2X_CHIP_MASK_E3;
+
+	/* Repeat the test twice:
+	   first by writing 0x00000000, then by writing 0xffffffff */
+ for (idx = 0; idx < 2; idx++) {
+
+ switch (idx) {
+ case 0:
+ wr_val = 0;
+ break;
+ case 1:
+ wr_val = 0xffffffff;
+ break;
+ }
+
+ for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
+ u32 offset, mask, save_val, val;
+ if (!(hw & reg_tbl[i].hw))
+ continue;
+
+ offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
+ mask = reg_tbl[i].mask;
+
+ save_val = REG_RD(bp, offset);
+
+ REG_WR(bp, offset, wr_val & mask);
+
+ val = REG_RD(bp, offset);
+
+ /* Restore the original register's value */
+ REG_WR(bp, offset, save_val);
+
+ /* verify value is as expected */
+ if ((val & mask) != (wr_val & mask)) {
+ DP(NETIF_MSG_HW,
+ "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+ offset, val, wr_val, mask);
+ goto test_reg_exit;
+ }
+ }
+ }
+
+ rc = 0;
+
+test_reg_exit:
+ return rc;
+}
+
+static int bnx2x_test_memory(struct bnx2x *bp)
+{
+ int i, j, rc = -ENODEV;
+ u32 val, index;
+ static const struct {
+ u32 offset;
+ int size;
+ } mem_tbl[] = {
+ { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
+ { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
+ { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
+ { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
+ { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
+ { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
+ { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
+
+ { 0xffffffff, 0 }
+ };
+
+ static const struct {
+ char *name;
+ u32 offset;
+ u32 hw_mask[BNX2X_CHIP_MAX_OFST];
+ } prty_tbl[] = {
+ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
+ {0x2, 0x2, 0, 0} },
+ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+ {0, 0, 0, 0} },
+ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
+ {0x3ffc1, 0, 0, 0} },
+
+ { NULL, 0xffffffff, {0, 0, 0, 0} }
+ };
+
+ if (!netif_running(bp->dev))
+ return rc;
+
+ if (CHIP_IS_E1(bp))
+ index = BNX2X_CHIP_E1_OFST;
+ else if (CHIP_IS_E1H(bp))
+ index = BNX2X_CHIP_E1H_OFST;
+ else if (CHIP_IS_E2(bp))
+ index = BNX2X_CHIP_E2_OFST;
+ else /* e3 */
+ index = BNX2X_CHIP_E3_OFST;
+
+	/* pre-check the parity status */
+ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+ val = REG_RD(bp, prty_tbl[i].offset);
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
+ DP(NETIF_MSG_HW,
+ "%s is 0x%x\n", prty_tbl[i].name, val);
+ goto test_mem_exit;
+ }
+ }
+
+ /* Go through all the memories */
+ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
+ for (j = 0; j < mem_tbl[i].size; j++)
+ REG_RD(bp, mem_tbl[i].offset + j*4);
+
+ /* Check the parity status */
+ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+ val = REG_RD(bp, prty_tbl[i].offset);
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
+ DP(NETIF_MSG_HW,
+ "%s is 0x%x\n", prty_tbl[i].name, val);
+ goto test_mem_exit;
+ }
+ }
+
+ rc = 0;
+
+test_mem_exit:
+ return rc;
+}
+
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
+{
+ int cnt = 1400;
+
+ if (link_up) {
+ while (bnx2x_link_test(bp, is_serdes) && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+ DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+ }
+}
+
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
+{
+ unsigned int pkt_size, num_pkts, i;
+ struct sk_buff *skb;
+ unsigned char *packet;
+ struct bnx2x_fastpath *fp_rx = &bp->fp[0];
+ struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+ struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+ u16 tx_start_idx, tx_idx;
+ u16 rx_start_idx, rx_idx;
+ u16 pkt_prod, bd_prod, rx_comp_cons;
+ struct sw_tx_bd *tx_buf;
+ struct eth_tx_start_bd *tx_start_bd;
+ struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+ struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ dma_addr_t mapping;
+ union eth_rx_cqe *cqe;
+ u8 cqe_fp_flags, cqe_fp_type;
+ struct sw_rx_bd *rx_buf;
+ u16 len;
+ int rc = -ENODEV;
+
+ /* check the loopback mode */
+ switch (loopback_mode) {
+ case BNX2X_PHY_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+ return -EINVAL;
+ break;
+ case BNX2X_MAC_LOOPBACK:
+ bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
+ LOOPBACK_XMAC : LOOPBACK_BMAC;
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* prepare the loopback packet */
+ pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
+ bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
+ skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+ memset(packet + ETH_ALEN, 0, ETH_ALEN);
+ memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char) (i & 0xff);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ rc = -ENOMEM;
+ dev_kfree_skb(skb);
+ BNX2X_ERR("Unable to map SKB\n");
+ goto test_loopback_exit;
+ }
+
+ /* send the loopback packet */
+ num_pkts = 0;
+ tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
+ rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+
+ pkt_prod = txdata->tx_pkt_prod++;
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_buf->first_bd = txdata->tx_bd_prod;
+ tx_buf->skb = skb;
+ tx_buf->flags = 0;
+
+ bd_prod = TX_BD(txdata->tx_bd_prod);
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
+ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_ETH_ADDR_TYPE,
+ UNICAST_ADDRESS);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_HDR_NBDS,
+ 1);
+
+ /* turn on parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+
+ wmb();
+
+ txdata->tx_db.data.prod += 2;
+ barrier();
+ DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+ mmiowb();
+ barrier();
+
+ num_pkts++;
+ txdata->tx_bd_prod += 2; /* start + pbd */
+
+ udelay(100);
+
+ tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
+ if (tx_idx != tx_start_idx + num_pkts)
+ goto test_loopback_exit;
+
+	/* Unlike HC, the IGU won't generate an interrupt for status block
+ * updates that have been performed while interrupts were
+ * disabled.
+ */
+ if (bp->common.int_block == INT_BLOCK_IGU) {
+		/* Disable local BHs to prevent a deadlock between
+ * sch_direct_xmit() and bnx2x_run_loopback() (calling
+ * bnx2x_tx_int()), as both are taking netif_tx_lock().
+ */
+ local_bh_disable();
+ bnx2x_tx_int(bp, txdata);
+ local_bh_enable();
+ }
+
+ rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+ if (rx_idx != rx_start_idx + num_pkts)
+ goto test_loopback_exit;
+
+ rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
+ cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
+ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+ if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+ goto test_loopback_rx_exit;
+
+ len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+ if (len != pkt_size)
+ goto test_loopback_rx_exit;
+
+ rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp_rx->rx_buf_size, DMA_FROM_DEVICE);
+ skb = rx_buf->skb;
+ skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ if (*(skb->data + i) != (unsigned char) (i & 0xff))
+ goto test_loopback_rx_exit;
+
+ rc = 0;
+
+test_loopback_rx_exit:
+
+ fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
+ fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
+ fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
+ fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
+ fp_rx->rx_sge_prod);
+
+test_loopback_exit:
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+
+ return rc;
+}
+
+static int bnx2x_test_loopback(struct bnx2x *bp)
+{
+ int rc = 0, res;
+
+ if (BP_NOMCP(bp))
+ return rc;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_LOOPBACK_FAILED;
+
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_acquire_phy_lock(bp);
+
+ res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
+ if (res) {
+ DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
+ rc |= BNX2X_PHY_LOOPBACK_FAILED;
+ }
+
+ res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
+ if (res) {
+ DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
+ rc |= BNX2X_MAC_LOOPBACK_FAILED;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
+#define CRC32_RESIDUAL 0xdebb20e3
+
+static int bnx2x_test_nvram(struct bnx2x *bp)
+{
+ static const struct {
+ int offset;
+ int size;
+ } nvram_tbl[] = {
+ { 0, 0x14 }, /* bootstrap */
+ { 0x14, 0xec }, /* dir */
+ { 0x100, 0x350 }, /* manuf_info */
+ { 0x450, 0xf0 }, /* feature_info */
+ { 0x640, 0x64 }, /* upgrade_key_info */
+ { 0x708, 0x70 }, /* manuf_key_info */
+ { 0, 0 }
+ };
+ __be32 buf[0x350 / 4];
+ u8 *data = (u8 *)buf;
+ int i, rc;
+ u32 magic, crc;
+
+ if (BP_NOMCP(bp))
+ return 0;
+
+ rc = bnx2x_nvram_read(bp, 0, data, 4);
+ if (rc) {
+ DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
+ goto test_nvram_exit;
+ }
+
+ magic = be32_to_cpu(buf[0]);
+ if (magic != 0x669955aa) {
+ DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
+ rc = -ENODEV;
+ goto test_nvram_exit;
+ }
+
+ for (i = 0; nvram_tbl[i].size; i++) {
+
+ rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
+ nvram_tbl[i].size);
+ if (rc) {
+ DP(NETIF_MSG_PROBE,
+ "nvram_tbl[%d] read data (rc %d)\n", i, rc);
+ goto test_nvram_exit;
+ }
+
+ crc = ether_crc_le(nvram_tbl[i].size, data);
+ if (crc != CRC32_RESIDUAL) {
+ DP(NETIF_MSG_PROBE,
+ "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
+ rc = -ENODEV;
+ goto test_nvram_exit;
+ }
+ }
+
+test_nvram_exit:
+ return rc;
+}
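
The per-section check above leans on a standard CRC-32 property: when a section's complemented CRC is appended to its data, re-running the little-endian CRC over data plus trailer yields the fixed residual 0xdebb20e3, whatever the data was. A minimal sketch, assuming a little-endian host and the same polynomial, init value and lack of final inversion as the kernel's ether_crc_le():

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc_le(const uint8_t *p, size_t n)
{
	uint32_t crc = ~0u;

	while (n--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return crc;		/* no final xor, like ether_crc_le() */
}

int main(void)
{
	uint8_t buf[20] = "nvram section 00";
	uint32_t crc = ~crc_le(buf, 16);	/* complemented CRC32 */

	memcpy(buf + 16, &crc, 4);	/* append it (LE host assumed) */
	assert(crc_le(buf, 20) == 0xdebb20e3u);	/* == CRC32_RESIDUAL */
	return 0;
}
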
+
+/* Send an EMPTY ramrod on the first queue */
+static int bnx2x_test_intr(struct bnx2x *bp)
+{
+ struct bnx2x_queue_state_params params = {0};
+
+ if (!netif_running(bp->dev))
+ return -ENODEV;
+
+ params.q_obj = &bp->fp->q_obj;
+ params.cmd = BNX2X_Q_CMD_EMPTY;
+
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	return bnx2x_queue_state_change(bp, &params);
+}
+
+static void bnx2x_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u8 is_serdes;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+
+ if (!netif_running(dev))
+ return;
+
+ /* offline tests are not supported in MF mode */
+ if (IS_MF(bp))
+ etest->flags &= ~ETH_TEST_FL_OFFLINE;
+ is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ int port = BP_PORT(bp);
+ u32 val;
+ u8 link_up;
+
+ /* save current value of input enable for TX port IF */
+ val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
+ /* disable input for TX port IF */
+ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+
+ link_up = bp->link_vars.link_up;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_DIAG);
+ /* wait until link state is restored */
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ if (bnx2x_test_registers(bp) != 0) {
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (bnx2x_test_memory(bp) != 0) {
+ buf[1] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ buf[2] = bnx2x_test_loopback(bp);
+ if (buf[2] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+
+ /* restore input for TX port IF */
+ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ /* wait until link state is restored */
+ bnx2x_wait_for_link(bp, link_up, is_serdes);
+ }
+ if (bnx2x_test_nvram(bp) != 0) {
+ buf[3] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (bnx2x_test_intr(bp) != 0) {
+ buf[4] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+#ifdef BNX2X_EXTRA_DEBUG
+ bnx2x_panic_dump(bp);
+#endif
+}
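
The self-test above is driven from userspace through the ETHTOOL_TEST ioctl ("ethtool -t" is the usual front end). A minimal sketch of such a caller, again with "eth0" as a placeholder and the result array sized to the seven tests listed earlier:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_test *t = calloc(1, sizeof(*t) + 7 * sizeof(__u64));
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (!t || fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	t->cmd = ETHTOOL_TEST;
	t->flags = ETH_TEST_FL_OFFLINE;
	ifr.ifr_data = (void *)t;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("self-test %s\n",
		       (t->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "passed");
	free(t);
	close(fd);
	return 0;
}
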
+
+#define IS_PORT_STAT(i) \
+ ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
+#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
+#define IS_MF_MODE_STAT(bp) \
+ (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+
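
A quick standalone check of how these flag bits classify a counter (the STATS_FLAGS_* values are re-declared locally for the sketch): a BOTH-flagged stat also carries the FUNC bit, so IS_PORT_STAT(), which tests for the port-only pattern, does not filter it out in MF mode:

#include <assert.h>

#define STATS_FLAGS_PORT	1
#define STATS_FLAGS_FUNC	2
#define STATS_FLAGS_BOTH	(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)

int main(void)
{
	unsigned int flags = STATS_FLAGS_BOTH;

	assert(flags & STATS_FLAGS_FUNC);		/* counts as FUNC */
	assert((flags & STATS_FLAGS_BOTH) != STATS_FLAGS_PORT);
	return 0;
}
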
+/* ethtool statistics are displayed for all regular ethernet queues and the
+ * fcoe L2 queue if not disabled
+ */
+static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+ return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i, num_stats;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (is_multi(bp)) {
+ num_stats = bnx2x_num_stat_queues(bp) *
+ BNX2X_NUM_Q_STATS;
+ if (!IS_MF_MODE_STAT(bp))
+ num_stats += BNX2X_NUM_STATS;
+ } else {
+ if (IS_MF_MODE_STAT(bp)) {
+ num_stats = 0;
+ for (i = 0; i < BNX2X_NUM_STATS; i++)
+ if (IS_FUNC_STAT(i))
+ num_stats++;
+ } else
+ num_stats = BNX2X_NUM_STATS;
+ }
+ return num_stats;
+
+ case ETH_SS_TEST:
+ return BNX2X_NUM_TESTS;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i, j, k;
+ char queue_name[MAX_QUEUE_NAME_LEN+1];
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (is_multi(bp)) {
+ k = 0;
+ for_each_eth_queue(bp, i) {
+ memset(queue_name, 0, sizeof(queue_name));
+ sprintf(queue_name, "%d", i);
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+ snprintf(buf + (k + j)*ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ bnx2x_q_stats_arr[j].string,
+ queue_name);
+ k += BNX2X_NUM_Q_STATS;
+ }
+ if (IS_MF_MODE_STAT(bp))
+ break;
+ for (j = 0; j < BNX2X_NUM_STATS; j++)
+ strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+ bnx2x_stats_arr[j].string);
+ } else {
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ strcpy(buf + j*ETH_GSTRING_LEN,
+ bnx2x_stats_arr[i].string);
+ j++;
+ }
+ }
+ break;
+
+ case ETH_SS_TEST:
+ memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+ break;
+ }
+}
+
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 *hw_stats, *offset;
+ int i, j, k;
+
+ if (is_multi(bp)) {
+ k = 0;
+ for_each_eth_queue(bp, i) {
+ hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ if (bnx2x_q_stats_arr[j].size == 0) {
+ /* skip this counter */
+ buf[k + j] = 0;
+ continue;
+ }
+ offset = (hw_stats +
+ bnx2x_q_stats_arr[j].offset);
+ if (bnx2x_q_stats_arr[j].size == 4) {
+ /* 4-byte counter */
+ buf[k + j] = (u64) *offset;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ }
+ k += BNX2X_NUM_Q_STATS;
+ }
+ if (IS_MF_MODE_STAT(bp))
+ return;
+ hw_stats = (u32 *)&bp->eth_stats;
+ for (j = 0; j < BNX2X_NUM_STATS; j++) {
+ if (bnx2x_stats_arr[j].size == 0) {
+ /* skip this counter */
+ buf[k + j] = 0;
+ continue;
+ }
+ offset = (hw_stats + bnx2x_stats_arr[j].offset);
+ if (bnx2x_stats_arr[j].size == 4) {
+ /* 4-byte counter */
+ buf[k + j] = (u64) *offset;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ }
+ } else {
+ hw_stats = (u32 *)&bp->eth_stats;
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ if (bnx2x_stats_arr[i].size == 0) {
+ /* skip this counter */
+ buf[j] = 0;
+ j++;
+ continue;
+ }
+ offset = (hw_stats + bnx2x_stats_arr[i].offset);
+ if (bnx2x_stats_arr[i].size == 4) {
+ /* 4-byte counter */
+ buf[j] = (u64) *offset;
+ j++;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[j] = HILO_U64(*offset, *(offset + 1));
+ j++;
+ }
+ }
+}
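
The hi/lo joining above (the driver's HILO_U64() macro) can be checked in isolation; an 8-byte counter is laid out as two consecutive u32s, high word first:

#include <assert.h>
#include <stdint.h>

#define HILO_U64(hi, lo)	((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	uint32_t counter[2] = { 0x1, 0x00002345 };	/* { hi, lo } */

	assert(HILO_U64(counter[0], counter[1]) == 0x100002345ULL);
	return 0;
}
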
+
+static int bnx2x_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ if (!bp->port.pmf)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_ON, SPEED_1000);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_FRONT_PANEL_OFF, 0);
+
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER,
+ bp->link_vars.line_speed);
+ }
+
+ return 0;
+}
+
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = BNX2X_NUM_ETH_QUEUES(bp);
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ size_t i;
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ /* Get the current configuration of the RSS indirection table */
+ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+ /*
+ * We can't use a memcpy() as an internal storage of an
+ * indirection table is a u8 array while indir->ring_index
+ * points to an array of u32.
+ *
+ * Indirection table contains the FW Client IDs, so we need to
+ * align the returned table to the Client ID of the leading RSS
+ * queue.
+ */
+ for (i = 0; i < copy_size; i++)
+ indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
+
+ indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+
+ return 0;
+}
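+
+/*
+ * A small worked example of the alignment above: if the leading RSS
+ * queue has FW Client ID 17 (bp->fp->cl_id == 17) and a table entry
+ * holds Client ID 19, the value reported to ethtool is 19 - 17 = 2,
+ * i.e. the third ethtool RX ring. bnx2x_set_rxfh_indir() below applies
+ * the inverse (+ cl_id) translation before programming the table.
+ */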
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t i;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ /* validate the size */
+ if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+ /* validate the indices */
+ if (indir->ring_index[i] >= num_eth_queues)
+ return -EINVAL;
+ /*
+ * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * as an internal storage of an indirection table is a u8 array
+ * while indir->ring_index points to an array of u32.
+ *
+ * Indirection table contains the FW Client IDs, so we need to
+ * align the received table to the Client ID of the leading RSS
+ * queue
+ */
+ ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+ }
+
+ return bnx2x_config_rss_pf(bp, ind_table, false);
+}
+
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_regs_len = bnx2x_get_regs_len,
+ .get_regs = bnx2x_get_regs,
+ .get_wol = bnx2x_get_wol,
+ .set_wol = bnx2x_set_wol,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .nway_reset = bnx2x_nway_reset,
+ .get_link = bnx2x_get_link,
+ .get_eeprom_len = bnx2x_get_eeprom_len,
+ .get_eeprom = bnx2x_get_eeprom,
+ .set_eeprom = bnx2x_set_eeprom,
+ .get_coalesce = bnx2x_get_coalesce,
+ .set_coalesce = bnx2x_set_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_pauseparam = bnx2x_get_pauseparam,
+ .set_pauseparam = bnx2x_set_pauseparam,
+ .self_test = bnx2x_self_test,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .set_phys_id = bnx2x_set_phys_id,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+}
--- /dev/null
+/* Copyright 2008-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mutex.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+
+
+/********************************************************/
+#define ETH_HLEN 14
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE 60
+#define ETH_MAX_PACKET_SIZE 1500
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+#define MDIO_ACCESS_TIMEOUT 1000
+#define BMAC_CONTROL_RX_ENABLE 2
+#define WC_LANE_MAX 4
+#define I2C_SWITCH_WIDTH 2
+#define I2C_BSC0 0
+#define I2C_BSC1 1
+#define I2C_WA_RETRY_CNT 3
+#define MCPR_IMC_COMMAND_READ_OP 1
+#define MCPR_IMC_COMMAND_WRITE_OP 2
+
+/***********************************************************/
+/* Shortcut definitions */
+/***********************************************************/
+
+#define NIG_LATCH_BC_ENABLE_MI_INT 0
+
+#define NIG_STATUS_EMAC0_MI_INT \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
+#define NIG_STATUS_XGXS0_LINK10G \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
+#define NIG_STATUS_XGXS0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
+#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
+#define NIG_STATUS_SERDES0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
+#define NIG_MASK_MI_INT \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
+#define NIG_MASK_XGXS0_LINK10G \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
+#define NIG_MASK_XGXS0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
+#define NIG_MASK_SERDES0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
+
+#define MDIO_AN_CL73_OR_37_COMPLETE \
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+
+#define XGXS_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+
+#define SERDES_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
+#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
+#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
+ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+#define AUTONEG_SGMII_FIBER_AUTODET \
+ SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+
+#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+#define GP_STATUS_SPEED_MASK \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+#define GP_STATUS_10G_HIG \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+#define GP_STATUS_10G_CX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+#define GP_STATUS_10G_KX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
+
+
+
+/* SFP module EEPROM definitions */
+#define SFP_EEPROM_CON_TYPE_ADDR 0x2
+ #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+
+
+#define SFP_EEPROM_COMP_CODE_ADDR 0x3
+ #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
+ #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
+ #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
+
+#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+
+#define SFP_EEPROM_OPTIONS_ADDR 0x40
+ #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
+#define SFP_EEPROM_OPTIONS_SIZE 2
+
+#define EDC_MODE_LINEAR 0x0022
+#define EDC_MODE_LIMITING 0x0044
+#define EDC_MODE_PASSIVE_DAC 0x0055
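+
+/*
+ * A sketch of how the EEPROM definitions above are meant to combine
+ * (illustrative only; the hypothetical 'tech' holds the byte read from
+ * SFP_EEPROM_FC_TX_TECH_ADDR):
+ *
+ * u8 tech;
+ * if (tech & SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE)
+ * edc_mode = EDC_MODE_PASSIVE_DAC;
+ * else
+ * edc_mode = EDC_MODE_LIMITING;
+ */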
+
+
+/* BRB thresholds for E2*/
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
+
+#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
+
+/* BRB thresholds for E3A0 */
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
+
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
+
+
+/* BRB thresholds for E3B0 2 port mode*/
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
+
+/* only for E3B0*/
+#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
+#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
+
+/* Lossy +Lossless GUARANTIED == GUART */
+#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284
+/* Lossless +Lossless*/
+#define PFC_E3B0_2P_PAUSE_LB_GUART 236
+/* Lossy +Lossy*/
+#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342
+
+/* Lossy +Lossless*/
+#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284
+/* Lossless +Lossless*/
+#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236
+/* Lossy +Lossy*/
+#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
+#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
+
+/* BRB thresholds for E3B0 4 port mode */
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
+
+
+/* only for E3B0*/
+#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
+#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
+#define PFC_E3B0_4P_LB_GUART 120
+
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+
+#define DCBX_INVALID_COS (0xFF)
+
+#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
+#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
+#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
+#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
+#define ETS_E3B0_PBF_MIN_W_VAL (10000)
+
+#define MAX_PACKET_SIZE (9700)
+#define WC_UC_TIMEOUT 100
+
+/**********************************************************/
+/* INTERFACE */
+/**********************************************************/
+
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_write(_bp, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
+
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_read(_bp, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
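+
+/*
+ * Typical (illustrative) usage of the shortcut macros above, assuming
+ * the usual bank/register names: a clause 22 style bank/offset pair is
+ * folded into a single clause 45 address by adding the low nibble of
+ * the register offset to the bank base, e.g.:
+ *
+ * u16 val;
+ * CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0,
+ * MDIO_COMBO_IEEE0_MII_CONTROL, &val);
+ */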
+
+static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
+{
+ u32 val = REG_RD(bp, reg);
+
+ val |= bits;
+ REG_WR(bp, reg, val);
+ return val;
+}
+
+static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
+{
+ u32 val = REG_RD(bp, reg);
+
+ val &= ~bits;
+ REG_WR(bp, reg, val);
+ return val;
+}
+
+/******************************************************************/
+/* EPIO/GPIO section */
+/******************************************************************/
+static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
+{
+ u32 epio_mask, gp_oenable;
+ *en = 0;
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
+ return;
+ }
+
+ epio_mask = 1 << epio_pin;
+ /* Configure this EPIO as an input so its level can be read */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+ *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
+{
+ u32 epio_mask, gp_output, gp_oenable;
+
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
+ epio_mask = 1 << epio_pin;
+ /* Set the output value for this EPIO */
+ gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
+ if (en)
+ gp_output |= epio_mask;
+ else
+ gp_output &= ~epio_mask;
+
+ REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+ /* Enable this EPIO as an output */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
+ }
+}
+
+static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return -EINVAL;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ *val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+ }
+ return 0;
+
+}
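+
+/*
+ * The pin_cfg encoding assumed by the two helpers above: values below
+ * PIN_CFG_EPIO0 enumerate GPIOs as (port * 4 + gpio_num) relative to
+ * PIN_CFG_GPIO0_P0. For example, pin_cfg == PIN_CFG_GPIO0_P0 + 6
+ * decodes to gpio_num = 6 & 0x3 = 2 and gpio_port = 6 >> 2 = 1,
+ * i.e. GPIO2 of port 1.
+ */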
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
+{
+ /* ETS disabled configuration */
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
+
+ /*
+ * mapping between entry priority to client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+ /*
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /*
+ * mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /*
+ * If ETS mode is enabled (there is no strict priority) defines a WFQ
+ * weight for COS0/COS1.
+ */
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
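+
+/*
+ * Worked decode of the 0x4688 written above: 0x4688 =
+ * 0b100_011_010_001_000, read as five 3-bit client numbers from the
+ * LSB up: PRI0 = 000 (MCP), PRI1 = 001 (dbg0), PRI2 = 010 (dbg1),
+ * PRI3 = 011 (COS0), PRI4 = 100 (COS1, the highest priority entry),
+ * matching the mapping comment in bnx2x_ets_e2e3a0_disabled().
+ */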
+/******************************************************************************
+* Description:
+* Returns min_w_val, set according to the line speed.
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+ u32 min_w_val = 0;
+ /* Calculate min_w_val.*/
+ if (vars->link_up) {
+ if (SPEED_20000 == vars->line_speed)
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+ } else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ /*
+ * If the link isn't up (e.g. a static configuration), the
+ * speed is taken as 20GBPS.
+ */
+ return min_w_val;
+}
+/******************************************************************************
+* Description:
+* Returns the credit upper bound computed from min_w_val.
+******************************************************************************/
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
+ const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+ MAX_PACKET_SIZE);
+ return credit_upper_bound;
+}
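+
+/*
+ * Worked example for the bound above: at 20Gbps, min_w_val is
+ * ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720), so 150 * 2720 = 408000, well
+ * above MAX_PACKET_SIZE (9700); the MAXVAL clamp only matters when
+ * 150 * min_w_val would fall below one maximum-sized packet.
+ */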
+/******************************************************************************
+* Description:
+* Set credit upper bound for NIG.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+ credit_upper_bound);
+ }
+}
+/******************************************************************************
+* Description:
+* Returns the NIG ETS registers to their init values, except
+* credit_upper_bound, which isn't used in this configuration (no WFQ
+* is enabled) and will be configured according to spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
+ /*
+ * mapping between entry priority to client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
+ * COS5)(HIGHEST) 4bits client num.
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port) {
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+ } else {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+ }
+ /**
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ /* TODO_ETS - Should be done by reset value or init tool */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+ NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /**
+ * mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port) {
+ /*Port 1 has 6 COS*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+ } else {
+ /*Port 0 has 9 COS*/
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+ 0x43210876);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+ }
+
+ /**
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ if (port)
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+ else
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+ /*
+ * Please note the register addresses are not contiguous, so a for
+ * loop here is not appropriate. In 2 port mode port0 only COS0-5
+ * can be used; DEBUG0, DEBUG1 and MGMT are never used for WFQ. In
+ * 4 port mode port1 only COS0-2 can be used; DEBUG0, DEBUG1 and
+ * MGMT are never used for WFQ.
+ */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+ }
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for PBF.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+ const u8 port = params->port;
+ u32 base_upper_bound = 0;
+ u8 max_cos = 0;
+ u8 i = 0;
+ /*
+ * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+ * port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+* Returns the PBF ETS registers to their init values, except
+* credit_upper_bound, which isn't used in this configuration (no WFQ
+* is enabled) and will be configured according to spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 i = 0;
+ u32 base_weight = 0;
+ u8 max_cos = 0;
+
+ /*
+ * mapping between entry priority to client number (0 - COS0
+ * client, 1 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688);
+ else
+ /* (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688);
+
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000)*/
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+ else
+ /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+ PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100);
+
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
+ /**
+ * In 2 port mode port0 has COS0-5 that can be used for WFQ.
+ * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_weight = PBF_REG_COS0_WEIGHT_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_weight = PBF_REG_COS0_WEIGHT_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_weight + (0x4 * i), 0);
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+* Description:
+* E3B0 disable returns the registers to their init values.
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+ bnx2x_ets_e3b0_pbf_disabled(params);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Disable returns the registers to their init values.
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+
+ if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+ bnx2x_ets_e2e3a0_disabled(params);
+ else if (CHIP_IS_E3B0(bp))
+ bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+ return -EINVAL;
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+* Set the COS mapping to SP and BW; up to this point none of the COS
+* are set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ const u8 cos_sp_bitmap,
+ const u8 cos_bw_bitmap)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+ const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+ const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+ const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ nig_cli_subject2wfq_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+ pbf_cli_subject2wfq_bitmap);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset cannot be used.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+ const u8 cos_entry,
+ const u32 min_w_val_nig,
+ const u32 min_w_val_pbf,
+ const u16 total_bw,
+ const u8 bw,
+ const u8 port)
+{
+ u32 nig_reg_adress_crd_weight = 0;
+ u32 pbf_reg_adress_crd_weight = 0;
+ /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_adress_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
+ case 1:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
+ case 2:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_adress_crd_weight =
+ PBF_REG_COS3_WEIGHT_P0;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
+ }
+
+ REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+
+ REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
+
+ return 0;
+}
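+
+/*
+ * Worked example for the weights computed above: with total_bw = 100,
+ * bw = 30 and min_w_val_nig = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS
+ * (1360), cos_bw_nig = (30 * 1360) / 100 = 408. A COS with bw = 0 is
+ * clamped to 1 so that its credit weight never becomes zero.
+ */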
+/******************************************************************************
+* Description:
+* Calculate the total BW. A value of 0 isn't legal.
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+ const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ u16 *total_bw)
+{
+ struct bnx2x *bp = params->bp;
+ u8 cos_idx = 0;
+
+ *total_bw = 0;
+ /* Calculate total BW requested */
+ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
+ }
+ }
+
+ /* Check total BW is valid */
+ if ((100 != *total_bw) || (0 == *total_bw)) {
+ if (0 == *total_bw) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
+ return -EINVAL;
+ }
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config total BW should be 100\n");
+ /*
+ * We can handle a case where the BW isn't 100; this can
+ * happen if the TCs are joined.
+ */
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Invalidate all the sp_pri_to_cos.
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+ u8 pri = 0;
+ for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+ sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+ u8 *sp_pri_to_cos, const u8 pri,
+ const u8 cos_entry)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter There can't be two COS's with "
+ "the same strict pri\n");
+ return -EINVAL;
+ }
+
+ if (pri > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter Illegal strict priority\n");
+ return -EINVAL;
+ }
+
+ sp_pri_to_cos[pri] = cos_entry;
+ return 0;
+
+}
+
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in
+* the sp_pri_cli register.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+ const u8 pri_set,
+ const u8 pri_offset,
+ const u8 entry_size)
+{
+ u64 pri_cli_nig = 0;
+ pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+ (pri_set + pri_offset));
+
+ return pri_cli_nig;
+}
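+
+/*
+ * Worked example of the packing above with the NIG parameters
+ * (cos_offset = 3, pri_offset = 3, entry_size = 4): mapping COS 1 to
+ * strict priority slot 0 yields (1 + 3) << (4 * (0 + 3)) = 4 << 12 =
+ * 0x4000, i.e. client 4 (COS1) placed in the fourth 4-bit priority
+ * field, after the MCP/dbg0/dbg1 entries.
+ */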
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for NIG.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ const u8 nig_cos_offset = 3;
+ const u8 nig_pri_offset = 3;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+ nig_pri_offset, 4);
+
+}
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for PBF.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+ const u8 pbf_cos_offset = 0;
+ const u8 pbf_pri_offset = 0;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+ pbf_pri_offset, 3);
+
+}
+
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos (which COS has the higher priority).
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+ u8 *sp_pri_to_cos)
+{
+ struct bnx2x *bp = params->bp;
+ u8 i = 0;
+ const u8 port = params->port;
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ u64 pri_cli_nig = 0x210;
+ u32 pri_cli_pbf = 0x0;
+ u8 pri_set = 0;
+ u8 pri_bitmask = 0;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+ /* Set all the strict priority first */
+ for (i = 0; i < max_num_of_cos; i++) {
+ if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
+ if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid cos entry\n");
+ return -EINVAL;
+ }
+
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ sp_pri_to_cos[i], pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ sp_pri_to_cos[i], pri_set);
+ pri_bitmask = 1 << sp_pri_to_cos[i];
+ /* COS is used; remove it from the bitmap. */
+ if (0 == (pri_bitmask & cos_bit_to_set)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid: there can't be two COS's with"
+ " the same strict pri\n");
+ return -EINVAL;
+ }
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ /* Set all the non-strict priorities; i == COS */
+ for (i = 0; i < max_num_of_cos; i++) {
+ pri_bitmask = 1 << i;
+ /* Check if COS was already used for SP */
+ if (pri_bitmask & cos_bit_to_set) {
+ /* COS wasn't used for SP */
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ i, pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ i, pri_set);
+ /* COS is used; remove it from the bitmap. */
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ if (pri_set != max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+ "entries were set\n");
+ return -EINVAL;
+ }
+
+ if (port) {
+ /* Only 6 usable clients*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+ (u32)pri_cli_nig);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf);
+ } else {
+ /* Only 9 usable clients*/
+ const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+ const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+ pri_cli_nig_lsb);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+ pri_cli_nig_msb);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf);
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ const struct bnx2x_ets_params *ets_params)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ const u8 port = params->port;
+ u16 total_bw = 0;
+ const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 cos_bw_bitmap = 0;
+ u8 cos_sp_bitmap = 0;
+ u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+ u8 cos_entry = 0;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ if (ets_params->num_of_cos > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+ "isn't supported\n");
+ return -EINVAL;
+ }
+
+ /* Prepare sp strict priority parameters*/
+ bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+ /* Prepare BW parameters*/
+ bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+ &total_bw);
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config get_total_bw failed\n");
+ return -EINVAL;
+ }
+
+ /**
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
+ */
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+
+ for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+ cos_bw_bitmap |= (1 << cos_entry);
+ /*
+ * The function also sets the BW in HW (not the mapping
+ * yet).
+ */
+ bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+ bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+ total_bw,
+ ets_params->cos[cos_entry].params.bw_params.bw,
+ port);
+ } else if (bnx2x_cos_state_strict ==
+ ets_params->cos[cos_entry].state){
+ cos_sp_bitmap |= (1 << cos_entry);
+
+ bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+ params,
+ sp_pri_to_cos,
+ ets_params->cos[cos_entry].params.sp_params.pri,
+ cos_entry);
+
+ } else {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config cos state not valid\n");
+ return -EINVAL;
+ }
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config set cos bw failed\n");
+ return bnx2x_status;
+ }
+ }
+
+ /* Set SP register (which COS has higher priority) */
+ bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+ sp_pri_to_cos);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
+ return bnx2x_status;
+ }
+
+ /* Set client mapping of BW and strict */
+ bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+ cos_sp_bitmap,
+ cos_bw_bitmap);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+ return bnx2x_status;
+ }
+ return 0;
+}
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
+{
+ /* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+ /*
+ * defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+ /*
+ * mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+ /* ETS mode enabled*/
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw)
+{
+ /* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ const u32 total_bw = cos0_bw + cos1_bw;
+ u32 cos0_credit_weight = 0;
+ u32 cos1_credit_weight = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+
+ if ((0 == total_bw) ||
+ (0 == cos0_bw) ||
+ (0 == cos1_bw)) {
+ DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
+ return;
+ }
+
+ cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+ cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+
+ bnx2x_ets_bw_limit_common(params);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
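+
+/*
+ * Worked example: for cos0_bw = 40 and cos1_bw = 60, total_bw = 100
+ * and the credit weights become 40 * 0x5000 / 100 = 0x2000 and
+ * 60 * 0x5000 / 100 = 0x3000 respectively, so the WFQ arbiter grants
+ * credits to COS0 and COS1 in a 2:3 ratio.
+ */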
+
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+{
+ /* ETS enabled strict configuration */
+ struct bnx2x *bp = params->bp;
+ u32 val = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries,
+ * 3 - COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+ /*
+ * For strict priority entries defines the number of consecutive slots
+ * for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+ /*
+ * mapping between entry priority to client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
+ * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
+ */
+ val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+ return 0;
+}
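+
+/*
+ * Worked decode of the two mapping values above: 0x2318 =
+ * 0b010_001_100_011_000 places, from PRI0 upward, MCP(000), cos0(011),
+ * cos1(100), dbg1(001), dbg0(010); 0x22E0 = 0b010_001_011_100_000 is
+ * the same mapping with cos0 and cos1 swapped.
+ */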
+/******************************************************************/
+/* PFC section */
+/******************************************************************/
+
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u32 xmac_base;
+ u32 pause_val, pfc0_val, pfc1_val;
+
+ /* XMAC base address */
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Initialize pause and pfc registers */
+ pause_val = 0x18000;
+ pfc0_val = 0xFFFF8000;
+ pfc1_val = 0x2;
+
+ /* No PFC support */
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+
+ /*
+ * RX flow control - Process pause frame in receive direction
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+ /*
+ * TX flow control - Send pause packet when buffer is full
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+ } else {/* PFC support */
+ pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+ XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+ XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+ }
+
+ /* Write pause and PFC registers */
+ REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+
+ /* Set MAC address for source TX Pause/PFC frames */
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+
+ udelay(30);
+}
+
+
+static void bnx2x_emac_get_pfc_stat(struct link_params *params,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val_xon = 0;
+ u32 val_xoff = 0;
+
+ DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
+
+ /* PFC received frames */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
+
+ pfc_frames_received[0] = val_xon + val_xoff;
+
+ /* PFC frames sent */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_SENT);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
+
+ pfc_frames_sent[0] = val_xon + val_xoff;
+}
+
+/* Read pfc statistic*/
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "pfc statistic\n");
+
+ if (!vars->link_up)
+ return;
+
+ if (MAC_TYPE_EMAC == vars->mac_type) {
+ DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
+ bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
+ pfc_frames_received);
+ }
+}
+/******************************************************************/
+/* MAC/PBF section */
+/******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+{
+ u32 mode, emac_base;
+ /**
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ * (a value of 49==0x31) and make sure that the AUTO poll is off
+ */
+
+ if (CHIP_IS_E2(bp))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+ mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
+ EMAC_MDIO_MODE_CLOCK_CNT);
+ if (USES_WARPCORE(bp))
+ mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+ else
+ mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+
+ mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+
+ udelay(40);
+}
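+
+/*
+ * Rough arithmetic behind the clock counts above (assuming the usual
+ * MDC divider of core_clock / (2 * (CLOCK_CNT + 1)) and a 250MHz core
+ * clock): a count of 49 gives 250MHz / (2 * 50) = 2.5MHz, matching the
+ * comment; the Warpcore path uses a larger count (74) for a slower MDC.
+ */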
+
+static void bnx2x_emac_init(struct link_params *params,
+ struct link_vars *vars)
+{
+ /* reset and unreset the emac core */
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val;
+ u16 timeout;
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ udelay(5);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+
+ /* init emac - use read-modify-write */
+ /* self clear reset */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+
+ timeout = 200;
+ do {
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+ if (!timeout) {
+ DP(NETIF_MSG_LINK, "EMAC timeout!\n");
+ return;
+ }
+ timeout--;
+ } while (val & EMAC_MODE_RESET);
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
+ /* Set mac address */
+ val = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
+
+ val = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
+}
+
+static void bnx2x_set_xumac_nig(struct link_params *params,
+ u16 tx_pause_en,
+ u8 enable)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+ NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void bnx2x_umac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ struct bnx2x *bp = params->bp;
+ /* Reset UMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+ DP(NETIF_MSG_LINK, "enabling UMAC\n");
+
+ /**
+ * This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /**
+ * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
+ * params->port*0x14, 0xfffff.
+ */
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+ val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+ UMAC_COMMAND_CONFIG_REG_PAD_EN |
+ UMAC_COMMAND_CONFIG_REG_SW_RESET |
+ UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
+ switch (vars->line_speed) {
+ case SPEED_10:
+ val |= (0<<2);
+ break;
+ case SPEED_100:
+ val |= (1<<2);
+ break;
+ case SPEED_1000:
+ val |= (2<<2);
+ break;
+ case SPEED_2500:
+ val |= (3<<2);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
+ vars->line_speed);
+ break;
+ }
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+
+ /* Enable RX and TX */
+ val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+ val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Remove SW Reset */
+ val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+ /*
+ * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+ vars->mac_type = MAC_TYPE_UMAC;
+
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
+{
+ u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+ /*
+ * In 4-port mode, the mode needs to be set only once, so if XMAC is
+ * already out of reset, it means the mode has already been set, and
+ * it must *not* reset the XMAC again, since it controls both ports
+ * of the path.
+ */
+
+ if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)) {
+ DP(NETIF_MSG_LINK,
+ "XMAC already out of reset in 4-port mode\n");
+ return;
+ }
+
+ /* Hard reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ if (is_port4mode) {
+ DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+ /* Set the number of ports on the system side to up to 2 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ /* Set the number of ports on the system side to 1 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+ if (max_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 10G x 1 port per path\n");
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 20G x 2 ports per path\n");
+ /* Set the number of ports on the Warp Core to 20G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+ }
+ }
+ /* Soft reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+
+}
+
+static void bnx2x_xmac_disable(struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC) {
+ /*
+ * Send an indication to change the state in the NIG back to XON
+ * Clearing this bit enables the next set of this bit to get
+ * rising edge
+ */
+ pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl & ~(1<<1)));
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl | (1<<1)));
+ DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+ usleep_range(1000, 1000);
+ bnx2x_set_xumac_nig(params, 0, 0);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+ XMAC_CTRL_REG_SOFT_RESET);
+ }
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val, xmac_base;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ bnx2x_xmac_init(bp, vars->line_speed);
+
+ /*
+ * This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /*
+ * This register tells the NIG whether to send traffic to UMAC
+ * or XMAC
+ */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+
+ /* Set Max packet size */
+ REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+ /* CRC append for Tx packets */
+ REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+ /* update PFC */
+ bnx2x_update_pfc_xmac(params, vars, 0);
+
+ /* Enable TX and RX */
+ val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+
+ vars->mac_type = MAC_TYPE_XMAC;
+
+ return 0;
+}
+static int bnx2x_emac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "enabling EMAC\n");
+
+ /* Disable BMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* enable emac and not bmac */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
+
+ /* ASIC */
+ if (vars->phy_flags & PHY_XGXS_FLAG) {
+ u32 ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ DP(NETIF_MSG_LINK, "XGXS\n");
+ /* select the master lanes (out of 0-3) */
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
+ /* select XGXS */
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+ } else { /* SerDes */
+ DP(NETIF_MSG_LINK, "SerDes\n");
+ /* select SerDes */
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
+ }
+
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_RESET);
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_RESET);
+
+ if (CHIP_REV_IS_SLOW(bp)) {
+ /* config GMII mode */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
+ } else { /* ASIC */
+ /* pause enable/disable */
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
+ }
+
+ /* KEEP_VLAN_TAG, promiscuous */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
+ val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+ /*
+ * Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+ * effect on the operation of the pause frames. This bit affects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC again */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
+ EMAC_REG_RX_PFC_MODE_RX_EN |
+ EMAC_REG_RX_PFC_MODE_TX_EN |
+ EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+ EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
+ ((0x0101 <<
+ EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+ (0x00ff <<
+ EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+ val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+ }
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
+
+ /* Set Loopback */
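+ /*
+ * (0x810 below is taken to combine the EMAC_MODE force-link
+ * (1<<11) and MAC-loopback (1<<4) bits; named constants would be
+ * clearer)
+ */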
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ if (lb)
+ val |= 0x810;
+ else
+ val &= ~0x810;
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
+
+ /* enable emac */
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
+
+ /* enable emac for jumbo packets */
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
+ (EMAC_RX_MTU_SIZE_JUMBO_ENA |
+ (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
+
+ /* strip CRC */
+ REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
+
+ /* disable the NIG in/out to the bmac */
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
+
+ /* enable the NIG in/out to the emac */
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+
+ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
+
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+
+ vars->mac_type = MAC_TYPE_EMAC;
+ return 0;
+}
+
+static void bnx2x_update_pfc_bmac1(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ u32 val = 0x14;
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+ /* tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
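+ /*
+ * (0xc0 is the TX control default; 0x800000 is assumed to be the
+ * TX pause-enable bit of BIGMAC_REGISTER_TX_CONTROL)
+ */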
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
+
+static void bnx2x_update_pfc_bmac2(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ /*
+ * Set rx control: Strip CRC and enable BigMAC to relay
+ * control packets to the system as well
+ */
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 val = 0x14;
+
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
+ udelay(30);
+
+ /* Tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x0;
+ wb_data[0] |= (1<<0); /* RX */
+ wb_data[0] |= (1<<1); /* TX */
+ wb_data[0] |= (1<<2); /* Force initial Xon */
+ wb_data[0] |= (1<<3); /* 8 cos */
+ wb_data[0] |= (1<<5); /* STATS */
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+ wb_data, 2);
+ /* Clear the force Xon */
+ wb_data[0] &= ~(1<<2);
+ } else {
+ DP(NETIF_MSG_LINK, "PFC is disabled\n");
+ /* disable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x8;
+ wb_data[1] = 0;
+ }
+
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+
+ /*
+ * Set time (base unit is 512 bit times) between automatic
+ * re-sending of PP packets, and enable automatic re-send of
+ * Per-Priority Packets as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
+ val = 0x8000;
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= (1<<16); /* enable automatic re-send */
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
+ wb_data, 2);
+
+ /* mac control */
+ val = 0x3; /* Enable RX and TX */
+ if (is_lb) {
+ val |= 0x4; /* Local loopback */
+ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+ }
+ /* When PFC enabled, Pass pause frames towards the NIG. */
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= ((1<<6)|(1<<5));
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+}
+
+
+/* PFC BRB internal port configuration params */
+struct bnx2x_pfc_brb_threshold_val {
+ u32 pause_xoff;
+ u32 pause_xon;
+ u32 full_xoff;
+ u32 full_xon;
+};
+
+struct bnx2x_pfc_brb_e3b0_val {
+ u32 full_lb_xoff_th;
+ u32 full_lb_xon_threshold;
+ u32 lb_guarantied;
+ u32 mac_0_class_t_guarantied;
+ u32 mac_0_class_t_guarantied_hyst;
+ u32 mac_1_class_t_guarantied;
+ u32 mac_1_class_t_guarantied_hyst;
+};
+
+struct bnx2x_pfc_brb_th_val {
+ struct bnx2x_pfc_brb_threshold_val pauseable_th;
+ struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+};
+static int bnx2x_pfc_brb_get_config_params(
+ struct link_params *params,
+ struct bnx2x_pfc_brb_th_val *config_val)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+ if (CHIP_IS_E2(bp)) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else if (CHIP_IS_E3A0(bp)) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else if (CHIP_IS_E3B0(bp)) {
+ if (params->phy[INT_PHY].flags &
+ FLAGS_4_PORT_MODE) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ }
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
+ struct bnx2x_pfc_brb_e3b0_val
+ *e3b0_val,
+ u32 cos0_pauseable,
+ u32 cos1_pauseable)
+{
+ if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_4P_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+ } else {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+ if (cos0_pauseable != cos1_pauseable) {
+ /* non-pauseable = Lossy, pauseable = Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+ } else if (cos0_pauseable) {
+ /* Lossless + Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+ } else {
+ /* Lossy + Lossy */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+ }
+ }
+}
+static int bnx2x_update_pfc_brb(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params
+ *pfc_params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_pfc_brb_th_val config_val = { {0} };
+ struct bnx2x_pfc_brb_threshold_val *reg_th_config =
+ &config_val.pauseable_th;
+ struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+ int bnx2x_status = 0;
+ u8 port = params->port;
+
+ /* default - pause configuration */
+ reg_th_config = &config_val.pauseable_th;
+ bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
+ if (0 != bnx2x_status)
+ return bnx2x_status;
+
+ if (set_pfc && pfc_params)
+ /* First COS */
+ if (!pfc_params->cos0_pauseable)
+ reg_th_config = &config_val.non_pauseable_th;
+ /*
+ * The number of free blocks below which the pause signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+ /*
+ * The number of free blocks above which the pause signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_0_XON_THRESHOLD_0, reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_0_XOFF_THRESHOLD_0, reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_0_XON_THRESHOLD_0, reg_th_config->full_xon);
+
+ if (set_pfc && pfc_params) {
+ /* Second COS */
+ if (pfc_params->cos1_pauseable)
+ reg_th_config = &config_val.pauseable_th;
+ else
+ reg_th_config = &config_val.non_pauseable_th;
+ /*
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+ /*
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+ reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+ reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XON_THRESHOLD_0,
+ reg_th_config->full_xon);
+
+ if (CHIP_IS_E3B0(bp)) {
+ /* Should be done by init tool */
+ /*
+ * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
+ * (reset value 944)
+ */
+
+ /*
+ * The hysteresis on the guaranteed buffer space for the LB port
+ * before signaling XON.
+ */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
+
+ bnx2x_pfc_brb_get_e3b0_config_params(
+ params,
+ &e3b0_val,
+ pfc_params->cos0_pauseable,
+ pfc_params->cos1_pauseable);
+ /*
+ * The number of free blocks below which the full signal to the
+ * LB port is asserted.
+ */
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+ e3b0_val.full_lb_xoff_th);
+ /*
+ * The number of free blocks above which the full signal to the
+ * LB port is de-asserted.
+ */
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+ e3b0_val.full_lb_xon_threshold);
+ /*
+ * The number of blocks guaranteed for the MAC #n port. n=0,1
+ */
+
+ /* The number of blocks guaranteed for the LB port. */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+ e3b0_val.lb_guarantied);
+
+ /*
+ * The number of blocks guaranteed for the MAC #n port.
+ */
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+ 2 * e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+ 2 * e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The number of blocks guaranteed for class #t in MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ /*
+ * The hysteresis on the guaranteed buffer space for class #t in
+ * MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+
+ /*
+ * The number of blocks guaranteed for class #t in MAC1. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The hysteresis on the guaranteed buffer space for class #t
+ * in MAC1. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+
+ }
+
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+******************************************************************************/
+int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
+{
+ u32 nig_reg_rx_priority_mask_add = 0;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+ break;
+ case 1:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+ break;
+ case 2:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+ break;
+ default:
+ /* avoid a stray write to address 0 for a bogus cos_entry */
+ return -EINVAL;
+ }
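+ /*
+ * Only port 0 has COS3-5 priority-mask registers, hence the
+ * -EINVAL for those entries on port 1; entries above COS5 are
+ * invalid on either port.
+ */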
+
+ REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+ return 0;
+}
+static void bnx2x_update_mng(struct link_params *params, u32 link_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
+}
+
+static void bnx2x_update_pfc_nig(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *nig_params)
+{
+ u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
+ u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+ u32 pkt_priority_to_cos = 0;
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+ DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
+
+ /*
+ * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+ * MAC control frames (that are not pause packets)
+ * will be forwarded to the XCM.
+ */
+ xcm_mask = REG_RD(bp,
+ port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
+ /*
+ * nig params will override non PFC params, since it's possible to
+ * do transition from PFC to SAFC
+ */
+ if (set_pfc) {
+ pause_enable = 0;
+ llfc_out_en = 0;
+ llfc_enable = 0;
+ if (CHIP_IS_E3(bp))
+ ppp_enable = 0;
+ else
+ ppp_enable = 1;
+ xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 0;
+ p0_hwpfc_enable = 1;
+ } else {
+ if (nig_params) {
+ llfc_out_en = nig_params->llfc_out_en;
+ llfc_enable = nig_params->llfc_enable;
+ pause_enable = nig_params->pause_enable;
+ } else /* default non-PFC mode - PAUSE */
+ pause_enable = 1;
+
+ xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 1;
+ }
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+ NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
+ REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+ NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+ REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+ NIG_REG_LLFC_ENABLE_0, llfc_enable);
+ REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+ NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+ REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+ NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+ REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+ REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+ /* output enable for RX_XCM # IF */
+ REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+
+ /* HW PFC TX enable */
+ REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+
+ if (nig_params) {
+ u8 i = 0;
+ pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+ for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+ bnx2x_pfc_nig_rx_priority_mask(bp, i,
+ nig_params->rx_cos_priority_mask[i], port);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+ nig_params->llfc_high_priority_classes);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+ nig_params->llfc_low_priority_classes);
+ }
+ REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+ NIG_REG_P0_PKT_PRIORITY_TO_COS,
+ pkt_priority_to_cos);
+}
+
+int bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+ /*
+ * PFC and pause are orthogonal to one another, meaning that when
+ * PFC is enabled, pause is disabled, and when PFC is disabled,
+ * pause is set according to the pause result.
+ */
+ u32 val;
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+ else
+ vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* update NIG params */
+ bnx2x_update_pfc_nig(params, vars, pfc_params);
+
+ /* update BRB params */
+ bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
+ if (0 != bnx2x_status)
+ return bnx2x_status;
+
+ if (!vars->link_up)
+ return bnx2x_status;
+
+ DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
+ if (CHIP_IS_E3(bp))
+ bnx2x_update_pfc_xmac(params, vars, 0);
+ else {
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+ bnx2x_emac_enable(params, vars, 0);
+ return bnx2x_status;
+ }
+
+ if (CHIP_IS_E2(bp))
+ bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
+ else
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+ }
+ return bnx2x_status;
+}
+
+
+static int bnx2x_bmac1_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
+
+ /* XGXS control */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ /* tx MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
+
+ /* mac control */
+ val = 0x3;
+ if (is_lb) {
+ val |= 0x4;
+ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+ }
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
+
+ /* set rx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
+
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ /* set tx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
+
+ /* set cnt max size */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+
+ /* configure safc */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+
+ return 0;
+}
+
+static int bnx2x_bmac2_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+
+ DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
+
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+ udelay(30);
+
+ /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ udelay(30);
+
+ /* tx MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
+ wb_data, 2);
+
+ udelay(30);
+
+ /* Configure SAFC */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+ udelay(30);
+
+ /* set rx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
+ udelay(30);
+
+ /* set tx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
+ udelay(30);
+ /* set cnt max size */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+ udelay(30);
+ bnx2x_update_pfc_bmac2(params, vars, is_lb);
+
+ return 0;
+}
+
+static int bnx2x_bmac_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ int rc = 0;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 val;
+ /* reset and unreset the BigMac */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ msleep(1);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* enable access for bmac registers */
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+
+ /* Enable BMAC according to BMAC type*/
+ if (CHIP_IS_E2(bp))
+ rc = bnx2x_bmac2_enable(params, vars, is_lb);
+ else
+ rc = bnx2x_bmac1_enable(params, vars, is_lb);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
+
+ vars->mac_type = MAC_TYPE_BMAC;
+ return rc;
+}
+
+static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
+{
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+ u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+
+ /* Only if the bmac is out of reset */
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
+ nig_bmac_enable) {
+
+ if (CHIP_IS_E2(bp)) {
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr +
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(bp, bmac_addr +
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ } else {
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr +
+ BIGMAC_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(bp, bmac_addr +
+ BIGMAC_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ }
+ msleep(1);
+ }
+}
+
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+ u32 line_speed)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 init_crd, crd;
+ u32 count = 1000;
+
+ /* disable port */
+ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+
+ /* wait for init credit */
+ init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
+
+ while ((init_crd != crd) && count) {
+ msleep(5);
+
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ count--;
+ }
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ if (init_crd != crd) {
+ DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
+ init_crd, crd);
+ return -EINVAL;
+ }
+
+ if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
+ line_speed == SPEED_10 ||
+ line_speed == SPEED_100 ||
+ line_speed == SPEED_1000 ||
+ line_speed == SPEED_2500) {
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
+ /* update init credit */
+ init_crd = 778; /* (800-18-4) */
+
+ } else {
+ u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
+ ETH_OVREHEAD)/16;
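+ /* (the arbiter threshold is assumed to be counted in 16-byte
+ * units, hence the divide by 16)
+ */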
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
+ /* update init credit */
+ switch (line_speed) {
+ case SPEED_10000:
+ init_crd = thresh + 553 - 22;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ line_speed);
+ return -EINVAL;
+ }
+ }
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
+ DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+ line_speed, init_crd);
+
+ /* probe the credit changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
+ msleep(5);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
+
+ /* enable port */
+ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+ return 0;
+}
+
+/**
+ * bnx2x_get_emac_base - retrieve emac base address
+ *
+ * @bp: driver handle
+ * @mdc_mdio_access: access type
+ * @port: port id
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, the port, and whether
+ * the ports are swapped. Each phy has a default access mode, which
+ * can be overridden by nvram configuration. This parameter, whether
+ * it is the default phy configuration or the nvram override
+ * configuration, is passed here as mdc_mdio_access and selects the
+ * emac_base for the CL45 read/write operations.
+ */
+static u32 bnx2x_get_emac_base(struct bnx2x *bp,
+ u32 mdc_mdio_access, u8 port)
+{
+ u32 emac_base = 0;
+ switch (mdc_mdio_access) {
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC1;
+ else
+ emac_base = GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = GRCBASE_EMAC1;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
+ emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+ break;
+ default:
+ break;
+ }
+ return emac_base;
+
+}
+
+/******************************************************************/
+/* CL22 access functions */
+/******************************************************************/
+static int bnx2x_cl22_write(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 val)
+{
+ u32 tmp, mode;
+ u8 i;
+ int rc = 0;
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* command word: phy addr in [25:21], reg in [20:16], data in [15:0] */
+ tmp = ((phy->addr << 21) | (reg << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ rc = -EFAULT;
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
+
+static int bnx2x_cl22_read(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 *ret_val)
+{
+ u32 val, mode;
+ u16 i;
+ int rc = 0;
+
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* command word: phy addr in [25:21], reg in [20:16] */
+ val = ((phy->addr << 21) | (reg << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_22 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+ udelay(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+
+ *ret_val = 0;
+ rc = -EFAULT;
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
+
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
+{
+ u32 val;
+ u16 i;
+ int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ /* address */
+ val = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ *ret_val = 0;
+ rc = -EFAULT;
+ } else {
+ /* data */
+ val = ((phy->addr << 21) | (devad << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ *ret_val = 0;
+ rc = -EFAULT;
+ }
+ }
+ /* Workaround for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
+{
+ u32 tmp;
+ u8 i;
+ int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+
+ /* address */
+
+ tmp = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+
+ } else {
+ /* data */
+ tmp = ((phy->addr << 21) | (devad << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+ }
+ }
+ /* Workaround for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+
+/******************************************************************/
+/* BSC access functions from E3 */
+/******************************************************************/
+static void bnx2x_bsc_module_sel(struct link_params *params)
+{
+ int idx;
+ u32 board_cfg, sfp_ctrl;
+ u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ /* Read I2C output PINs */
+ board_cfg = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.shared_hw_config.board));
+ i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+ i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+ SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+ /* Read I2C output value */
+ sfp_ctrl = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg));
+ i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+ i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+ DP(NETIF_MSG_LINK, "Setting BSC switch\n");
+ for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+ bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
+}
+
+static int bnx2x_bsc_read(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 sl_devid,
+ u16 sl_addr,
+ u8 lc_addr,
+ u8 xfer_cnt,
+ u32 *data_array)
+{
+ u32 val, i;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+
+ if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
+ return -EINVAL;
+ }
+
+ if (xfer_cnt > 16) {
+ DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
+ xfer_cnt);
+ return -EINVAL;
+ }
+ bnx2x_bsc_module_sel(params);
+
+ xfer_cnt = 16 - lc_addr;
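+ /* (xfer_cnt is recomputed so the read always runs to the end of
+ * the 16-byte IMC window; the validated argument above is
+ * effectively overridden)
+ */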
+
+ /* enable the engine */
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ val |= MCPR_IMC_COMMAND_ENABLE;
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* program slave device ID */
+ val = (sl_devid << 16) | sl_addr;
+ REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+ /* start xfer with a 0-byte write just to update the address pointer */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_WRITE_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* poll for completion */
+ i = 0;
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ udelay(10);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
+ i);
+ rc = -EFAULT;
+ break;
+ }
+ }
+ if (rc == -EFAULT)
+ return rc;
+
+ /* start xfer with read op */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_READ_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+ (xfer_cnt);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* poll for completion */
+ i = 0;
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ udelay(10);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
+ rc = -EFAULT;
+ break;
+ }
+ }
+ if (rc == -EFAULT)
+ return rc;
+
+ for (i = (lc_addr >> 2); i < 4; i++) {
+ data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
+#ifdef __BIG_ENDIAN
+ data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+ ((data_array[i] & 0x0000ff00) << 8) |
+ ((data_array[i] & 0x00ff0000) >> 8) |
+ ((data_array[i] & 0xff000000) >> 24);
+#endif
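+ /* (the open-coded byte reversal above is equivalent to swab32()) */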
+ }
+ return rc;
+}
+
+static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 or_val)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy, devad, reg, &val);
+ bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
+}
+
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val)
+{
+ u8 phy_index;
+ /*
+ * Probe for the phy according to the given phy_addr, and execute
+ * the read request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_read(params->bp,
+ ¶ms->phy[phy_index], devad,
+ reg, ret_val);
+ }
+ }
+ return -EINVAL;
+}
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val)
+{
+ u8 phy_index;
+ /*
+ * Probe for the phy according to the given phy_addr, and execute
+ * the write request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_write(params->bp,
+ ¶ms->phy[phy_index], devad,
+ reg, val);
+ }
+ }
+ return -EINVAL;
+}
+static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 lane = 0;
+ struct bnx2x *bp = params->bp;
+ u32 path_swap, path_swap_ovr;
+ u8 path, port;
+
+ path = BP_PATH(bp);
+ port = params->port;
+
+ if (bnx2x_is_4_port_mode(bp)) {
+ u32 port_swap, port_swap_ovr;
+
+ /* figure out path swap value */
+ path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1)
+ path_swap = (path_swap_ovr & 0x2);
+ else
+ path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+ if (path_swap)
+ path = path ^ 1;
+
+ /* figure out port swap value */
+ port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+ if (port_swap_ovr & 0x1)
+ port_swap = (port_swap_ovr & 0x2);
+ else
+ port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
+
+ if (port_swap)
+ port = port ^ 1;
+
+ lane = (port<<1) + path;
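+ /* (4-port mode thus maps {port, path} to lanes 0-3, two lanes
+ * per port)
+ */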
+ } else { /* two port mode - no port swap */
+
+ /* figure out path swap value */
+ path_swap_ovr =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1) {
+ path_swap = (path_swap_ovr & 0x2);
+ } else {
+ path_swap =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
+ }
+ if (path_swap)
+ path = path ^ 1;
+
+ lane = path << 1;
+ }
+ return lane;
+}
+
+static void bnx2x_set_aer_mmd(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ u32 ser_lane;
+ u16 offset, aer_val;
+ struct bnx2x *bp = params->bp;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+ (phy->addr + ser_lane) : 0;
+
+ if (USES_WARPCORE(bp)) {
+ aer_val = bnx2x_get_warpcore_lane(phy, params);
+ /*
+ * In Dual-lane mode, two lanes are joined together,
+ * so in order to configure them, the AER broadcast method is
+ * used here.
+ * 0x200 is the broadcast address for lanes 0,1
+ * 0x201 is the broadcast address for lanes 2,3
+ */
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ aer_val = (aer_val >> 1) | 0x200;
+ } else if (CHIP_IS_E2(bp))
+ aer_val = 0x3800 + offset - 1;
+ else
+ aer_val = 0x3800 + offset;
+ DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
+
+}
+
+/******************************************************************/
+/* Internal phy section */
+/******************************************************************/
+
+static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
+{
+ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ /* Set Clause 22 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+ udelay(500);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+ udelay(500);
+ /* Set Clause 45 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
+}
+
+static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
+{
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
+
+ val = SERDES_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ bnx2x_set_serdes_access(bp, port);
+
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
+}
+
+static void bnx2x_xgxs_deassert(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u32 val;
+ DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
+ port = params->port;
+
+ val = XGXS_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ params->phy[INT_PHY].def_md_devad);
+}
+
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+ struct link_params *params, u16 *ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+ /*
+ * Resolve pause mode and advertisement. Please refer to Table
+ * 28B-3 of the 802.3ab-1999 spec.
+ */
+
+ switch (phy->req_flow_ctrl) {
+ case BNX2X_FLOW_CTRL_AUTO:
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ else
+ *ieee_fc |=
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case BNX2X_FLOW_CTRL_TX:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case BNX2X_FLOW_CTRL_RX:
+ case BNX2X_FLOW_CTRL_BOTH:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ break;
+
+ case BNX2X_FLOW_CTRL_NONE:
+ default:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+ break;
+ }
+ DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
+}
+
+static void set_phy_vars(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 actual_phy_idx, phy_index, link_cfg_idx;
+ u8 phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ params->phy[actual_phy_idx].req_flow_ctrl =
+ params->req_flow_ctrl[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_line_speed =
+ params->req_line_speed[link_cfg_idx];
+
+ params->phy[actual_phy_idx].speed_cap_mask =
+ params->speed_cap_mask[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_duplex =
+ params->req_duplex[link_cfg_idx];
+
+ if (params->req_line_speed[link_cfg_idx] ==
+ SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+ DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x\n",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
+ }
+}
+
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
+ struct bnx2x *bp = params->bp;
+ /* read-modify-write pause advertising */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+ }
+ DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
+{ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+
+ case 0xe: /* 1 1 1 0 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+
+ default:
+ break;
+ }
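+ /*
+ * pause_result layout as built by the callers in this file:
+ * bit 3 = LD asym, bit 2 = LD pause, bit 1 = LP asym,
+ * bit 0 = LP pause (cf. Table 28B-3)
+ */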
+ if (pause_result & (1<<0))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+ if (pause_result & (1<<1))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+}
+
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ u8 ret = 0;
+ /* read twice */
+
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ ret = 1;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+ bnx2x_cl22_read(bp, phy,
+ 0x4, &ld_pause);
+ bnx2x_cl22_read(bp, phy,
+ 0x5, &lp_pause);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ }
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
+ pause_result);
+ bnx2x_pause_resolve(vars, pause_result);
+ }
+ return ret;
+}
+/******************************************************************/
+/* Warpcore section */
+/******************************************************************/
+/* The init_internal_warpcore should mirror the xgxs,
+ * i.e. reset the lane (if needed), set aer for the
+ * init configuration, and set/clear SGMII flag. Internal
+ * phy init is done purely in phy_init stage.
+ */
+static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u16 val16 = 0, lane, bam37 = 0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+ /* Check adding advertisement for 1G KX */
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (vars->line_speed == SPEED_1000)) {
+ u16 sd_digital;
+ val16 |= (1<<5);
+
+ /* Enable CL37 1G Parallel Detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (sd_digital | 0x1));
+
+ DP(NETIF_MSG_LINK, "Advertize 1G\n");
+ }
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (vars->line_speed == SPEED_10000)) {
+ /* Check adding advertisement for 10G KR */
+ val16 |= (1<<7);
+ /* Enable 10G Parallel Detect */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+
+ DP(NETIF_MSG_LINK, "Advertize 10G\n");
+ }
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+ 0x03f0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+ 0x03f0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x383f);
+
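+ /* Bit 5 was set above for 1G-KX and bit 7 for 10G-KR; these
+ * presumably match the CL73 technology-ability bit positions in
+ * the IEEE1BLK advertisement register.
+ */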
+ /* Advertised speeds */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+
+ /* Advertise and set FEC (Forward Error Correction) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
+ /* Enable CL37 BAM */
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
+
+ /* Advertise pause */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Enable Autoneg */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
+
+ /* Over 1G - AN local device user page 1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, &val16);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+}
+
+static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+
+ /* Disable Autoneg */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
+
+ /* Disable CL36 PCS Tx */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
+
+ /* Double Wide Single Data Rate @ pll rate */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
+
+ /* Leave cl72 training enable, needed for KR */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
+ 0x2);
+
+ /* Leave CL72 enabled */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ val | 0x3800);
+
+ /* Set speed via PMA/PMD register */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+ /* Enable encoded forced speed */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+ /* Enable TX scrambling of the payload only (64/66 scrambler) */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+ /* Enable RX scrambling of the payload only (64/66 scrambler) */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+ /* Set and clear loopback to cause a reset of the 64/66 decoder */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+}
+
+static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 is_xfi)
+{
+ struct bnx2x *bp = params->bp;
+ u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ /* Hold rxSeqStart */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+
+ /* Hold tx_fifo_reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+
+ /* Disable CL73 AN */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+ /* Disable 100FX Enable and Auto-Detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+
+ /* Disable 100FX Idle detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+
+ /* Set Block address to Remote PHY & Clear forced_speed[5] */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+
+ /* Turn off auto-detect & fiber mode */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (val & 0xFFEE));
+
+ /* Set filter_force_link, disable_false_link and parallel_detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ ((val | 0x0006) & 0xFFFE));
+
+ /* Set XFI / SFI */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+ misc1_val &= ~(0x1f);
+
+ if (is_xfi) {
+ misc1_val |= 0x5;
+ tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+ tx_driver_val =
+ ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ } else {
+ misc1_val |= 0x9;
+ tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+ tx_driver_val =
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+ }
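+ /* The low five bits of SERDESDIGITAL_MISC1 appear to select the
+ * forced serdes rate (0x5 for XFI, 0x9 for SFI), while tap_val and
+ * tx_driver_val are TX FIR equalization presets tuned per media type.
+ */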
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ tx_driver_val);
+
+ /* Enable fiber mode, enable and invert sig_det */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+
+ /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+
+ /* 10G XFI Full Duplex */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+ /* Release tx_fifo_reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
+
+ /* Release rxSeqStart */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
+}
+
+static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n");
+}
+
+static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 lane)
+{
+ /* Rx0 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+ /* Rx2 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
+
+ /* Serdes Digital Misc1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+ /* Serdes Digital4 Misc3 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+ /* Set Transmit PMD settings */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+}
+
+static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 fiber_mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16, digctrl_kx1, digctrl_kx2;
+ u8 lane;
+
+ lane = bnx2x_get_warpcore_lane(phy, params);
+
+ /* Clear XFI clock comp in non-10G single lane mode. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+
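+ /* The values below are the standard clause-22 MII control bits:
+ * 0x1000 enables autoneg, 0x2000 and 0x0040 are the speed-select
+ * LSB (100M) and MSB (1000M), and 0x0100 selects full duplex.
+ */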
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* SGMII Autoneg */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ val16 | 0x1000);
+ DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ val16 &= 0xcfbf;
+ switch (phy->req_line_speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ val16 |= 0x2000;
+ break;
+ case SPEED_1000:
+ val16 |= 0x0040;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "Speed not supported: 0x%x\n", phy->req_line_speed);
+ return;
+ }
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ val16 |= 0x0100;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+ DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
+ phy->req_line_speed);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ DP(NETIF_MSG_LINK, " (readback) %x\n", val16);
+ }
+
+ /* SGMII Slave mode and disable signal detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+ if (fiber_mode)
+ digctrl_kx1 = 1;
+ else
+ digctrl_kx1 &= 0xff4a;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ digctrl_kx1);
+
+ /* Turn off parallel detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 & ~(1<<2)));
+
+ /* Re-enable parallel detect */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 | (1<<2)));
+
+ /* Enable autodet */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (digctrl_kx1 | 0x10));
+}
+
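+/* Bits 15:14 of DIGITAL5_MISC6 appear to hold the per-lane reset
+ * controls: 0xC000 asserts the reset and clearing them releases it.
+ * The trailing read-back presumably flushes the write.
+ */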
+static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 reset)
+{
+ u16 val;
+ /* Take lane out of reset after configuration is finished */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+ if (reset)
+ val |= 0xC000;
+ else
+ val &= 0x3FFF;
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, val);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
+
+
+ /* Clear SFI/XFI link settings registers */
+static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 lane)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+
+ /* Set XFI clock comp as default. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, 0x014a);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0800);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
+ u32 chip_id,
+ u32 shmem_base, u8 port,
+ u8 *gpio_num, u8 *gpio_port)
+{
+ u32 cfg_pin;
+ *gpio_num = 0;
+ *gpio_port = 0;
+ if (CHIP_IS_E3(bp)) {
+ cfg_pin = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+ PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+ /*
+ * Should not happen. This function is called only upon an
+ * interrupt triggered by a GPIO (since the EPIO can only
+ * generate interrupts to the MCP).
+ * So if this function was called and none of the GPIOs was set,
+ * something has gone badly wrong.
+ */
+ if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+ (cfg_pin > PIN_CFG_GPIO3_P1)) {
+ DP(NETIF_MSG_LINK,
+ "ERROR: Invalid cfg pin %x for module detect indication\n",
+ cfg_pin);
+ return -EINVAL;
+ }
+
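+ /* The PIN_CFG_GPIOx_Py values are presumably enumerated
+ * contiguously (GPIO0..3 of port 0, then GPIO0..3 of port 1),
+ * so the low two bits select the GPIO and bit 2 the port.
+ */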
+ *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+ *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+ } else {
+ *gpio_num = MISC_REGISTERS_GPIO_3;
+ *gpio_port = port;
+ }
+ DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+ return 0;
+}
+
+static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_num, gpio_port;
+ u32 gpio_val;
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
+ params->shmem_base, params->port,
+ &gpio_num, &gpio_port) != 0)
+ return 0;
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+ /* Call the handling function in case module is detected */
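+ /* MOD_ABS is presumably active-high for "module absent",
+ * so a low GPIO reading means a module is present.
+ */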
+ if (gpio_val == 0)
+ return 1;
+ else
+ return 0;
+}
+
+static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 serdes_net_if;
+ u8 fiber_mode;
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ serdes_net_if = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
+ "serdes_net_if = 0x%x\n",
+ vars->line_speed, serdes_net_if);
+ bnx2x_set_aer_mmd(params, phy);
+
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+ (phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10)))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+ } else {
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ /* Enable KR Auto Neg */
+ if (params->loopback_mode == LOOPBACK_NONE)
+ bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
+ bnx2x_warpcore_set_10G_KR(phy, params, vars);
+ }
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 1);
+ } else {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ DP(NETIF_MSG_LINK, "1G Fiber\n");
+ fiber_mode = 1;
+ } else {
+ DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
+ fiber_mode = 0;
+ }
+ bnx2x_warpcore_set_sgmii_speed(phy,
+ params,
+ fiber_mode);
+ }
+
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+ }
+ /* Issue Module detection */
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ if (vars->line_speed != SPEED_20000) {
+ DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
+ bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
+ /* Issue Module detection */
+
+ bnx2x_sfp_module_detection(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ if (vars->line_speed != SPEED_20000) {
+ DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
+ bnx2x_warpcore_set_20G_KR2(bp, phy);
+ break;
+
+ default:
+ DP(NETIF_MSG_LINK,
+ "Unsupported Serdes Net Interface 0x%x\n",
+ serdes_net_if);
+ return;
+ }
+ }
+
+ /* Take lane out of reset after configuration is finished */
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ DP(NETIF_MSG_LINK, "Exit config init\n");
+}
+
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+
+ cfg_pin = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ /* Set the !tx_en since this pin is DISABLE_TX_LASER */
+ DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+ /* For 20G, the expected pin to be used is 3 pins after the current one */
+
+ bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+ bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
+static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+ bnx2x_sfp_e3_set_transmitter(params, phy, 0);
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+ /* Global register */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+
+ /* Clear loopback settings (if any) */
+ /* 10G & 20G */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
+ 0xBFFF);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ val16 & ~0x10);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16 & 0xff00);
+
+}
+
+static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+ u32 lane;
+ DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
+ params->loopback_mode, phy->req_line_speed);
+
+ if (phy->req_line_speed < SPEED_10000) {
+ /* 10/100/1000 */
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ val16 | 0x10);
+ /* Set 1G loopback based on lane (1-copy) */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16 | (1<<lane));
+
+ /* Switch back to 4-copy registers */
+ bnx2x_set_aer_mmd(params, phy);
+ /* Global loopback, not recommended. */
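+ /* 0x4000 is presumably the standard MII control loopback bit (bit 14) */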
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+ 0x4000);
+ } else {
+ /* 10G & 20G */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+ 0x4000);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ }
+}
+
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_10g_plus;
+ u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+ vars->phy_flags = PHY_XGXS_FLAG;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+ if (vars->link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->duplex = DUPLEX_FULL;
+ switch (vars->link_status &
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ case LINK_10THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_10TFD:
+ vars->line_speed = SPEED_10;
+ break;
+
+ case LINK_100TXHD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_100T4:
+ case LINK_100TXFD:
+ vars->line_speed = SPEED_100;
+ break;
+
+ case LINK_1000THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_1000TFD:
+ vars->line_speed = SPEED_1000;
+ break;
+
+ case LINK_2500THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_2500TFD:
+ vars->line_speed = SPEED_2500;
+ break;
+
+ case LINK_10GTFD:
+ vars->line_speed = SPEED_10000;
+ break;
+ case LINK_20GTFD:
+ vars->line_speed = SPEED_20000;
+ break;
+ default:
+ break;
+ }
+ vars->flow_ctrl = 0;
+ if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+ if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+ if (!vars->flow_ctrl)
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (vars->line_speed &&
+ ((vars->line_speed == SPEED_10) ||
+ (vars->line_speed == SPEED_100))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ } else {
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ }
+ if (vars->line_speed &&
+ USES_WARPCORE(bp) &&
+ (vars->line_speed == SPEED_1000))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ /* anything 10G and over uses the BMAC */
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+ if (link_10g_plus) {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_XMAC;
+ else
+ vars->mac_type = MAC_TYPE_BMAC;
+ } else {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_UMAC;
+ else
+ vars->mac_type = MAC_TYPE_EMAC;
+ }
+ } else { /* link down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ }
+
+ /* Sync media type */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+
+ params->phy[INT_PHY].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+ params->phy[EXT_PHY1].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+ params->phy[EXT_PHY2].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+ DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
+
+ /* Sync AEU offset */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+
+ vars->aeu_int_mask = REG_RD(bp, sync_offset);
+
+ /* Sync PFC status */
+ if (vars->link_status & LINK_STATUS_PFC_ENABLED)
+ params->feature_config_flags |=
+ FEATURE_CONFIG_PFC_ENABLED;
+ else
+ params->feature_config_flags &=
+ ~FEATURE_CONFIG_PFC_ENABLED;
+
+ DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
+ vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
+ DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
+ vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+
+static void bnx2x_set_master_ln(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ u16 new_master_ln, ser_lane;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ /* set the master_ln for AN */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
+}
+
+static int bnx2x_reset_unicore(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 set_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mii_control;
+ u16 i;
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+
+ /* reset the unicore */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ if (set_serdes)
+ bnx2x_set_serdes_access(bp, params->port);
+
+ /* wait for the reset to self clear */
+ for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
+ udelay(5);
+
+ /* the reset erased the previous bank value */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+
+ if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+ udelay(5);
+ return 0;
+ }
+ }
+
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
+ DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
+ return -EINVAL;
+
+}
+
+static void bnx2x_set_swap_lanes(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ /*
+ * Each pair of bits represents a lane number:
+ * no swap is 0123 => 0x1b, so there is no need to enable the swap.
+ */
+ u16 ser_lane, rx_lane_swap, tx_lane_swap;
+
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ rx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ tx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+
+ if (rx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ }
+
+ if (tx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ }
+}
+
+static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 control2;
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ else
+ control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+ phy->speed_cap_mask, control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "XGXS\n");
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
+
+
+ control2 |=
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
+
+ /* Disable parallel detection of HiG */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ }
+}
+
+static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u8 enable_cl73)
+{
+ struct bnx2x *bp = params->bp;
+ u16 reg_val;
+
+ /* CL37 Autoneg */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+
+ /* CL37 Autoneg Enabled */
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+ else /* CL37 Autoneg Disabled */
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /* Enable/Disable Autodetection */
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+ else
+ reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+
+ /* Enable TetonII and BAM autoneg */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ &reg_val);
+ if (vars->line_speed == SPEED_AUTO_NEG) {
+ /* Enable BAM aneg Mode and TetonII aneg Mode */
+ reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ } else {
+ /* TetonII and BAM Autoneg Disabled */
+ reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ }
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
+
+ if (enable_cl73) {
+ /* Enable Cl73 FSM status bits */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
+
+ /* Enable BAM Station Manager*/
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
+
+ /* Advertise CL73 link speeds */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
+
+ /* CL73 Autoneg Enabled */
+ reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+
+ } else /* CL73 Autoneg Disabled */
+ reg_val = 0;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+}
+
+/* program SerDes, forced speed */
+static void bnx2x_program_serdes(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 reg_val;
+
+ /* program duplex, disable autoneg and sgmii*/
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
+ if (phy->req_duplex == DUPLEX_FULL)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /*
+ * program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ /* clearing the speed value before setting the right speed */
+ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+ reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+ if (!((vars->line_speed == SPEED_1000) ||
+ (vars->line_speed == SPEED_100) ||
+ (vars->line_speed == SPEED_10))) {
+
+ reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+ if (vars->line_speed == SPEED_10000)
+ reg_val |=
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+ }
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
+
+}
+
+static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+
+ /* configure the 48 bits for BAM AN */
+
+ /* set extended capabilities */
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+ val |= MDIO_OVER_1G_UP1_2_5G;
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= MDIO_OVER_1G_UP1_10G;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
+}
+
+static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ /* for AN, we are always publishing full duplex */
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
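+ /* ieee_fc carries the CL37 pause bits at [8:7]; shifting left by
+ * three places them in the CL73 ADV1 pause field at [11:10].
+ */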
+ val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
+}
+
+static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 enable_cl73)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mii_control;
+
+ DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
+ /* Enable and restart BAM/CL37 aneg */
+
+ if (enable_cl73) {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ } else {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+ DP(NETIF_MSG_LINK,
+ "bnx2x_restart_autoneg mii_control before = 0x%x\n",
+ mii_control);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ }
+}
+
+static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 control1;
+
+ /* in SGMII mode, the unicore is always slave */
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
+ control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+ /* set sgmii mode (and not fiber) */
+ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
+
+ /* if forced speed */
+ if (!(vars->line_speed == SPEED_AUTO_NEG)) {
+ /* set speed, disable autoneg */
+ u16 mii_control;
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+ mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+
+ switch (vars->line_speed) {
+ case SPEED_100:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+ break;
+ case SPEED_1000:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+ break;
+ case SPEED_10:
+ /* there is nothing to set for 10M */
+ break;
+ default:
+ /* invalid speed for SGMII */
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
+ break;
+ }
+
+ /* setting the full duplex */
+ if (phy->req_duplex == DUPLEX_FULL)
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
+
+ } else { /* AN mode */
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, 0);
+ }
+}
+
+
+/*
+ * link management
+ */
+
+static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 pd_10g, status2_1000x;
+ if (phy->req_line_speed != SPEED_AUTO_NEG)
+ return 0;
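+ /* The status register is read twice: the first read presumably
+ * clears latched bits so the second reflects the current state.
+ */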
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+ DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
+ params->port);
+ return 1;
+ }
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
+
+ if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+ DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
+ params->port);
+ return 1;
+ }
+ return 0;
+}
+
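+/* Both the CL73 and CL37 branches below shift the local and partner
+ * pause bits into the same [3:0] nibble layout consumed by
+ * bnx2x_pause_resolve() (local pair in bits 3:2, partner in 1:0).
+ */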
+static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ struct bnx2x *bp = params->bp;
+ u16 ld_pause; /* local driver */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* resolve from gp_status in case of AN complete and not sgmii */
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+ (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+ if (bnx2x_direct_parallel_detect_used(phy, params)) {
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ return;
+ }
+ if ((gp_status &
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
+ >> 8;
+ pause_result |= (lp_pause &
+ MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
+ >> 10;
+ DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
+ pause_result);
+ } else {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
+ pause_result);
+ }
+ bnx2x_pause_resolve(vars, pause_result);
+ }
+ DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
+}
+
+static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 rx_status, ustat_val, cl37_fsm_received;
+ DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
+ /* Step 1: Make sure signal is detected */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
+ if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
+ (MDIO_RX0_RX_STATUS_SIGDET)) {
+ DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
+ "rx_status(0x80b0) = 0x%x\n", rx_status);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ return;
+ }
+ /* Step 2: Check CL73 state machine */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
+ if ((ustat_val &
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
+ DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
+ "ustat_val(0x8371) = 0x%x\n", ustat_val);
+ return;
+ }
+ /*
+ * Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_received);
+ if ((cl37_fsm_received &
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
+ DP(NETIF_MSG_LINK, "No CL37 FSM were received. "
+ "misc_rx_status(0x8330) = 0x%x\n",
+ cl37_fsm_received);
+ return;
+ }
+ /*
+ * The combined cl37/cl73 fsm state information indicates that
+ * we are connected to a device which does not support cl73, but
+ * does support cl37 BAM. In this case we disable cl73 and
+ * restart cl37 auto-neg.
+ */
+
+ /* Disable CL73 */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
+ /* Restart CL37 autoneg */
+ bnx2x_restart_autoneg(phy, params, 0);
+ DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
+}
+
+static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ if (bnx2x_direct_parallel_detect_used(phy, params))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u16 is_link_up,
+ u16 speed_mask,
+ u16 is_duplex)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+ if (is_link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ switch (speed_mask) {
+ case GP_STATUS_10M:
+ vars->line_speed = SPEED_10;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_10TFD;
+ else
+ vars->link_status |= LINK_10THD;
+ break;
+
+ case GP_STATUS_100M:
+ vars->line_speed = SPEED_100;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_100TXFD;
+ else
+ vars->link_status |= LINK_100TXHD;
+ break;
+
+ case GP_STATUS_1G:
+ case GP_STATUS_1G_KX:
+ vars->line_speed = SPEED_1000;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_1000TFD;
+ else
+ vars->link_status |= LINK_1000THD;
+ break;
+
+ case GP_STATUS_2_5G:
+ vars->line_speed = SPEED_2500;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_2500TFD;
+ else
+ vars->link_status |= LINK_2500THD;
+ break;
+
+ case GP_STATUS_5G:
+ case GP_STATUS_6G:
+ DP(NETIF_MSG_LINK,
+ "link speed unsupported gp_status 0x%x\n",
+ speed_mask);
+ return -EINVAL;
+
+ case GP_STATUS_10G_KX4:
+ case GP_STATUS_10G_HIG:
+ case GP_STATUS_10G_CX4:
+ case GP_STATUS_10G_KR:
+ case GP_STATUS_10G_SFI:
+ case GP_STATUS_10G_XFI:
+ vars->line_speed = SPEED_10000;
+ vars->link_status |= LINK_10GTFD;
+ break;
+ case GP_STATUS_20G_DXGXS:
+ vars->line_speed = SPEED_20000;
+ vars->link_status |= LINK_20GTFD;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "link speed unsupported gp_status 0x%x\n",
+ speed_mask);
+ return -EINVAL;
+ }
+ } else { /* link_down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+ }
+ DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
+ vars->phy_link_up, vars->line_speed);
+ return 0;
+}
+
+static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+
+ struct bnx2x *bp = params->bp;
+
+ u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+ int rc = 0;
+
+ /* Read gp_status */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+ duplex = DUPLEX_FULL;
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+ link_up = 1;
+ speed_mask = gp_status & GP_STATUS_SPEED_MASK;
+ DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
+ gp_status, link_up, speed_mask);
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+ duplex);
+ if (rc == -EINVAL)
+ return rc;
+
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_xgxs_an_resolve(phy, params, vars,
+ gp_status);
+ }
+ } else { /* link_down */
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ SINGLE_MEDIA_DIRECT(params)) {
+ /* Check signal is detected */
+ bnx2x_check_fallback_to_cl37(phy, params);
+ }
+ }
+
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
+
+static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+
+ struct bnx2x *bp = params->bp;
+
+ u8 lane;
+ u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+ int rc = 0;
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Read gp_status */
+ if (phy->req_line_speed > SPEED_10000) {
+ u16 temp_link_up;
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &temp_link_up);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
+ temp_link_up, link_up);
+ link_up &= (1<<2);
+ if (link_up)
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+ DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
+ /* Check for either KR or generic link up. */
+ gp_status1 = ((gp_status1 >> 8) & 0xf) |
+ ((gp_status1 >> 12) & 0xf);
+ link_up = gp_status1 & (1 << lane);
+ if (link_up && SINGLE_MEDIA_DIRECT(params)) {
+ u16 pd, gp_status4;
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* Check Autoneg complete */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_4,
+ &gp_status4);
+ if (gp_status4 & ((1<<12)<<lane))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ /* Check parallel detect used */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_STATUS,
+ &pd);
+ if (pd & (1<<15))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ }
+
+ if (lane < 2) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+ }
+ DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
+
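+ /* GP_2_2 packs the speed status of lanes 0/1 and GP_2_3 that of
+ * lanes 2/3, one byte per lane; the shift and 0x3f00 mask below
+ * normalize the current lane's byte into the GP_STATUS speed-mask
+ * position expected by bnx2x_get_link_speed_duplex().
+ */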
+ if ((lane & 1) == 0)
+ gp_speed <<= 8;
+ gp_speed &= 0x3f00;
+
+
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+ duplex);
+
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
+static void bnx2x_set_gmii_tx_driver(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ u16 lp_up2;
+ u16 tx_driver;
+ u16 bank;
+
+ /* read precomp */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
+
+ /* bits [10:7] of lp_up2, repositioned at [15:12] */
+ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+ MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+ MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+
+ if (lp_up2 == 0)
+ return;
+
+ for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
+ CL22_RD_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
+
+ /* replace tx_driver bits [15:12] */
+ if (lp_up2 !=
+ (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
+ tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+ tx_driver |= lp_up2;
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
+ }
+ }
+}
+
+static int bnx2x_emac_program(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u16 mode = 0;
+
+ DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
+ bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
+ switch (vars->line_speed) {
+ case SPEED_10:
+ mode |= EMAC_MODE_PORT_MII_10M;
+ break;
+
+ case SPEED_100:
+ mode |= EMAC_MODE_PORT_MII;
+ break;
+
+ case SPEED_1000:
+ mode |= EMAC_MODE_PORT_GMII;
+ break;
+
+ case SPEED_2500:
+ mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+ break;
+
+ default:
+ /* 10G not valid for EMAC */
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
+ return -EINVAL;
+ }
+
+ if (vars->duplex == DUPLEX_HALF)
+ mode |= EMAC_MODE_HALF_DUPLEX;
+ bnx2x_bits_en(bp,
+ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+ mode);
+
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+ return 0;
+}
+
+static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+
+ u16 bank, i = 0;
+ struct bnx2x *bp = params->bp;
+
+ for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_RX0_RX_EQ_BOOST,
+ phy->rx_preemphasis[i]);
+ }
+
+ for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER,
+ phy->tx_preemphasis[i]);
+ }
+}
+
+static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+ bnx2x_set_preemphasis(phy, params);
+
+ /* forced speed requested? */
+ if (vars->line_speed != SPEED_AUTO_NEG ||
+ (SINGLE_MEDIA_DIRECT(params) &&
+ params->loopback_mode == LOOPBACK_EXT)) {
+ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+ /* disable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, 0);
+
+ /* program speed and duplex */
+ bnx2x_program_serdes(phy, params, vars);
+
+ } else { /* AN_mode */
+ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+ /* AN enabled */
+ bnx2x_set_brcm_cl37_advertisement(phy, params);
+
+ /* program duplex & pause advertisement (for aneg) */
+ bnx2x_set_ieee_aneg_advertisement(phy, params,
+ vars->ieee_fc);
+
+ /* enable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, enable_cl73);
+
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, enable_cl73);
+ }
+
+ } else { /* SGMII mode */
+ DP(NETIF_MSG_LINK, "SGMII\n");
+
+ bnx2x_initialize_sgmii_process(phy, params, vars);
+ }
+}
+
+static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10))) ||
+ (!phy->req_line_speed &&
+ (phy->speed_cap_mask >=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->speed_cap_mask <
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ bnx2x_set_aer_mmd(params, phy);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bnx2x_set_master_ln(params, phy);
+
+ rc = bnx2x_reset_unicore(params, phy, 0);
+ /* reset the SerDes and wait for reset bit return low */
+ if (rc != 0)
+ return rc;
+
+ bnx2x_set_aer_mmd(params, phy);
+ /* setting the masterLn_def again after the reset */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+ bnx2x_set_master_ln(params, phy);
+ bnx2x_set_swap_lanes(params, phy);
+ }
+
+ return rc;
+}
+
+static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 cnt, ctrl;
+ /* Wait for soft reset to get cleared up to 1 sec */
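+	/* Bit 15 of the control register is the self-clearing soft-reset
+	 * bit, which is what the loop below polls for. */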
+ for (cnt = 0; cnt < 1000; cnt++) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ else
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ if (!(ctrl & (1<<15)))
+ break;
+ msleep(1);
+ }
+
+ if (cnt == 1000)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
+ DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+ return cnt;
+}
+
+static void bnx2x_link_int_enable(struct link_params *params)
+{
+ u8 port = params->port;
+ u32 mask;
+ struct bnx2x *bp = params->bp;
+
+ /* Setting the status to report on link up for either XGXS or SerDes */
+ if (CHIP_IS_E3(bp)) {
+ mask = NIG_MASK_XGXS0_LINK_STATUS;
+ if (!(SINGLE_MEDIA_DIRECT(params)))
+ mask |= NIG_MASK_MI_INT;
+ } else if (params->switch_cfg == SWITCH_CFG_10G) {
+ mask = (NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_XGXS0_LINK_STATUS);
+ DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+
+ } else { /* SerDes */
+ mask = NIG_MASK_SERDES0_LINK_STATUS;
+ DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+ }
+ bnx2x_bits_en(bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ mask);
+
+ DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
+ (params->switch_cfg == SWITCH_CFG_10G),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+ DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+}
+
+static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
+ u8 exp_mi_int)
+{
+ u32 latch_status = 0;
+
+ /*
+	 * Disable the MI INT (external phy int) by writing 1 to the
+	 * status register. The link-down indication is an active-high
+	 * signal, so we need to write the status to clear the XOR
+ */
+ /* Read Latched signals */
+ latch_status = REG_RD(bp,
+ NIG_REG_LATCH_STATUS_0 + port*8);
+ DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
+ /* Handle only those with latched-signal=up.*/
+ if (exp_mi_int)
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+ else
+ bnx2x_bits_dis(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+
+ if (latch_status & 1) {
+
+ /* For all latched-signal=up : Re-Arm Latch signals */
+ REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
+ (latch_status & 0xfffe) | (latch_status & 1));
+ }
+ /* For all latched-signal=up,Write original_signal to status */
+}
+
+static void bnx2x_link_int_ack(struct link_params *params,
+ struct link_vars *vars, u8 is_10g_plus)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 mask;
+ /*
+	 * First reset all status bits; we assume only one line will
+	 * change at a time
+ */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
+ if (vars->phy_link_up) {
+ if (USES_WARPCORE(bp))
+ mask = NIG_STATUS_XGXS0_LINK_STATUS;
+ else {
+ if (is_10g_plus)
+ mask = NIG_STATUS_XGXS0_LINK10G;
+ else if (params->switch_cfg == SWITCH_CFG_10G) {
+ /*
+ * Disable the link interrupt by writing 1 to
+ * the relevant lane in the status register
+ */
+ u32 ser_lane =
+ ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ mask = ((1 << ser_lane) <<
+ NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+ } else
+ mask = NIG_STATUS_SERDES0_LINK_STATUS;
+ }
+ DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
+ mask);
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ mask);
+ }
+}
+
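+/*
+ * Format a 32-bit version word as "<hi16>.<lo16>" in hex, stripping
+ * leading zeros from each half. Illustrative example (not from the
+ * source): num = 0x01020304 yields the string "102.304".
+ */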
+static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+{
+ u8 *str_ptr = str;
+ u32 mask = 0xf0000000;
+ u8 shift = 8*4;
+ u8 digit;
+ u8 remove_leading_zeros = 1;
+ if (*len < 10) {
+		/* Need more than 10 chars for this format */
+ *str_ptr = '\0';
+ (*len)--;
+ return -EINVAL;
+ }
+ while (shift > 0) {
+
+ shift -= 4;
+ digit = ((num & mask) >> shift);
+ if (digit == 0 && remove_leading_zeros) {
+ mask = mask >> 4;
+ continue;
+ } else if (digit < 0xa)
+ *str_ptr = digit + '0';
+ else
+ *str_ptr = digit - 0xa + 'a';
+ remove_leading_zeros = 0;
+ str_ptr++;
+ (*len)--;
+ mask = mask >> 4;
+ if (shift == 4*4) {
+ *str_ptr = '.';
+ str_ptr++;
+ (*len)--;
+ remove_leading_zeros = 1;
+ }
+ }
+ return 0;
+}
+
+
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
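+/*
+ * Compose the external PHY FW version string into 'version', e.g.
+ * "102.304" for one external PHY, or "102.304/105.201" on dual-media
+ * boards (the example values here are illustrative only).
+ */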
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ u8 *version, u16 len)
+{
+ struct bnx2x *bp;
+ u32 spirom_ver = 0;
+ int status = 0;
+ u8 *ver_p = version;
+ u16 remain_len = len;
+ if (version == NULL || params == NULL)
+ return -EINVAL;
+ bp = params->bp;
+
+ /* Extract first external phy*/
+ version[0] = '\0';
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
+
+ if (params->phy[EXT_PHY1].format_fw_ver) {
+ status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p += (len - remain_len);
+ }
+ if ((params->num_phys == MAX_PHYS) &&
+ (params->phy[EXT_PHY2].ver_addr != 0)) {
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
+ if (params->phy[EXT_PHY2].format_fw_ver) {
+ *ver_p = '/';
+ ver_p++;
+ remain_len--;
+ status |= params->phy[EXT_PHY2].format_fw_ver(
+ spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p = version + (len - remain_len);
+ }
+ }
+ *ver_p = '\0';
+ return status;
+}
+
+static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+
+ if (phy->req_line_speed != SPEED_1000) {
+ u32 md_devad = 0;
+
+ DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+ if (!CHIP_IS_E3(bp)) {
+ /* change the uni_phy_addr in the nig */
+ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+ port*0x18));
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ 0x5);
+ }
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
+ msleep(200);
+ /* set aer mmd back */
+ bnx2x_set_aer_mmd(params, phy);
+
+ if (!CHIP_IS_E3(bp)) {
+ /* and md_devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ md_devad);
+ }
+ } else {
+ u16 mii_ctrl;
+ DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+ bnx2x_cl45_read(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ &mii_ctrl);
+ bnx2x_cl45_write(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ mii_ctrl |
+ MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+ }
+}
+
+int bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed)
+{
+ u8 port = params->port;
+ u16 hw_led_mode = params->hw_led_mode;
+ int rc = 0;
+ u8 phy_idx;
+ u32 tmp;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
+ DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
+ speed, hw_led_mode);
+	/* In case an external PHY provides its own LED control, invoke it */
+ for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].set_link_led) {
+ params->phy[phy_idx].set_link_led(
+				&params->phy[phy_idx], params, mode);
+ }
+ }
+
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ SHARED_HW_CFG_LED_MAC1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+ break;
+
+ case LED_MODE_OPER:
+ /*
+	 * For all other phys, OPER mode is the same as ON, so in case
+ * link is down, do nothing
+ */
+ if (!vars->link_up)
+ break;
+ case LED_MODE_ON:
+ if (((params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+ (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
+ CHIP_IS_E2(bp) && params->num_phys == 2) {
+ /*
+ * This is a work-around for E2+8727 Configurations
+ */
+ if (mode == LED_MODE_ON ||
+ speed == SPEED_10000){
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp | EMAC_LED_OVERRIDE));
+ /*
+ * return here without enabling traffic
+				 * LED blink and setting rate in ON mode.
+ * In oper mode, enabling LED blink
+ * and setting rate is needed.
+ */
+ if (mode == LED_MODE_ON)
+ return rc;
+ }
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
+ /*
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
+			if ((!CHIP_IS_E3(bp)) ||
+			    (CHIP_IS_E3(bp) &&
+			     mode == LED_MODE_ON))
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp) ||
+ (mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ } else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
+
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
+ /* Set blinking rate to ~15.9Hz */
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+ LED_BLINK_RATE_VAL);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+ port*4, 1);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
+
+ if (CHIP_IS_E1(bp) &&
+ ((speed == SPEED_2500) ||
+ (speed == SPEED_1000) ||
+ (speed == SPEED_100) ||
+ (speed == SPEED_10))) {
+ /*
+ * On Everest 1 Ax chip versions for speeds less than
+ * 10G LED scheme is different
+ */
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port*4, 1);
+ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+ port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+ port*4, 1);
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
+ mode);
+ break;
+ }
+ return rc;
+
+}
+
+/*
+ * This function reflects the actual link state, read DIRECTLY from the
+ * HW.
+ */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 gp_status = 0, phy_index = 0;
+ u8 ext_phy_link_up = 0, serdes_phy_type;
+ struct link_vars temp_vars;
+	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+
+ if (CHIP_IS_E3(bp)) {
+ u16 link_up;
+ if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
+ > SPEED_10000) {
+ /* Check 20G link */
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
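+			/*
+			 * The status register is latched, hence the double
+			 * read; bit 2 (masked below) reports the 20G link.
+			 */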
+ link_up &= (1<<2);
+ } else {
+ /* Check 10G link and below*/
+ u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status);
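+			/*
+			 * GP2 status carries per-lane link indications in
+			 * two nibbles; fold them together and test the bit
+			 * for this lane.
+			 */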
+ gp_status = ((gp_status >> 8) & 0xf) |
+ ((gp_status >> 12) & 0xf);
+ link_up = gp_status & (1 << lane);
+ }
+ if (!link_up)
+ return -ESRCH;
+ } else {
+ CL22_RD_OVER_CL45(bp, int_phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ /* link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
+ }
+ /* In XGXS loopback mode, do not check external PHY */
+ if (params->loopback_mode == LOOPBACK_XGXS)
+ return 0;
+
+ switch (params->num_phys) {
+ case 1:
+ /* No external PHY */
+ return 0;
+ case 2:
+ ext_phy_link_up = params->phy[EXT_PHY1].read_status(
+				&params->phy[EXT_PHY1],
+ params, &temp_vars);
+ break;
+ case 3: /* Dual Media */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ serdes_phy_type = ((params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_XFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_DA_TWINAX));
+
+ if (is_serdes != serdes_phy_type)
+ continue;
+ if (params->phy[phy_index].read_status) {
+ ext_phy_link_up |=
+ params->phy[phy_index].read_status(
+					&params->phy[phy_index],
+ params, &temp_vars);
+ }
+ }
+ break;
+ }
+ if (ext_phy_link_up)
+ return 0;
+ return -ESRCH;
+}
+
+static int bnx2x_link_initialize(struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc = 0;
+ u8 phy_index, non_ext_phy;
+ struct bnx2x *bp = params->bp;
+ /*
+ * In case of external phy existence, the line speed would be the
+ * line speed linked up by the external phy. In case it is direct
+ * only, then the line_speed during initialization will be
+ * equal to the req_line_speed
+ */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+ /*
+ * Initialize the internal phy in case this is a direct board
+	 * (no external phys), or this board has an external phy which
+	 * requires the internal phy to be initialized first.
+ */
+ if (!USES_WARPCORE(bp))
+		bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
+ /* init ext phy and enable link state int */
+ non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+
+ if (non_ext_phy ||
+ (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
+ (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+		struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ if (vars->line_speed == SPEED_AUTO_NEG &&
+ (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp)))
+ bnx2x_set_parallel_detection(phy, params);
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy,
+ params,
+ vars);
+ }
+
+ /* Init external phy*/
+ if (non_ext_phy) {
+ if (params->phy[INT_PHY].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ } else {
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ /*
+ * No need to initialize second phy in case of first
+ * phy only selection. In case of second phy, we do
+ * need to initialize the first phy, since they are
+ * connected.
+ */
+ if (params->phy[phy_index].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+
+ if (phy_index == EXT_PHY2 &&
+ (bnx2x_phy_selection(params) ==
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+ DP(NETIF_MSG_LINK,
+ "Not initializing second phy\n");
+ continue;
+ }
+ params->phy[phy_index].config_init(
+				&params->phy[phy_index],
+ params, vars);
+ }
+ }
+ /* Reset the interrupt indication after phy was initialized */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
+ params->port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+ bnx2x_update_mng(params, vars->link_status);
+ return rc;
+}
+
+static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ /* reset the SerDes/XGXS */
+ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
+}
+
+static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ /* HW reset */
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ DP(NETIF_MSG_LINK, "reset external PHY\n");
+}
+
+static int bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+ vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+
+ /* update shared memory */
+ vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
+ LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG |
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
+ LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
+ LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
+ vars->line_speed = 0;
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ msleep(10);
+ /* reset BigMac/Xmac */
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp)) {
+ bnx2x_bmac_rx_disable(bp, params->port);
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ }
+ if (CHIP_IS_E3(bp))
+ bnx2x_xmac_disable(params);
+
+ return 0;
+}
+
+static int bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ int rc = 0;
+
+ vars->link_status |= (LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG);
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+ if (USES_WARPCORE(bp)) {
+ if (link_10g) {
+ if (bnx2x_xmac_enable(params, vars, 0) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+ } else
+ bnx2x_umac_enable(params, vars, 0);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, vars->line_speed);
+ }
+ if ((CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp))) {
+ if (link_10g) {
+ if (bnx2x_bmac_enable(params, vars, 0) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, SPEED_10000);
+ } else {
+ rc = bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
+
+ /* AN complete? */
+ if ((vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+ SINGLE_MEDIA_DIRECT(params))
+ bnx2x_set_gmii_tx_driver(params);
+ }
+ }
+
+ /* PBF - link up */
+ if (CHIP_IS_E1x(bp))
+ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* disable drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+ /* update shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ msleep(20);
+ return rc;
+}
+/*
+ * The bnx2x_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ * to be up
+ * - SINGLE_MEDIA - The link between the 577xx and the external
+ * phy (XGXS) need to up as well as the external link of the
+ * phy (PHY_EXT1)
+ * - DUAL_MEDIA - The link between the 577xx and the first
+ * external phy needs to be up, and at least one of the 2
+ * external phy link must be up.
+ */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ struct link_vars phy_vars[MAX_PHYS];
+ u8 port = params->port;
+ u8 link_10g_plus, phy_index;
+ u8 ext_phy_link_up = 0, cur_link_up;
+ int rc = 0;
+ u8 is_mi_int = 0;
+ u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+ u8 active_external_phy = INT_PHY;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ phy_vars[phy_index].flow_ctrl = 0;
+ phy_vars[phy_index].link_status = 0;
+ phy_vars[phy_index].line_speed = 0;
+ phy_vars[phy_index].duplex = DUPLEX_FULL;
+ phy_vars[phy_index].phy_link_up = 0;
+ phy_vars[phy_index].link_up = 0;
+ phy_vars[phy_index].fault_detected = 0;
+ }
+
+ if (USES_WARPCORE(bp))
+		bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
+
+ DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
+ port, (vars->phy_flags & PHY_XGXS_FLAG),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+
+ is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+ port*0x18) > 0);
+ DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ is_mi_int,
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+
+ /* disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ /*
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+		struct bnx2x_phy *phy = &params->phy[phy_index];
+ if (!phy->read_status)
+ continue;
+ /* Read link status and params of this ext phy */
+ cur_link_up = phy->read_status(phy, params,
+ &phy_vars[phy_index]);
+ if (cur_link_up) {
+ DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
+ phy_index);
+ } else {
+ DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
+ phy_index);
+ continue;
+ }
+
+ if (!ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ active_external_phy = phy_index;
+ } else {
+ switch (bnx2x_phy_selection(params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ /*
+ * In this option, the first PHY makes sure to pass the
+ * traffic through itself only.
+			 * It's not clear how to reset the link on the second phy
+ */
+ active_external_phy = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ /*
+ * In this option, the first PHY makes sure to pass the
+ * traffic through the second PHY.
+ */
+ active_external_phy = EXT_PHY2;
+ break;
+ default:
+ /*
+ * Link indication on both PHYs with the following cases
+ * is invalid:
+ * - FIRST_PHY means that second phy wasn't initialized,
+ * hence its link is expected to be down
+ * - SECOND_PHY means that first phy should not be able
+ * to link up by itself (using configuration)
+				 * - DEFAULT should be overridden during initialization
+ */
+ DP(NETIF_MSG_LINK, "Invalid link indication"
+ "mpc=0x%x. DISABLING LINK !!!\n",
+ params->multi_phy_config);
+ ext_phy_link_up = 0;
+ break;
+ }
+ }
+ }
+ prev_line_speed = vars->line_speed;
+ /*
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
+ if (params->phy[INT_PHY].read_status)
+ params->phy[INT_PHY].read_status(
+			&params->phy[INT_PHY],
+ params, vars);
+ /*
+	 * The INT_PHY flow control resides in the vars. This includes the
+ * case where the speed or flow control are not set to AUTO.
+ * Otherwise, the active external phy flow control result is set
+ * to the vars. The ext_phy_line_speed is needed to check if the
+ * speed is different between the internal phy and external phy.
+	 * This case may be the result of an intermediate link speed change.
+ */
+ if (active_external_phy > INT_PHY) {
+ vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+ /*
+ * Link speed is taken from the XGXS. AN and FC result from
+ * the external phy.
+ */
+ vars->link_status |= phy_vars[active_external_phy].link_status;
+
+ /*
+		 * if active_external_phy is the first PHY and link is up,
+		 * disable TX on the second external PHY
+ */
+ if (active_external_phy == EXT_PHY1) {
+ if (params->phy[EXT_PHY2].phy_specific_func) {
+ DP(NETIF_MSG_LINK,
+ "Disabling TX on EXT_PHY2\n");
+ params->phy[EXT_PHY2].phy_specific_func(
+					&params->phy[EXT_PHY2],
+ params, DISABLE_TX);
+ }
+ }
+
+ ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+ vars->duplex = phy_vars[active_external_phy].duplex;
+ if (params->phy[active_external_phy].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ else
+ vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+ DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
+ active_external_phy);
+ }
+
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL) {
+ bnx2x_rearm_latch_signal(bp, port,
+ phy_index ==
+ active_external_phy);
+ break;
+ }
+ }
+ DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ " ext_phy_line_speed = %d\n", vars->flow_ctrl,
+ vars->link_status, ext_phy_line_speed);
+ /*
+	 * Upon link speed change, set the NIG into drain mode. This deals
+	 * with a possible FIFO glitch due to the clock change when the speed
+	 * is decreased without a link-down indicator
+ */
+
+ if (vars->phy_link_up) {
+ if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+ (ext_phy_line_speed != vars->line_speed)) {
+ DP(NETIF_MSG_LINK, "Internal link speed %d is"
+ " different than the external"
+ " link speed %d\n", vars->line_speed,
+ ext_phy_line_speed);
+ vars->phy_link_up = 0;
+ } else if (prev_line_speed != vars->line_speed) {
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+ 0);
+ msleep(1);
+ }
+ }
+
+ /* anything 10 and over uses the bmac */
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+ bnx2x_link_int_ack(params, vars, link_10g_plus);
+
+ /*
+	 * In case the external phy link is up and the internal link is down
+	 * (probably not initialized yet after link initialization), it
+	 * needs to be initialized.
+	 * Note that after a link down-up as a result of a cable plug, the
+	 * xgxs link would probably come up again without the need to
+	 * initialize it
+ */
+ if (!(SINGLE_MEDIA_DIRECT(params))) {
+ DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
+ " init_preceding = %d\n", ext_phy_link_up,
+ vars->phy_link_up,
+ params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST);
+ if (!(params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST)
+ && ext_phy_link_up && !vars->phy_link_up) {
+ vars->line_speed = ext_phy_line_speed;
+ if (vars->line_speed < SPEED_1000)
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(
+				&params->phy[INT_PHY], params,
+ vars);
+ }
+ }
+ /*
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up and no fault detected on active PHY.
+ */
+ vars->link_up = (vars->phy_link_up &&
+ (ext_phy_link_up ||
+ SINGLE_MEDIA_DIRECT(params)) &&
+ (phy_vars[active_external_phy].fault_detected == 0));
+
+ if (vars->link_up)
+ rc = bnx2x_update_link_up(params, vars, link_10g_plus);
+ else
+ rc = bnx2x_update_link_down(params, vars);
+
+ return rc;
+}
+
+
+/*****************************************************************************/
+/* External Phy section */
+/*****************************************************************************/
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+{
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ msleep(1);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+}
+
+static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
+ u32 spirom_ver, u32 ver_addr)
+{
+ DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
+ (u16)(spirom_ver>>16), (u16)spirom_ver, port);
+
+ if (ver_addr)
+ REG_WR(bp, ver_addr, spirom_ver);
+}
+
+static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
+{
+ u16 fw_ver1, fw_ver2;
+
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
+ phy->ver_addr);
+}
+
+static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
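+	/*
+	 * The AN status register is latched, so read it twice. Bit 5
+	 * indicates AN complete; bit 0 clear means the link partner has
+	 * no AN ability, i.e. parallel detection was used.
+	 */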
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ if (val & (1<<5))
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ if ((val & (1<<0)) == 0)
+ vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+/******************************************************************/
+/* common BCM8073/BCM8727 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_10 ||
+ phy->req_line_speed == SPEED_100) {
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ return;
+ }
+
+ if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
+ (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
+ u16 pause_result;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
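+		/*
+		 * Build a 4-bit pause_result: bits [3:2] hold the local
+		 * PAUSE/ASM_DIR advertisement, bits [1:0] the partner's;
+		 * bnx2x_pause_resolve() maps it to a flow-control mode.
+		 */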
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+ bnx2x_pause_resolve(vars, pause_result);
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
+ pause_result);
+ }
+}
+static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
+{
+ u32 count = 0;
+ u16 fw_ver1, fw_msgout;
+ int rc = 0;
+
+ /* Boot port from external ROM */
+ /* EDC grst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
+
+ /* ucode reboot and rst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ /* Reset internal microprocessor */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ /* Release srst bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* Delay 100ms per the PHY specifications */
+ msleep(100);
+
+	/* The 8073 sometimes takes longer to download */
+ do {
+ count++;
+ if (count > 300) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download failed. fw version = 0x%x\n",
+ port, fw_ver1);
+ rc = -EINVAL;
+ break;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+ msleep(1);
+ } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
+
+ /* Clear ser_boot_ctl bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_save_bcm_spirom_ver(bp, phy, port);
+
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download complete. fw version = 0x%x\n",
+ port, fw_ver1);
+
+ return rc;
+}
+
+/******************************************************************/
+/* BCM8073 PHY SECTION */
+/******************************************************************/
+static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* This is required only for 8073 A1, version 102 */
+ u16 val;
+
+ /* Read 8073 HW revision*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val != 1) {
+ /* No need to workaround in 8073 A1 */
+ return 0;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
+
+ /* SNR should be applied only for version 0x102 */
+ if (val != 0x102)
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt, cnt1;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val > 0) {
+ /* No need to workaround in 8073 A1 */
+ return 0;
+ }
+ /* XAUI workaround in 8073 A0: */
+
+ /*
+ * After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
+
+ for (cnt = 0; cnt < 1000; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /*
+ * If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
+ if (!(val & (1<<14)) || !(val & (1<<13))) {
+ DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
+ return 0;
+ } else if (!(val & (1<<15))) {
+ DP(NETIF_MSG_LINK, "bit 15 went off\n");
+ /*
+			 * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+			 * MSB (bit 15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
+ for (cnt1 = 0; cnt1 < 1000; cnt1++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_XAUI_WA, &val);
+ if (val & (1<<15)) {
+ DP(NETIF_MSG_LINK,
+ "XAUI workaround has completed\n");
+ return 0;
+ }
+ msleep(3);
+ }
+ break;
+ }
+ msleep(3);
+ }
+ DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
+ return -EINVAL;
+}
+
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ /* Force KR or KX */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 cl37_val;
+ struct bnx2x *bp = params->bp;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ DP(NETIF_MSG_LINK,
+ "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ msleep(500);
+}
+
+static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0, tmp1;
+ u8 gpio_port;
+ DP(NETIF_MSG_LINK, "Init 8073\n");
+
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
+
+ bnx2x_8073_set_pause_cl37(params, phy, vars);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+
+ /* Swap polarity if required - Must be done only in non-1G mode */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap _P and _N of the KR lines */
+ DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+ /* 10G Rx/Tx and 1G Tx signal polarity swap */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+ (val | (3<<9)));
+ }
+
+
+ /* Enable CL37 BAM */
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
+ if (params->loopback_mode == LOOPBACK_EXT) {
+ bnx2x_807x_force_10G(bp, phy);
+ DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+ return 0;
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+ }
+ if (phy->req_line_speed != SPEED_AUTO_NEG) {
+ if (phy->req_line_speed == SPEED_10000) {
+ val = (1<<7);
+ } else if (phy->req_line_speed == SPEED_2500) {
+ val = (1<<5);
+ /*
+ * Note that 2.5G works only when used with 1G
+ * advertisement
+ */
+ } else
+ val = (1<<5);
+ } else {
+ val = 0;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= (1<<7);
+
+ /* Note that 2.5G works only when used with 1G advertisement */
+ if (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ val |= (1<<5);
+ DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+ if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+ (phy->req_line_speed == SPEED_2500)) {
+ u16 phy_ver;
+ /* Allow 2.5G for A1 and above */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+ &phy_ver);
+ DP(NETIF_MSG_LINK, "Add 2.5G\n");
+ if (phy_ver > 0)
+ tmp1 |= 1;
+ else
+ tmp1 &= 0xfffe;
+ } else {
+ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+ tmp1 &= 0xfffe;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+ /* Add support for CL37 (passive mode) II */
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+ (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
+
+ /* Add support for CL37 (passive mode) III */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+
+ /*
+	 * The SNR will improve by about 2dB by changing the BW and FFE main
+	 * tap. The rest of the commands are executed after link is up.
+ * Change FFE main cursor to 5 in EDC register
+ */
+ if (bnx2x_8073_is_snr_needed(bp, phy))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+ 0xFB0C);
+
+	/* Enable FEC (Forward Error Correction) Request in the AN */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+ tmp1 |= (1<<15);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Restart autoneg */
+ msleep(500);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+ ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+ return 0;
+}
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0;
+ u16 val1, val2;
+ u16 link_status = 0;
+ u16 an1000_status = 0;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+ DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
+
+ /* clear the interrupt LASI status register */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* Check the LASI */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+ /* Check the link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ link_up = ((val1 & 4) == 4);
+ DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+ if (link_up &&
+ ((phy->req_line_speed != SPEED_10000))) {
+ if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+ return 0;
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+ /* Check the link status on 1.1.2 */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+ "an_link_status=0x%x\n", val2, val1, an1000_status);
+
+ link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+ if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+ /*
+		 * The SNR will improve by about 2dB by changing the BW and
+		 * FFE main tap. The 1st write to change the FFE main tap is
+		 * done before restarting AN. Change PLL Bandwidth in EDC
+		 * register
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+ 0x26BC);
+
+ /* Change CDR Bandwidth in EDC register */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+ 0x0333);
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &link_status);
+
+ /* Bits 0..2 --> speed detected, bits 13..15--> link is down */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_2500;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ if (link_up) {
+ /* Swap polarity if required */
+ if (params->lane_config &
+ PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap P and N of the KR lines */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+ /*
+ * Set bit 3 to invert Rx in 1G mode and clear this bit
+ * when it`s in 10G mode.
+ */
+ if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+ "the 8073\n");
+ val1 |= (1<<3);
+ } else
+ val1 &= ~(1<<3);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE,
+ val1);
+ }
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_8073_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+ return link_up;
+}
+
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+}
+
+/******************************************************************/
+/* BCM8705 PHY SECTION */
+/******************************************************************/
+static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "init 8705\n");
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+ /* BCM8705 doesn't have microcode, hence the 0 */
+ bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+ return 0;
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, rx_sd;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "read status 8705\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+
+ DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+ link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+ if (link_up) {
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+/******************************************************************/
+/* SFP+ module Section */
+/******************************************************************/
+static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 pmd_dis)
+{
+ struct bnx2x *bp = params->bp;
+ /*
+ * Disable transmitter only for bootcodes which can enable it afterwards
+ * (for D3 link)
+ */
+ if (pmd_dis) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
+ DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
+ else {
+ DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
+ return;
+ }
+ } else
+ DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+}
+
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+ u8 gpio_port;
+ u32 swap_val, swap_override;
+ struct bnx2x *bp = params->bp;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
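+	/*
+	 * Flip the GPIO port only when both the port-swap strap and its
+	 * override are set; the logical (not bitwise) AND is intentional.
+	 */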
+ return gpio_port ^ (swap_val && swap_override);
+}
+
+static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ u16 val;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 tx_en_mode;
+
+	/* Disable/Enable transmitter (TX laser of the SFP+ module) */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x\n", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
+
+ if (tx_en)
+ val &= ~(1<<15);
+ else
+ val |= (1<<15);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ u16 gpio_pin;
+ u8 gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = bnx2x_get_gpio_port(params);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+ break;
+ }
+}
+
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
+ if (CHIP_IS_E3(bp))
+ bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+ else
+ bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+ u16 i;
+ if (byte_cnt > 16) {
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 0xf\n");
+ return -EINVAL;
+ }
+ /* Set the read command byte count */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ (byte_cnt | 0xa000));
+
+ /* Set the read command address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
+
+ /* Activate read command */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x2c0f);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ udelay(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ DP(NETIF_MSG_LINK,
+ "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return -EINVAL;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+ }
+
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return 0;
+ msleep(1);
+ }
+ return -EINVAL;
+}
+
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt,
+ u8 *o_buf)
+{
+ int rc = 0;
+ u8 i, j = 0, cnt = 0;
+ u32 data_array[4];
+ u16 addr32;
+ struct bnx2x *bp = params->bp;
+ /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
+ " addr %d, cnt %d\n",
+ addr, byte_cnt);*/
+ if (byte_cnt > 16) {
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+
+ /* 4 byte aligned address */
+ addr32 = addr & (~0x3);
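+	/*
+	 * Illustrative example: addr = 5, byte_cnt = 3 reads the aligned
+	 * block at address 4 and copies data_array bytes 1..3 into o_buf
+	 * once the read succeeds.
+	 */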
+ do {
+ rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ data_array);
+ } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
+ if (rc == 0) {
+ for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+ o_buf[j] = *((u8 *)data_array + i);
+ j++;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val, i;
+
+ if (byte_cnt > 16) {
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 0xf\n");
+ return -EINVAL;
+ }
+
+ /* Need to read from 1.8000 to clear it */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
+
+ /* Set the read command byte count */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
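+	/* The two-wire engine appears to require at least a 2-byte
+	 * transfer, hence the rounding above. */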
+
+ /* Set the read command address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
+ /* Set the destination address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+
+ /* Activate read command */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
+ /*
+ * Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
+ msleep(1);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ udelay(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ DP(NETIF_MSG_LINK,
+ "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return -EFAULT;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
+ }
+
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return 0;
+ msleep(1);
+ }
+
+ return -EINVAL;
+}
+
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
+{
+ int rc = -EINVAL;
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ }
+ return rc;
+}
+
+static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 *edc_mode)
+{
+ struct bnx2x *bp = params->bp;
+ u32 sync_offset = 0, phy_idx, media_types;
+ u8 val, check_limiting_mode = 0;
+ *edc_mode = EDC_MODE_LIMITING;
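+	/*
+	 * Default to limiting mode; passive DAC and linear-RX modules
+	 * override this below based on the SFP+ EEPROM contents.
+	 */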
+
+ phy->media_type = ETH_PHY_UNSPECIFIED;
+ /* First check for copper cable */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_CON_TYPE_ADDR,
+ 1,
+ &val) != 0) {
+ DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case SFP_EEPROM_CON_TYPE_VAL_COPPER:
+ {
+ u8 copper_module_type;
+ phy->media_type = ETH_PHY_DA_TWINAX;
+ /*
+		 * Check if it's an active cable (includes SFP+ module)
+		 * or a passive cable
+ */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_FC_TX_TECH_ADDR,
+ 1,
+ &copper_module_type) != 0) {
+ DP(NETIF_MSG_LINK,
+ "Failed to read copper-cable-type"
+ " from SFP+ EEPROM\n");
+ return -EINVAL;
+ }
+
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
+ DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
+ check_limiting_mode = 1;
+ } else if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ DP(NETIF_MSG_LINK,
+ "Passive Copper cable detected\n");
+ *edc_mode =
+ EDC_MODE_PASSIVE_DAC;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type 0x%x !!!\n",
+ copper_module_type);
+ return -EINVAL;
+ }
+ break;
+ }
+ case SFP_EEPROM_CON_TYPE_VAL_LC:
+ phy->media_type = ETH_PHY_SFP_FIBER;
+ DP(NETIF_MSG_LINK, "Optic module detected\n");
+ check_limiting_mode = 1;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
+ val);
+ return -EINVAL;
+ }
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+ /* Update media type for non-PMF sync */
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (&(params->phy[phy_idx]) == phy) {
+ media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ break;
+ }
+ }
+ REG_WR(bp, sync_offset, media_types);
+ if (check_limiting_mode) {
+ u8 options[SFP_EEPROM_OPTIONS_SIZE];
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_OPTIONS_ADDR,
+ SFP_EEPROM_OPTIONS_SIZE,
+ options) != 0) {
+ DP(NETIF_MSG_LINK,
+ "Failed to read Option field from module EEPROM\n");
+ return -EINVAL;
+ }
+ if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
+ *edc_mode = EDC_MODE_LINEAR;
+ else
+ *edc_mode = EDC_MODE_LIMITING;
+ }
+ DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
+ return 0;
+}
+
+/*
+ * This function reads the relevant field from the module (SFP+) and
+ * verifies it is compliant with this board
+ */
+static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u32 val, cmd;
+ u32 fw_resp, fw_cmd_param;
+ char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
+ char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
+ phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
+ val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
+ DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
+ return 0;
+ }
+
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+ /* Use specific phy request */
+ cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+ } else if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+ /* Use first phy request only in case of non-dual media */
+ if (DUAL_MEDIA(params)) {
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
+ return -EINVAL;
+ }
+ cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+ } else {
+ /* No support for OPT MDL verification */
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
+ return -EINVAL;
+ }
+
+ fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+ fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
+ if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
+ DP(NETIF_MSG_LINK, "Approved module\n");
+ return 0;
+ }
+
+ /* format the warning message */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_VENDOR_NAME_ADDR,
+ SFP_EEPROM_VENDOR_NAME_SIZE,
+ (u8 *)vendor_name))
+ vendor_name[0] = '\0';
+ else
+ vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_PART_NO_ADDR,
+ SFP_EEPROM_PART_NO_SIZE,
+ (u8 *)vendor_pn))
+ vendor_pn[0] = '\0';
+ else
+ vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
+
+ netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
+ phy->flags |= FLAGS_SFP_NOT_APPROVED;
+ return -EINVAL;
+}
+
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 val;
+ struct bnx2x *bp = params->bp;
+ u16 timeout;
+ /*
+ * Initialization time after hot-plug may take up to 300ms for
+ * some PHY types (e.g. JDSU)
+ */
+
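+ /* Poll byte 1 of the EEPROM every 5ms, up to 60 * 5ms = 300ms */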
+ for (timeout = 0; timeout < 60; timeout++) {
+ if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
+ == 0) {
+ DP(NETIF_MSG_LINK,
+ "SFP+ module initialization took %d ms\n",
+ timeout * 5);
+ return 0;
+ }
+ msleep(5);
+ }
+ return -EINVAL;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 is_power_up) {
+ /* Make sure GPIOs are not used for LED mode */
+ u16 val;
+ /*
+ * In the GPIO register, bit 4 is used to determine if the GPIOs are
+ * operating as INPUT or as OUTPUT. A value of 1 means input, 0 means
+ * output.
+ * Bits 0-1 hold the GPIO values for OUTPUT when bit 4 is 0.
+ * Bits 8-9 hold the GPIO values for INPUT when bit 4 is 1, where the
+ * 1st bit is the over-current indication (input only) and the 2nd bit
+ * is for power (output only).
+ *
+ * When the NOC feature is disabled and power is up, set the GPIO
+ * control to input to allow listening for the over-current indication.
+ */
+ if (phy->flags & FLAGS_NOC)
+ return;
+ if (is_power_up)
+ val = (1<<4);
+ else
+ /*
+ * Set GPIO control to OUTPUT, and set the power bit
+ * according to is_power_up
+ */
+ val = (1<<1);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 cur_limiting_mode;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
+ DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
+ cur_limiting_mode);
+
+ if (edc_mode == EDC_MODE_LIMITING) {
+ DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ EDC_MODE_LIMITING);
+ } else { /* LRM mode (default) */
+
+ DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
+
+ /*
+ * Changing to LRM mode takes quite a few seconds, so do it only
+ * if the current mode is limiting (the default is LRM)
+ */
+ if (cur_limiting_mode != EDC_MODE_LIMITING)
+ return 0;
+
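+ /*
+ * Switch the 8726 microcode into LRM mode; the exact register
+ * values written below are vendor-defined.
+ */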
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
+ }
+ return 0;
+}
+
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 phy_identifier;
+ u16 rom_ver2_val;
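+ /*
+ * Bit 9 of the PHY identifier register is cleared before updating
+ * ROM_VER2 with the new EDC mode and set again afterwards.
+ */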
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1<<9)));
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
+ /* Keep the upper 8 bits, and set the lower 8 bits to the edc_mode */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1<<9)));
+
+ return 0;
+}
+
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+
+ switch (action) {
+ case DISABLE_TX:
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ break;
+ case ENABLE_TX:
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+ action);
+ return;
+ }
+}
+
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+
+ u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ u8 gpio_port = bnx2x_get_gpio_port(params);
+ u16 gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ DP(NETIF_MSG_LINK, "Set fault module-detected led "
+ "pin %x port %x mode %x\n",
+ gpio_pin, gpio_port, gpio_mode);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+ fault_led_gpio);
+ }
+}
+
+static void bnx2x_set_e3_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ u32 pin_cfg;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+ PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+ DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
+ gpio_mode, pin_cfg);
+ bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
+}
+
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
+ if (CHIP_IS_E3(bp)) {
+ /*
+ * Low ==> SFP+ module is supported
+ * High ==> SFP+ module is not on the approved vendor list
+ */
+ bnx2x_set_e3_module_fault_led(params, gpio_mode);
+ } else
+ bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void bnx2x_warpcore_power_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ u32 pin_cfg;
+ struct bnx2x *bp = params->bp;
+
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+ power, pin_cfg);
+ /*
+ * Low ==> corresponding SFP+ module is powered
+ * High ==> the SFP+ module is powered down
+ */
+ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
+
+static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_warpcore_power_module(params, phy, 0);
+}
+
+static void bnx2x_power_sfp_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
+
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_power_module(params->bp, phy, power);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_power_module(params, phy, power);
+ break;
+ default:
+ break;
+ }
+}
+
+static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 val = 0;
+ u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ struct bnx2x *bp = params->bp;
+
+ u8 lane = bnx2x_get_warpcore_lane(phy, params);
+ /* This is a global register which controls all lanes */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+ val &= ~(0xf << (lane << 2));
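+ /* Each lane owns a 4-bit nibble of the firmware-mode register */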
+
+ switch (edc_mode) {
+ case EDC_MODE_LINEAR:
+ case EDC_MODE_LIMITING:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ break;
+ case EDC_MODE_PASSIVE_DAC:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+ break;
+ default:
+ break;
+ }
+
+ val |= (mode << (lane << 2));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+ /* A must read */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+
+ /* Restart microcode to re-read the new mode */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static void bnx2x_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
+ break;
+ }
+}
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 edc_mode;
+ int rc = 0;
+
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
+
+ DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
+ params->port);
+ /* Power up module */
+ bnx2x_power_sfp_module(params, phy, 1);
+ if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
+ DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
+ return -EINVAL;
+ } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
+ /* check SFP+ module compatibility */
+ DP(NETIF_MSG_LINK, "Module verification failed!!\n");
+ rc = -EINVAL;
+ /* Turn on fault module-detected led */
+ bnx2x_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
+ /* Check if the SFP+ module needs to be powered down */
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
+ DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
+ bnx2x_power_sfp_module(params, phy, 0);
+ return rc;
+ }
+ } else {
+ /* Turn off fault module-detected led */
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
+ }
+
+ /*
+ * Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
+ bnx2x_set_limiting_mode(params, phy, edc_mode);
+
+ /*
+ * Enable transmit for this module if the module is approved, or if
+ * unapproved modules are not required to have the Tx laser disabled
+ */
+ if (rc == 0 ||
+ (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ else
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+
+ return rc;
+}
+
+void bnx2x_handle_module_detect_int(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ u32 gpio_val;
+ u8 gpio_num, gpio_port;
+ if (CHIP_IS_E3(bp))
+ phy = &params->phy[INT_PHY];
+ else
+ phy = &params->phy[EXT_PHY1];
+
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+ params->port, &gpio_num, &gpio_port) ==
+ -EINVAL) {
+ DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+ return;
+ }
+
+ /* Set valid module led off */
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
+
+ /* Get current gpio val reflecting module plugged in / out*/
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+ /* Call the handling function in case module is detected */
+ if (gpio_val == 0) {
+ bnx2x_power_sfp_module(params, phy, 1);
+ bnx2x_set_gpio_int(bp, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+ gpio_port);
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ } else {
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
+ bnx2x_set_gpio_int(bp, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+ gpio_port);
+ /*
+ * Module was unplugged.
+ * Disable transmit for this module
+ */
+ phy->media_type = ETH_PHY_NOT_PRESENT;
+ if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
+ CHIP_IS_E3(bp))
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ }
+}
+
+/******************************************************************/
+/* Used by 8706 and 8727 */
+/******************************************************************/
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 alarm_status_offset,
+ u16 alarm_ctrl_offset)
+{
+ u16 alarm_status, val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
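+ /* Read twice to clear the stale latched value */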
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
+ /* Mask or enable the fault event. */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+ if (alarm_status & (1<<0))
+ val &= ~(1<<0);
+ else
+ val |= (1<<0);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
+
+/******************************************************************/
+/* common BCM8706/BCM8726 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, val2, rx_sd, pcs_status;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+ /* Clear RX Alarm */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ /* Clear LASI indication */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+ DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ " link_status 0x%x\n", rx_sd, pcs_status, val2);
+ /*
+ * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
+ */
+ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+ if (link_up) {
+ if (val2 & (1<<1))
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+
+ /* Capture 10G link fault. Read twice to clear stale value. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ if (val1 & (1<<0))
+ vars->fault_detected = 1;
+ }
+
+ return link_up;
+}
+
+/******************************************************************/
+/* BCM8706 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 tx_en_mode;
+ u16 cnt, val, tmp1;
+ struct bnx2x *bp = params->bp;
+
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait until fw is loaded */
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+ if (val)
+ break;
+ msleep(10);
+ }
+ DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ u8 i;
+ u16 reg;
+ for (i = 0; i < 4; i++) {
+ reg = MDIO_XS_8706_REG_BANK_RX0 +
+ i*(MDIO_XS_8706_REG_BANK_RX1 -
+ MDIO_XS_8706_REG_BANK_RX0);
+ bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+ /* Clear first 3 bits of the control */
+ val &= ~0x7;
+ /* Set control bits according to configuration */
+ val |= (phy->rx_preemphasis[i] & 0x7);
+ DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+ " reg 0x%x <-- val 0x%x\n", reg, val);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+ }
+ }
+ /* Force speed */
+ if (phy->req_line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ /* Arm LASI for link and Tx fault. */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
+ } else {
+ /* Force 1Gbps using autoneg with 1G advertisement */
+
+ /* Allow CL37 through CL73 */
+ DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+ /* Enable Full-Duplex advertisement on CL37 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+ /* Enable CL37 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ /* 1G support */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+ /* Enable clause 73 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x0400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ 0x0004);
+ }
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+ /*
+ * If the TX Laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode while the TX Laser is disabled
+ */
+
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+ tmp1 |= 0x1;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+ }
+
+ return 0;
+}
+
+static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ return bnx2x_8706_8726_read_status(phy, params, vars);
+}
+
+/******************************************************************/
+/* BCM8726 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
+}
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* Need to wait 100ms after reset */
+ msleep(100);
+
+ /* Micro controller re-boot */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+
+ /* Set soft reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* Wait 150ms for the microcode to load */
+ msleep(150);
+
+ /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+
+ msleep(200);
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+}
+
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val1;
+ u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ link_up = 0;
+ vars->line_speed = 0;
+ }
+ }
+ return link_up;
+}
+
+static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_8726_external_rom_boot(phy, params);
+
+ /*
+ * Need to call module detection on initialization, since the detection
+ * triggered by actual module insertion might occur before the driver is
+ * loaded; when the driver is loaded, it resets all registers, including
+ * the transmitter
+ */
+ bnx2x_sfp_module_detection(phy, params);
+
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ /* Set Flow control */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ /*
+ * Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
+
+ } else { /* Default 10G. Set only LASI control */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
+ }
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK,
+ "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+
+ return 0;
+}
+
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+ /* Set serial boot control for external load */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
+
+/******************************************************************/
+/* BCM8727 PHY SECTION */
+/******************************************************************/
+
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 led_mode_bitmask = 0;
+ u16 gpio_pins_bitmask = 0;
+ u16 val;
+ /* Only the NOC flavor requires the LED to be set explicitly */
+ if (!(phy->flags & FLAGS_NOC))
+ return;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x03;
+ break;
+ case LED_MODE_ON:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x02;
+ break;
+ case LED_MODE_OPER:
+ led_mode_bitmask = 0x60;
+ gpio_pins_bitmask = 0x11;
+ break;
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val &= 0xff8f;
+ val |= led_mode_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val);
+ val &= 0xffe0;
+ val |= gpio_pins_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params) {
+ u32 swap_val, swap_override;
+ u8 port;
+ /*
+ * The PHY reset is controlled by GPIO 1. Fake the port number
+ * to cancel the swap done in set_gpio()
+ */
+ struct bnx2x *bp = params->bp;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port = (swap_val && swap_override) ^ 1;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+}
+
+static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 tx_en_mode;
+ u16 tmp1, val, mod_abs, tmp2;
+ u16 rx_alarm_ctrl_val;
+ u16 lasi_ctrl_val;
+ struct bnx2x *bp = params->bp;
+ /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+ rx_alarm_ctrl_val = (1<<2) | (1<<5);
+ /* Should be 0x6 to enable XS on Tx side. */
+ lasi_ctrl_val = 0x0006;
+
+ DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ rx_alarm_ctrl_val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
+
+ /*
+ * Initially configure MOD_ABS to interrupt when the module is
+ * present (bit 8)
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ /*
+ * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 0);
+
+ /* Make MOD_ABS give interrupt on change */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val |= (1<<12);
+ if (phy->flags & FLAGS_NOC)
+ val |= (3<<5);
+
+ /*
+ * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+ * status which reflect SFP+ module over-current
+ */
+ if (!(phy->flags & FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+
+ bnx2x_8727_power_module(bp, phy, 1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ /* Set option 1G speed */
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /*
+ * Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /*
+ * Since the 8727 has only a single reset pin, the 10G registers
+ * need to be set even though they are the defaults
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+
+ /*
+ * Set the 2-wire transfer rate of the SFP+ module EEPROM to 100kHz,
+ * since some DACs (direct attach cables) do not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ 0xa001);
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+
+ /*
+ * If the TX Laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode while the TX Laser is disabled
+ */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ }
+
+ return 0;
+}
+
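+/*
+ * Handle a MOD_ABS (module absent/present) change: rearm the indication
+ * for the opposite event, clear the latched RX alarm, and run module
+ * detection when a module was plugged in.
+ */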
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mod_abs, rx_alarm_status;
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ if (mod_abs & (1<<8)) {
+
+ /* Module is absent */
+ DP(NETIF_MSG_LINK,
+ "MOD_ABS indication show module is absent\n");
+ phy->media_type = ETH_PHY_NOT_PRESENT;
+ /*
+ * 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /*
+ * Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+ } else {
+ /* Module is present */
+ DP(NETIF_MSG_LINK,
+ "MOD_ABS indication show module is present\n");
+ /*
+ * First disable the transmitter; if the module is ok, module
+ * detection will enable it.
+ * 1. Set mod_abs to detect the next module-absent event (bit 8)
+ * 2. Restore the default polarity of the OPRXLOS signal, so that it
+ * then correctly indicates the presence or absence of the Rx
+ * signal (bit 9)
+ */
+ mod_abs |= (1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs |= (1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /*
+ * Clear the RX alarm, since it stays up as long as mod_abs hasn't
+ * changed. This needs to be done before calling module detection,
+ * otherwise it will clear the link update alarm
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
+
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
+ rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
+}
+
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0, oc_port = params->port;
+ u16 link_status = 0;
+ u16 rx_alarm_status, lasi_ctrl, val1;
+
+ /* If PHY is not initialized, do not check link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (!lasi_ctrl)
+ return 0;
+
+ /* Check the LASI on Rx */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
+ &rx_alarm_status);
+ vars->line_speed = 0;
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
+
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+ DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
+
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /*
+ * If a module is present and the over-current condition needs
+ * to be checked
+ */
+ if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+ /* Check over-current using 8727 GPIO0 input*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val1);
+
+ if ((val1 & (1<<8)) == 0) {
+ if (!CHIP_IS_E1x(bp))
+ oc_port = BP_PATH(bp) + (params->port << 1);
+ DP(NETIF_MSG_LINK,
+ "8727 Power fault has been detected on port %d\n",
+ oc_port);
+ netdev_err(bp->dev, "Error: Power fault on Port %d has"
+ " been detected and the power to "
+ "that SFP+ module has been removed"
+ " to prevent failure of the card."
+ " Please remove the SFP+ module and"
+ " restart the system to clear this"
+ " error.\n",
+ oc_port);
+ /* Disable all RX_ALARMs except for mod_abs */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXCTRL, (1<<5));
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ /* Wait for module_absent_event */
+ val1 |= (1<<8);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+ /* Clear RX alarm */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ return 0;
+ }
+ } /* Over current check */
+
+ /* When module absent bit is set, check module */
+ if (rx_alarm_status & (1<<5)) {
+ bnx2x_8727_handle_mod_abs(phy, params);
+ /* Enable all mod_abs and link detection bits */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ ((1<<5) | (1<<2)));
+ }
+ DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
+ bnx2x_8727_specific_func(phy, params, ENABLE_TX);
+ /* If transmitter is disabled, ignore false link up indication */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ return 0;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
+
+ /*
+ * Bits 0..2 --> speed detected,
+ * Bits 13..15 --> link is down
+ */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ /* Capture 10G link fault. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ if (val1 & (1<<0))
+ vars->fault_detected = 1;
+ }
+
+ if (link_up) {
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+ }
+
+ if ((DUAL_MEDIA(params)) &&
+ (phy->req_line_speed == SPEED_1000)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val1);
+ /*
+ * In case of dual-media board and 1G, power up the XAUI side,
+ * otherwise power it down. For 10G it is done automatically
+ */
+ if (link_up)
+ val1 &= ~(3<<10);
+ else
+ val1 |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val1);
+ }
+ return link_up;
+}
+
+static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 1);
+
+ /* Disable Transmitter */
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ /* Clear LASI */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+/******************************************************************/
+/* BCM8481/BCM84823/BCM84833 PHY SECTION */
+/******************************************************************/
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 val, fw_ver1, fw_ver2, cnt;
+ u8 port;
+ struct bnx2x *bp = params->bp;
+
+ port = params->port;
+
+ /* The 32-bit registers in the 848xx are accessed via the MDIO2ARM interface. */
+ /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
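+ /*
+ * MDIO2ARM mailbox: 0xA819/0xA81A hold the 32-bit address (low/high),
+ * 0xA81B/0xA81C the data word, 0xA817 the command, and 0xA818 bit 0
+ * signals completion.
+ */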
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+
+ /* (2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+ bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
+ phy->ver_addr);
+}
+
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val;
+
+ /* PHYC_CTL_LED_CTL */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= 0xFE00;
+ val |= 0x0092;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x18);
+
+ /* Select activity source by Tx and Rx, as suggested by PHY AE */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0006);
+
+ /* Select the closest activity blink rate to that in 10/100/1000 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_BLINK,
+ 0);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+
+ /* 'Interrupt Mask' */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ 0xFFFB, 0xFFFD);
+}
+
+static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 autoneg_val, an_1000_val, an_10_100_val;
+ u16 tmp_req_line_speed;
+
+ tmp_req_line_speed = phy->req_line_speed;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if (phy->req_line_speed == SPEED_10000)
+ phy->req_line_speed = SPEED_AUTO_NEG;
+
+ /*
+ * This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of a clear-on-read indication
+ */
+ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+ bnx2x_848xx_set_led(bp, phy);
+
+ /* set 1000 speed advertisement */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ &an_1000_val);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ &an_10_100_val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ &autoneg_val);
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ an_1000_val);
+
+ /* set 100 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
+ (phy->supported &
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full)))) {
+ an_10_100_val |= (1<<7);
+ /* Enable autoneg and restart autoneg for legacy speeds */
+ autoneg_val |= (1<<9 | 1<<12);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M\n");
+ }
+ /* set 10 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
+ (phy->supported &
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full)))) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<6);
+ DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if ((phy->req_line_speed == SPEED_100) &&
+ (phy->supported &
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full))) {
+ autoneg_val |= (1<<13);
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if ((phy->req_line_speed == SPEED_10) &&
+ (phy->supported &
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full))) {
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ an_10_100_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ /*
+ * Always write this if this is not 84833.
+ * For 84833, write it only when it's a forced speed.
+ */
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ ((autoneg_val & (1<<12)) == 0))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (phy->req_line_speed == SPEED_10000)) {
+ DP(NETIF_MSG_LINK, "Advertising 10G\n");
+ /* Restart autoneg for 10G*/
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
+ } else
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 1);
+
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, params);
+
+ phy->req_line_speed = tmp_req_line_speed;
+
+ return 0;
+}
+
+static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ return bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
+
+
+#define PHY84833_HDSHK_WAIT 300
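+/* Max number of 1ms polls of the 84833 firmware mailbox */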
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 idx;
+ u32 pair_swap;
+ u16 val;
+ u16 data;
+ struct bnx2x *bp = params->bp;
+ /* Do pair swap */
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ data = (u16)pair_swap;
+
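+ /*
+ * Firmware mailbox handshake via the TOP_CFG scratch registers:
+ * request access (REG2), pass the argument (REG4), issue the command
+ * (REG0), poll REG2 for completion, then clear the completion status.
+ */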
+ /* Write CMD_OPEN_OVERRIDE to STATUS reg */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ break;
+ msleep(1);
+ }
+ if (idx >= PHY84833_HDSHK_WAIT) {
+ DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG4,
+ data);
+ /* Issue pair swap command */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG0,
+ PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR))
+ break;
+ msleep(1);
+ }
+ if ((idx >= PHY84833_HDSHK_WAIT) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+ return -EINVAL;
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_CLEAR_COMPLETE);
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+ return 0;
+}
+
+
+static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 chip_id)
+{
+ u32 reset_pin[2];
+ u32 idx;
+ u8 reset_gpios;
+ if (CHIP_IS_E3(bp)) {
+ /* Assume that these will be GPIOs, not EPIOs. */
+ for (idx = 0; idx < 2; idx++) {
+ /* Map config param to register bit. */
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+ reset_pin[idx] = (reset_pin[idx] &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ reset_pin[idx] -= PIN_CFG_GPIO0_P0;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ } else {
+ /* On E2, read from a different place in shmem. */
+ for (idx = 0; idx < 2; idx++) {
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].default_cfg));
+ reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+ reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+ reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ }
+
+ return reset_gpios;
+}
+
+static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 reset_gpios;
+ u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ other_shmem_base_addr));
+
+ u32 shmem_base_path[2];
+ shmem_base_path[0] = params->shmem_base;
+ shmem_base_path[1] = other_shmem_base_addr;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
+ params->chip_id);
+
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
+ reset_gpios);
+
+ return 0;
+}
+
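+/*
+ * Common (port-independent) 84833 reset: pulse the reset GPIOs low then
+ * high and allow ~800ms for the PHY to come out of reset.
+ */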
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 chip_id)
+{
+ u8 reset_gpios;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ msleep(800);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+
+ return 0;
+}
+
+#define PHY84833_CONSTANT_LATENCY 1193
+static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port, initialize = 1;
+ u16 val;
+ u16 temp;
+ u32 actual_phy_selection, cms_enable, idx;
+ int rc = 0;
+
+ msleep(1);
+
+ if (!(CHIP_IS_E1(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+ } else {
+ /* MDIO reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ /* Bring PHY out of super isolate mode */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val &= ~MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bnx2x_84833_pair_swap_cfg(phy, params, vars);
+
+ /*
+ * The BCM84823 requires that the XGXS link up first at 10G for normal behavior
+ */
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+
+ /* Set dual-media configuration according to the board configuration */
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, &val);
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+ MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+ MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+ MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+
+ if (CHIP_IS_E3(bp)) {
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+ } else {
+ val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+ }
+
+ actual_phy_selection = bnx2x_phy_selection(params);
+
+ switch (actual_phy_selection) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ /* Do nothing. Essentially this is like the priority copper */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ /* Do nothing here. The first PHY won't be initialized at all */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+ initialize = 0;
+ break;
+ }
+ if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+ val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, val);
+ DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+ params->multi_phy_config, val);
+
+ /* AutogrEEEn */
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ /* Ensure that f/w is ready */
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ break;
+ usleep_range(1000, 1000);
+ }
+ if (idx >= PHY84833_HDSHK_WAIT) {
+ DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ /* Select EEE mode */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG3,
+ 0x2);
+
+ /* Set Idle and Latency */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG4,
+ PHY84833_CONSTANT_LATENCY + 1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_DATA3_REG,
+ PHY84833_CONSTANT_LATENCY + 1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_DATA4_REG,
+ PHY84833_CONSTANT_LATENCY);
+
+ /* Send EEE instruction to command register */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG0,
+ PHY84833_DIAG_CMD_SET_EEE_MODE);
+
+ /* Ensure that the command has completed */
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 1000);
+ }
+ if ((idx >= PHY84833_HDSHK_WAIT) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
+ return -EINVAL;
+ }
+
+ /* Reset command handler */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_CLEAR_COMPLETE);
+ }
+
+ if (initialize)
+ rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+ else
+ bnx2x_save_848xx_spirom_version(phy, params);
+	/* The 84833 PHY does not need this; configure CMS only for the 84823. */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ cms_enable = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+ }
+
+ return rc;
+}
+
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val, val1, val2;
+ u8 link_up = 0;
+
+ /* Check 10G-BaseT link status */
+ /* Check PMD signal ok */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, 0xFFFA, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ &val2);
+ DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+ /* Check link 10G */
+ if (val2 & (1<<11)) {
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ link_up = 1;
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ } else { /* Check Legacy speed link */
+ u16 legacy_status, legacy_speed;
+
+ /* Enable expansion register 0x42 (Operation mode status) */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+ /* Get legacy speed operation status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+ &legacy_status);
+
+ DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
+ legacy_status);
+ link_up = ((legacy_status & (1<<11)) == (1<<11));
+ if (link_up) {
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else /* Should not happen */
+ vars->line_speed = 0;
+
+ if (legacy_status & (1<<8))
+ vars->duplex = DUPLEX_FULL;
+ else
+ vars->duplex = DUPLEX_HALF;
+
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+ /* Check legacy speed AN resolution */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ }
+ if (link_up) {
+ DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ vars->line_speed);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+
+ return link_up;
+}
+
+static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+ int status = 0;
+ u32 spirom_ver;
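+	/* Bits [11:7] of raw_ver form the major number (high word), bits [6:0] the minor */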
+ spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+ status = bnx2x_format_ver(spirom_ver, str, len);
+ return status;
+}
+
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
+
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
+
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u16 val16;
+
+ if (!(CHIP_IS_E1(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ 0x400f, &val16);
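+		/* Set the low-power bit (bit 11) of the PMA control register */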
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x800);
+ }
+}
+
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u8 port;
+
+ if (!(CHIP_IS_E1(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ switch (mode) {
+ case LED_MODE_OFF:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
+ break;
+ case LED_MODE_FRONT_PANEL_OFF:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+ port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
+ break;
+ case LED_MODE_ON:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
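+			/* Preserve bit 15 and program the 0x2492 LED source pattern */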
+ val &= 0x8000;
+ val |= 0x2492;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
+ }
+ break;
+
+ case LED_MODE_OPER:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+
+ if (!((val &
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ 0xa492);
+ }
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+
+ /* Tell LED3 to blink on source */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (1<<6); /* A83B[8:6]= 1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+ }
+ break;
+ }
+
+ /*
+ * This is a workaround for E3+84833 until autoneg
+ * restart is fixed in f/w
+ */
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+ }
+}
+
+/******************************************************************/
+/* 54618SE PHY SECTION */
+/******************************************************************/
+static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+ u32 cfg_pin;
+
+ DP(NETIF_MSG_LINK, "54618SE cfg init\n");
+ usleep_range(1000, 1000);
+
+ /* This works with E3 only, no need to check the chip
+ before determining the port. */
+ port = params->port;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin high to bring the GPHY out of reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 1);
+
+	/* Wait for the GPHY to come out of reset */
+ msleep(50);
+
+ /* reset phy */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for the GPHY reset to complete */
+ msleep(50);
+
+ /* Configure LED4: set to INTR (0x6). */
+ /* Accessing shadow register 0xe. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL2);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= ~(0xf << 4);
+ temp |= (0x6 << 4);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ /* Configure INTR based on link status change. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_INTR_MASK,
+ ~MDIO_REG_INTR_MASK_LINK_STATUS);
+
+ /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+ /* Set up fc */
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ fc_val = 0;
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+ /* read all advertisement */
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ bnx2x_cl22_read(bp, phy,
+ 0x04,
+ &an_10_100_val);
+
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL,
+ &autoneg_val);
+
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
+ (1<<11));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl22_write(bp, phy,
+ 0x09,
+ an_1000_val);
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ /* set 100 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+ an_10_100_val |= (1<<7);
+ /* Enable autoneg and restart autoneg for legacy speeds */
+ autoneg_val |= (1<<9 | 1<<12);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M\n");
+ }
+
+ /* set 10 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<6);
+ DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if (phy->req_line_speed == SPEED_100) {
+ autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
+
+ /* Check if we should turn on Auto-GrEEEn */
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
+ if (temp == MDIO_REG_GPHY_ID_54618SE) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ temp = 6;
+ DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ } else {
+ temp = 0;
+ DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+ }
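+		/*
+		 * Indirect Clause 45 access through Clause 22: select the MMD
+		 * and register address, then use function 01b (bit 14) to
+		 * access the data.
+		 */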
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ MDIO_REG_GPHY_EEE_ADV);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ (0x1 << 14) | MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ temp);
+ }
+
+ bnx2x_cl22_write(bp, phy,
+ 0x04,
+ an_10_100_val | fc_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, autoneg_val);
+
+ return 0;
+}
+
+static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ case LED_MODE_OPER:
+ case LED_MODE_ON:
+ default:
+ break;
+ }
+ return;
+}
+
+static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port;
+
+ /*
+ * In case of no EPIO routed to reset the GPHY, put it
+ * in low power mode.
+ */
+ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
+ port = params->port;
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin low to put GPHY in reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 0);
+}
+
+static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u8 link_up = 0;
+ u16 legacy_status, legacy_speed;
+
+ /* Get speed operation status */
+ bnx2x_cl22_read(bp, phy,
+ 0x19,
+ &legacy_status);
+ DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
+
+ /* Read status to clear the PHY interrupt. */
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_INTR_STATUS,
+ &val);
+
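+	/* Aux status: bit 2 = link up, bits [10:8] = resolved speed/duplex */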
+ link_up = ((legacy_status & (1<<2)) == (1<<2));
+
+ if (link_up) {
+ legacy_speed = (legacy_status & (7<<8));
+ if (legacy_speed == (7<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (6<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (5<<8)) {
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_FULL;
+ }
+ /* Omitting 100Base-T4 for now */
+ else if (legacy_speed == (3<<8)) {
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (2<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (1<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_HALF;
+ } else /* Should not happen */
+ vars->line_speed = 0;
+
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+
+ /* Check legacy speed AN resolution */
+ bnx2x_cl22_read(bp, phy,
+ 0x01,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl22_read(bp, phy,
+ 0x06,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+
+ DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
+ vars->line_speed);
+
+ /* Report whether EEE is resolved. */
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
+ if (val == MDIO_REG_GPHY_ID_54618SE) {
+ if (vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ val = 0;
+ else {
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ MDIO_REG_GPHY_EEE_RESOLVED);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ (0x1 << 14) | MDIO_AN_DEVAD);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ &val);
+ }
+ DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
+ }
+
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+ DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
+
+	/* Enable master/slave manual mode and set to master */
+ /* mii write 9 [bits set 11 12] */
+ bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+ /* forced 1G and disable autoneg */
+ /* set val [mii read 0] */
+ /* set val [expr $val & [bits clear 6 12 13]] */
+ /* set val [expr $val | [bits set 6 8]] */
+ /* mii write 0 $val */
+ bnx2x_cl22_read(bp, phy, 0x00, &val);
+ val &= ~((1<<6) | (1<<12) | (1<<13));
+ val |= (1<<6) | (1<<8);
+ bnx2x_cl22_write(bp, phy, 0x00, val);
+
+ /* Set external loopback and Tx using 6dB coding */
+ /* mii write 0x18 7 */
+ /* set val [mii read 0x18] */
+ /* mii write 0x18 [expr $val | [bits set 10 15]] */
+ bnx2x_cl22_write(bp, phy, 0x18, 7);
+ bnx2x_cl22_read(bp, phy, 0x18, &val);
+ bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+ /*
+ * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
+/******************************************************************/
+/* SFX7101 PHY SECTION */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* SFX7101_XGXS_TEST1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
+
+static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u16 fw_ver1, fw_ver2, val;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
+
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ /* Restart autoneg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+ val |= 0x200;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+ /* Save spirom version */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, params->port,
+ (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+ return 0;
+}
+
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up;
+ u16 val1, val2;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+ val2, val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+ val2, val1);
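+	/* Link status is latched low; the second read (val1) reflects the current state */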
+ link_up = ((val1 & 4) == 4);
+ /* if link is up print the AN outcome of the SFX7101 PHY */
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+ &val2);
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+ val2, (val2 & (1<<14)));
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ if (*len < 5)
+ return -EINVAL;
+ str[0] = (spirom_ver & 0xFF);
+ str[1] = (spirom_ver & 0xFF00) >> 8;
+ str[2] = (spirom_ver & 0xFF0000) >> 16;
+ str[3] = (spirom_ver & 0xFF000000) >> 24;
+ str[4] = '\0';
+ *len -= 5;
+ return 0;
+}
+
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ for (cnt = 0; cnt < 10; cnt++) {
+ msleep(50);
+ /* Writes a self-clearing reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
+ /* Wait for clear */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ if ((val & (1<<15)) == 0)
+ break;
+ }
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+ /* Low power mode is controlled by GPIO 2 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ /* The PHY reset is controlled by GPIO 1 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ u16 val = 0;
+ struct bnx2x *bp = params->bp;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ val = 2;
+ break;
+ case LED_MODE_ON:
+ val = 1;
+ break;
+ case LED_MODE_OPER:
+ val = 0;
+ break;
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7107_LINK_LED_CNTL,
+ val);
+}
+
+/******************************************************************/
+/* STATIC PHY DECLARATION */
+/******************************************************************/
+
+static struct bnx2x_phy phy_null = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+ .addr = 0,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = 0,
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)NULL,
+ .read_status = (read_status_t)NULL,
+ .link_reset = (link_reset_t)NULL,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_serdes = {
+ .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_xgxs_config_init,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_xgxs = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_CX4,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_xgxs_config_init,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_warpcore = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
- .flags = (FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
++ .flags = FLAGS_HW_LOCK_REQUIRED,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+ .config_init = (config_init_t)bnx2x_warpcore_config_init,
+ .read_status = (read_status_t)bnx2x_warpcore_read_status,
+ .link_reset = (link_reset_t)bnx2x_warpcore_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_7101 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_7101_config_init,
+ .read_status = (read_status_t)bnx2x_7101_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8073 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_HW_LOCK_REQUIRED,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_KR,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8073_config_init,
+ .read_status = (read_status_t)bnx2x_8073_read_status,
+ .link_reset = (link_reset_t)bnx2x_8073_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8705 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_XFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8705_config_init,
+ .read_status = (read_status_t)bnx2x_8705_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8706 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+ .addr = 0xff,
+ .def_md_devad = 0,
- FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
++ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8706_config_init,
+ .read_status = (read_status_t)bnx2x_8706_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8726 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
- .flags = (FLAGS_FAN_FAILURE_DET_REQ |
- FLAGS_TX_ERROR_CHECK),
++ FLAGS_INIT_XGXS_FIRST),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8726_config_init,
+ .read_status = (read_status_t)bnx2x_8726_read_status,
+ .link_reset = (link_reset_t)bnx2x_8726_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8727 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+ .addr = 0xff,
+ .def_md_devad = 0,
++ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8727_config_init,
+ .read_status = (read_status_t)bnx2x_8727_read_status,
+ .link_reset = (link_reset_t)bnx2x_8727_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+};
+
+static struct bnx2x_phy phy_8481 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8481_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_8481_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84823 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_54618se = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+ .config_init = (config_init_t)bnx2x_54618se_config_init,
+ .read_status = (read_status_t)bnx2x_54618se_read_status,
+ .link_reset = (link_reset_t)bnx2x_54618se_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+/*****************************************************************/
+/* */
+/* Populate the phy according to the shmem config. Main: bnx2x_populate_phy */
+/* */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+ struct bnx2x_phy *phy, u8 port,
+ u8 phy_index)
+{
+ /* Get the 4 lanes xgxs config rx and tx */
+ u32 rx = 0, tx = 0, i;
+ for (i = 0; i < 2; i++) {
+ /*
+ * INT_PHY and EXT_PHY1 share the same value location in the
+		 * shmem. When num_phys is greater than 1, this value
+ * applies only to EXT_PHY1
+ */
+ if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+ tx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+ } else {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
+ }
+
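+		/* Each 32-bit shmem word holds two 16-bit lane values, high half first */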
+ phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+ phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+ phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+ phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+ }
+}
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+ u8 phy_index, u8 port)
+{
+ u32 ext_phy_config = 0;
+ switch (phy_index) {
+ case EXT_PHY1:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
+ break;
+ case EXT_PHY2:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config2));
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+ return -EINVAL;
+ }
+
+ return ext_phy_config;
+}
+
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 phy_addr;
+ u32 chip_id;
+ u32 switch_cfg = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[port].link_config)) &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+ DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
+ if (USES_WARPCORE(bp)) {
+ u32 serdes_net_if;
+ phy_addr = REG_RD(bp,
+ MISC_REG_WC0_CTRL_PHY_ADDR);
+ *phy = phy_warpcore;
+ if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+ phy->flags |= FLAGS_4_PORT_MODE;
+ else
+ phy->flags &= ~FLAGS_4_PORT_MODE;
+ /* Check Dual mode */
+ serdes_net_if = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ /*
+ * Set the appropriate supported and flags indications per
+ * interface type of the chip
+ */
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+ phy->supported &= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_BASE_T;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ phy->media_type = ETH_PHY_XFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_SFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ phy->media_type = ETH_PHY_KR;
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
+ serdes_net_if);
+ break;
+ }
+
+ /*
+ * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+ * was not set as expected. For B0, ECO will be enabled so there
+ * won't be an issue there
+ */
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ phy->flags |= FLAGS_MDC_MDIO_WA;
+ else
+ phy->flags |= FLAGS_MDC_MDIO_WA_B0;
+ } else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR +
+ port * 0x10);
+ *phy = phy_serdes;
+ break;
+ case SWITCH_CFG_10G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR +
+ port * 0x18);
+ *phy = phy_xgxs;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+ return -EINVAL;
+ }
+ }
+ phy->addr = (u8)phy_addr;
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+ port);
+ if (CHIP_IS_E2(bp))
+ phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
+ else
+ phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+
+ DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+ port, phy->addr, phy->mdio_ctrl);
+
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
+ return 0;
+}
+
+static int bnx2x_populate_ext_phy(struct bnx2x *bp,
+ u8 phy_index,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 ext_phy_config, phy_type, config2;
+ u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+ ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+ phy_index, port);
+ phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ /* Select the phy type */
+ switch (phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+ *phy = phy_8073;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+ *phy = phy_8705;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+ *phy = phy_8706;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8726;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+ /* BCM8727_NOC => BCM8727 no over current */
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ phy->flags |= FLAGS_NOC;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+ *phy = phy_8481;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ *phy = phy_84823;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ *phy = phy_84833;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
+ *phy = phy_54618se;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+ *phy = phy_7101;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ *phy = phy_null;
+ return -EINVAL;
+ default:
+ *phy = phy_null;
+ return 0;
+ }
+
+ phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
+
+ /*
+ * The shmem address of the phy version is located on different
+ * structures. In case this structure is too old, do not set
+ * the address
+ */
+ config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+ dev_info.shared_hw_config.config2));
+ if (phy_index == EXT_PHY1) {
+ phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+ port_mb[port].ext_phy_fw_version);
+
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ } else {
+ u32 size = REG_RD(bp, shmem2_base);
+
+ if (size >
+ offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+ phy->ver_addr = shmem2_base +
+ offsetof(struct shmem2_region,
+ ext_phy_fw_version2[port]);
+ }
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+ mdc_mdio_access = (config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+ (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+ }
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+
+ /*
+ * In case mdc/mdio_access of the external phy is different than the
+ * mdc/mdio access of the XGXS, a HW lock must be taken in each access
+ * to prevent one port interfere with another port's CL45 operations.
+ */
+ if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
+ phy->flags |= FLAGS_HW_LOCK_REQUIRED;
+ DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+ phy_type, port, phy_index);
+ DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
+ phy->addr, phy->mdio_ctrl);
+ return 0;
+}
+
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+ u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+{
+ int status = 0;
+ phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+ if (phy_index == INT_PHY)
+ return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+ status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, phy);
+ return status;
+}
+
+static void bnx2x_phy_def_cfg(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 phy_index)
+{
+ struct bnx2x *bp = params->bp;
+ u32 link_config;
+ /* Populate the default phy configuration for MF mode */
+ if (phy_index == EXT_PHY2) {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config2));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask2));
+ } else {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
+ }
+ DP(NETIF_MSG_LINK,
+ "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n",
+ phy_index, link_config, phy->speed_cap_mask);
+
+ phy->req_duplex = DUPLEX_FULL;
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
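+		/* fall through */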
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ phy->req_line_speed = SPEED_10;
+ break;
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
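+		/* fall through */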
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ phy->req_line_speed = SPEED_100;
+ break;
+ case PORT_FEATURE_LINK_SPEED_1G:
+ phy->req_line_speed = SPEED_1000;
+ break;
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ phy->req_line_speed = SPEED_2500;
+ break;
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ phy->req_line_speed = SPEED_10000;
+ break;
+ default:
+ phy->req_line_speed = SPEED_AUTO_NEG;
+ break;
+ }
+
+ switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+ case PORT_FEATURE_FLOW_CONTROL_AUTO:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_TX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_RX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_BOTH:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+ default:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ break;
+ }
+}
+
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+ u32 phy_config_swapped, prio_cfg;
+ u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ prio_cfg = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SELECTION_MASK;
+
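+	/* When the PHYs are swapped, mirror the first/second PHY selection */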
+ if (phy_config_swapped) {
+ switch (prio_cfg) {
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ }
+ } else
+ return_cfg = prio_cfg;
+
+ return return_cfg;
+}
+
+int bnx2x_phy_probe(struct link_params *params)
+{
+ u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u32 phy_config_swapped, sync_offset, media_types;
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "Begin phy probe\n");
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+ " actual_phy_idx %x\n", phy_config_swapped,
+ phy_index, actual_phy_idx);
+		phy = &params->phy[actual_phy_idx];
+ if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+ params->shmem2_base, params->port,
+ phy) != 0) {
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+ phy_index);
+			for (phy_index = INT_PHY;
+			      phy_index < MAX_PHYS;
+			      phy_index++)
+				params->phy[phy_index] = phy_null;
+ return -EINVAL;
+ }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+ break;
+
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+
+ /*
+ * Update media type for non-PMF sync only for the first time
+ * In case the media type changes afterwards, it will be updated
+ * using the update_status function
+ */
+ if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx))) == 0) {
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx));
+ }
+ REG_WR(bp, sync_offset, media_types);
+
+ bnx2x_phy_def_cfg(params, phy, phy_index);
+ params->num_phys++;
+ }
+
+ DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+ return 0;
+}
+
+void bnx2x_init_bmac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_BMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ bnx2x_xgxs_deassert(params);
+
+ /* set bmac loopback */
+ bnx2x_bmac_enable(params, vars, 1);
+
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_emac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_EMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ bnx2x_xgxs_deassert(params);
+	/* set emac loopback */
+ bnx2x_emac_enable(params, vars, 1);
+ bnx2x_emac_program(params, vars);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xmac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ if (!params->req_line_speed[0])
+ vars->line_speed = SPEED_10000;
+ else
+ vars->line_speed = params->req_line_speed[0];
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_XMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ /*
+ * Set WC to loopback mode since link is required to provide clock
+ * to the XMAC in 20G mode
+ */
+	bnx2x_set_aer_mmd(params, &params->phy[0]);
+	bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+ params->phy[INT_PHY].config_loopback(
+				&params->phy[INT_PHY],
+ params);
+
+ bnx2x_xmac_enable(params, vars, 1);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_umac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_UMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_umac_enable(params, vars, 1);
+
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xgxs_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->duplex = DUPLEX_FULL;
+ if (params->req_line_speed[0] == SPEED_1000)
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+
+ if (!USES_WARPCORE(bp))
+ bnx2x_xgxs_deassert(params);
+ bnx2x_link_initialize(params, vars);
+
+ if (params->req_line_speed[0] == SPEED_1000) {
+ if (USES_WARPCORE(bp))
+ bnx2x_umac_enable(params, vars, 0);
+ else {
+ bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
+ }
+ } else {
+ if (USES_WARPCORE(bp))
+ bnx2x_xmac_enable(params, vars, 0);
+ else
+ bnx2x_bmac_enable(params, vars, 0);
+ }
+
+ if (params->loopback_mode == LOOPBACK_XGXS) {
+ /* set 10G XGXS loopback */
+		params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+
+ } else {
+ /* set external phy loopback */
+ u8 phy_index;
+ for (phy_index = EXT_PHY1;
+ phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].config_loopback)
+				params->phy[phy_index].config_loopback(
+					&params->phy[phy_index],
+					params);
+ }
+ }
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+ DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
+ vars->link_status = 0;
+ vars->phy_link_up = 0;
+ vars->link_up = 0;
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+ vars->phy_flags = 0;
+
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ bnx2x_emac_init(params, vars);
+
+ if (params->num_phys == 0) {
+ DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+ return -EINVAL;
+ }
+ set_phy_vars(params, vars);
+
+ DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
+ switch (params->loopback_mode) {
+ case LOOPBACK_BMAC:
+ bnx2x_init_bmac_loopback(params, vars);
+ break;
+ case LOOPBACK_EMAC:
+ bnx2x_init_emac_loopback(params, vars);
+ break;
+ case LOOPBACK_XMAC:
+ bnx2x_init_xmac_loopback(params, vars);
+ break;
+ case LOOPBACK_UMAC:
+ bnx2x_init_umac_loopback(params, vars);
+ break;
+ case LOOPBACK_XGXS:
+ case LOOPBACK_EXT_PHY:
+ bnx2x_init_xgxs_loopback(params, vars);
+ break;
+ default:
+ if (!CHIP_IS_E3(bp)) {
+ if (params->switch_cfg == SWITCH_CFG_10G)
+ bnx2x_xgxs_deassert(params);
+ else
+ bnx2x_serdes_deassert(bp, params->port);
+ }
+ bnx2x_link_initialize(params, vars);
+ msleep(30);
+ bnx2x_link_int_enable(params);
+ break;
+ }
+ return 0;
+}
+
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ u8 reset_ext_phy)
+{
+ struct bnx2x *bp = params->bp;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
+ DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
+ /* disable attentions */
+ vars->link_status = 0;
+ bnx2x_update_mng(params, vars->link_status);
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* disable nig egress interface */
+ if (!CHIP_IS_E3(bp)) {
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+ }
+
+ /* Stop BigMac rx */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_bmac_rx_disable(bp, port);
+ else
+ bnx2x_xmac_disable(params);
+ /* disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ msleep(10);
+	/* The PHY reset is controlled by GPIO 1; hold it low */
+ /* clear link led */
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+ if (reset_ext_phy) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].link_reset) {
+				bnx2x_set_aer_mmd(params,
+						  &params->phy[phy_index]);
+				params->phy[phy_index].link_reset(
+					&params->phy[phy_index],
+					params);
+ }
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
+ }
+ }
+
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
+ if (params->phy[INT_PHY].link_reset)
+		params->phy[INT_PHY].link_reset(
+			&params->phy[INT_PHY], params);
+ /* reset BigMac */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* disable nig ingress interface */
+ if (!CHIP_IS_E3(bp)) {
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+ }
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ return 0;
+}
+
+/****************************************************************************/
+/* Common function */
+/****************************************************************************/
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
+ u16 val;
+ s8 port = 0;
+ s8 port_of_path = 0;
+ u32 swap_val, swap_override;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port ^= (swap_val && swap_override);
+ bnx2x_ext_phy_hw_reset(bp, port);
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u32 shmem_base, shmem2_base;
+		/* In E2, the same phy is used for port0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) != 0) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+		/* Need to take the phy out of low power mode in order
+		 * to access its registers */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, &phy[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
+ }
+
+ /* Add delay of 150ms after reset */
+ msleep(150);
+
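+	/* Map the PHY structs to ports by MDIO address parity: if port 0's
+	 * PHY got an odd address, the wiring is presumably crossed, so
+	 * swap the port-to-PHY mapping.
+	 */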
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(bp))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
+ return -EINVAL;
+
+ /* Only set bit 10 = 1 (Tx power down) */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ /* Phase1 of TX_POWER_DOWN reset */
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
+ }
+
+ /*
+ * Toggle Transmitter: Power down and then up with 600ms delay
+ * between
+ */
+ msleep(600);
+
+ /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Phase2 of POWER_DOWN_RESET */
+ /* Release bit 10 (Release Tx power down) */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ msleep(15);
+
+ /* Read modify write the SPI-ROM version select register */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+
+ /* set GPIO2 back to LOW */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ }
+ return 0;
+}
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ u32 val;
+ s8 port;
+ struct bnx2x_phy phy;
+ /* Use port1 because of the static port-swap */
+ /* Enable the module detection interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= ((1<<MISC_REGISTERS_GPIO_3)|
+ (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+ bnx2x_ext_phy_hw_reset(bp, 0);
+ msleep(5);
+ for (port = 0; port < PORT_MAX; port++) {
+ u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ }
+ /* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+
+ /* Reset phy*/
+ bnx2x_cl45_write(bp, &phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+ /* Set fault module detected LED on */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
+ }
+
+ return 0;
+}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+ u8 *io_gpio, u8 *io_port)
+{
+
+ u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ s8 port, reset_gpio;
+ u32 swap_val, swap_override;
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
+ s8 port_of_path;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+
+ reset_gpio = MISC_REGISTERS_GPIO_1;
+ port = 1;
+
+ /*
+ * Retrieve the reset gpio/port which control the reset.
+ * Default is GPIO1, PORT1
+ */
+ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+ (u8 *)&reset_gpio, (u8 *)&port);
+
+ /* Calculate the port based on port swap */
+ port ^= (swap_val && swap_override);
+
+ /* Initiate PHY reset*/
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ msleep(1);
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
+ msleep(5);
+
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, &phy[port],
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ }
+
+ /* Add delay of 150ms after reset */
+ msleep(150);
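+	/* As with the 8073: swap the port-to-PHY mapping when port 0's
+	 * PHY has an odd MDIO address.
+	 */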
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(bp))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
+ return -EINVAL;
+ /* Disable PHY transmitter output */
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, 1);
+
+ }
+ return 0;
+}
+
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 ext_phy_type, u32 chip_id)
+{
+ int rc = 0;
+
+ switch (ext_phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+ rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		/*
+		 * GPIO1 affects both ports, so it has to be pulled
+		 * even when only a single port is initialized
+		 */
+ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		/*
+		 * The GPIO3 pins are linked, so both need to be toggled
+		 * to obtain the required 2us pulse.
+		 */
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ rc = -EINVAL;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "ext_phy 0x%x common init not required\n",
+ ext_phy_type);
+ break;
+ }
+
+ if (rc != 0)
+		netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+			   0);
+ return rc;
+}
+
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id)
+{
+ int rc = 0;
+ u32 phy_ver, val;
+ u8 phy_index = 0;
+ u32 ext_phy_type, ext_phy_config;
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
+ DP(NETIF_MSG_LINK, "Begin common phy init\n");
+ if (CHIP_IS_E3(bp)) {
+ /* Enable EPIO */
+ val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+ REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+ }
+ /* Check if common init was already done */
+ phy_ver = REG_RD(bp, shmem_base_path[0] +
+ offsetof(struct shmem_region,
+ port_mb[PORT_0].ext_phy_fw_version));
+ if (phy_ver) {
+ DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+ phy_ver);
+ return 0;
+ }
+
+	/* Read the ext_phy_type for an arbitrary port (0) */
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ ext_phy_config = bnx2x_get_ext_phy_config(bp,
+ shmem_base_path[0],
+ phy_index, 0);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, ext_phy_type,
+ chip_id);
+ }
+ return rc;
+}
+
+static void bnx2x_check_over_curr(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+ u32 pin_val;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+ PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+ PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+ /* Ignore check if no external input PIN available */
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+ return;
+
+ if (!pin_val) {
+ if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+			netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
+				   params->port);
+ vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+ }
+ } else
+ vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+}
+
+static void bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 lss_status)
+{
+ struct bnx2x *bp = params->bp;
+ /* Compare new value with previous value */
+ u8 led_mode;
+ u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+
+ if ((lss_status ^ half_open_conn) == 0)
+ return;
+
+ /* If values differ */
+ DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
+ half_open_conn, lss_status);
+
+ /*
+ * a. Update shmem->link_status accordingly
+ * b. Update link_vars->link_up
+ */
+ if (lss_status) {
+ DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ /*
+ * Set LED mode to off since the PHY doesn't know about these
+ * errors
+ */
+ led_mode = LED_MODE_OFF;
+ } else {
+ DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_up = 1;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ led_mode = LED_MODE_OPER;
+ }
+ /* Update the LED according to the link state */
+ bnx2x_set_led(params, vars, led_mode, SPEED_10000);
+
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+
+	/* c. Trigger General Attention */
+ vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_notify_link_changed(bp);
+}
+
+/******************************************************************************
+* Description:
+*	This function checks for a half-opened connection change indication.
+*	When such a change occurs, it calls bnx2x_analyze_link_error() to
+*	check whether Remote Fault is set or cleared. Reception of a remote
+*	fault status message in the MAC indicates that the peer's MAC has
+*	detected a fault, for example, due to a break in the TX side of
+*	the fiber.
+******************************************************************************/
+static void bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 lss_status = 0;
+ u32 mac_base;
+	/* Nothing to check unless the physical link is up @ 10G */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return;
+
+ if (CHIP_IS_E3(bp) &&
+ (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ /* Check E3 XMAC */
+ /*
+ * Note that link speed cannot be queried here, since it may be
+ * zero while link is down. In case UMAC is active, LSS will
+ * simply not be set
+ */
+ mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+		/* Clear sticky bits (Requires rising edge) */
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+ lss_status = 1;
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
+ /* Check E1X / E2 BMAC */
+ u32 lss_status_reg;
+ u32 wb_data[2];
+ mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ /* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+ if (CHIP_IS_E2(bp))
+ lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+ else
+ lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+ REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+ lss_status = (wb_data[0] > 0);
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ }
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp;
+	u16 phy_idx;
+
+	if (!params) /* must be checked before dereferencing params->bp */
+		return;
+
+	bp = params->bp;
+
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+ bnx2x_check_half_open_conn(params, vars);
+ break;
+ }
+ }
+
+ if (CHIP_IS_E3(bp))
+ bnx2x_check_over_curr(params, vars);
+}
+
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+ u8 phy_index;
+ struct bnx2x_phy phy;
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ 0, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+
+ if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+ return 1;
+ }
+ return 0;
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port)
+{
+ u8 phy_index, fan_failure_det_req = 0;
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy)
+ != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+ fan_failure_det_req |= (phy.flags &
+ FLAGS_FAN_FAILURE_DET_REQ);
+ }
+ return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+ u8 phy_index;
+ struct bnx2x *bp = params->bp;
+ bnx2x_update_mng(params, 0);
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (params->phy[phy_index].hw_reset) {
+			params->phy[phy_index].hw_reset(
+				&params->phy[phy_index],
+				params);
+ params->phy[phy_index] = phy_null;
+ }
+ }
+}
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+ u32 chip_id, u32 shmem_base, u32 shmem2_base,
+ u8 port)
+{
+ u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+ u32 val;
+ u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+ if (CHIP_IS_E3(bp)) {
+ if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+ shmem_base,
+ port,
+ &gpio_num,
+ &gpio_port) != 0)
+ return;
+ } else {
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+ shmem2_base, port, &phy)
+ != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return;
+ }
+ if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+ gpio_num = MISC_REGISTERS_GPIO_3;
+ gpio_port = port;
+ break;
+ }
+ }
+ }
+
+ if (gpio_num == 0xff)
+ return;
+
+ /* Set GPIO3 to trigger SFP+ module insertion/removal */
+ bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
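+	/* The port is flipped only when both port-swap and strap-override
+	 * are set (note the logical, not bitwise, AND below).
+	 */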
+ gpio_port ^= (swap_val && swap_override);
+
+ vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+ (gpio_num + (gpio_port << 2));
+
+ sync_offset = shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+ REG_WR(bp, sync_offset, vars->aeu_int_mask);
+
+ DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
+ gpio_num, gpio_port, vars->aeu_int_mask);
+
+ if (port == 0)
+ offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+ else
+ offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+ /* Open appropriate AEU for interrupts */
+ aeu_mask = REG_RD(bp, offset);
+ aeu_mask |= vars->aeu_int_mask;
+ REG_WR(bp, offset, aeu_mask);
+
+ /* Enable the GPIO to trigger interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= 1 << (gpio_num + (gpio_port << 2));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+}
--- /dev/null
- opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
- (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+/* bnx2x_main.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h> /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+#include <linux/io.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
+
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_VERSION \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+
+/* Time in jiffies before concluding the transmitter is hung */
+#define TX_TIMEOUT (5*HZ)
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+ DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Eliezer Tamir");
+MODULE_DESCRIPTION("Broadcom NetXtreme II "
+ "BCM57710/57711/57711E/"
+ "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+ "57840/57840_MF Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);
+
+static int multi_mode = 1;
+module_param(multi_mode, int, 0);
+MODULE_PARM_DESC(multi_mode, " Multi queue mode "
+ "(0 Disable; 1 Enable (default))");
+
+int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+ " (default is as a number of CPUs)");
+
+static int disable_tpa;
+module_param(disable_tpa, int, 0);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+#define INT_MODE_INTx 1
+#define INT_MODE_MSI 2
+static int int_mode;
+module_param(int_mode, int, 0);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+ "(1 INT#x; 2 MSI)");
+
+static int dropless_fc;
+module_param(dropless_fc, int, 0);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
+static int poll;
+module_param(poll, int, 0);
+MODULE_PARM_DESC(poll, " Use polling (for debug)");
+
+static int mrrs = -1;
+module_param(mrrs, int, 0);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+struct workqueue_struct *bnx2x_wq;
+
+enum bnx2x_board_type {
+ BCM57710 = 0,
+ BCM57711,
+ BCM57711E,
+ BCM57712,
+ BCM57712_MF,
+ BCM57800,
+ BCM57800_MF,
+ BCM57810,
+ BCM57810_MF,
+ BCM57840,
+ BCM57840_MF
+};
+
+/* indexed by board_type, above */
+static struct {
+ char *name;
+} board_info[] __devinitdata = {
+ { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
+ "Ethernet Multi Function"}
+};
+
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840
+#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
+#endif
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+ u32 addr, dma_addr_t mapping)
+{
+ REG_WR(bp, addr, U64_LO(mapping));
+ REG_WR(bp, addr + 4, U64_HI(mapping));
+}
+
+static inline void storm_memset_spq_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+static inline void storm_memset_eq_data(struct bnx2x *bp,
+ struct event_ring_data *eq_data,
+ u16 pfid)
+{
+ size_t size = sizeof(struct event_ring_data);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+ u16 pfid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+ REG_WR16(bp, addr, eq_prod);
+}
+
+/* used only at init
+ * locking is done by mcp
+ */
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+}
+
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+ u32 val;
+
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+
+ return val;
+}
+
+#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE "dst_addr [none]"
+
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+ int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+
+}
+
+/* copy command into DMAE command memory and set DMAE command go */
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
+{
+ u32 cmd_offset;
+ int i;
+
+ cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+ for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+ REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+
+ DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
+ idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
+ }
+ REG_WR(bp, dmae_reg_go_c[idx], 1);
+}
+
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
+{
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+ DMAE_CMD_C_ENABLE);
+}
+
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
+{
+ return opcode & ~DMAE_CMD_SRC_RESET;
+}
+
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type)
+{
+ u32 opcode = 0;
+
+ opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+ (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+ opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
+
+ opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
++ opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
++ (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
+
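+	/* Select the byte/dword swap mode matching the host endianness */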
+#ifdef __BIG_ENDIAN
+ opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
+#else
+ opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
+#endif
+ if (with_comp)
+ opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+ return opcode;
+}
+
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
+{
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ /* set the opcode */
+ dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
+ true, DMAE_COMP_PCI);
+
+ /* fill in the completion parameters */
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+}
+
+/* issue a dmae command over the init-channel and wait for completion */
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae)
+{
+ u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+ int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
+ int rc = 0;
+
+ DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+ /*
+ * Lock the dmae channel. Disable BHs to prevent a dead-lock
+ * as long as this code is called both from syscall context and
+ * from ndo_set_rx_mode() flow that may be called from BH.
+ */
+ spin_lock_bh(&bp->dmae_lock);
+
+ /* reset completion */
+ *wb_comp = 0;
+
+ /* post the command on the channel used for initializations */
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+
+ /* wait for completion */
+ udelay(5);
+ while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
+
+ if (!cnt) {
+ BNX2X_ERR("DMAE timeout!\n");
+ rc = DMAE_TIMEOUT;
+ goto unlock;
+ }
+ cnt--;
+ udelay(50);
+ }
+ if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ BNX2X_ERR("DMAE PCI error!\n");
+ rc = DMAE_PCI_ERROR;
+ }
+
+ DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+unlock:
+ spin_unlock_bh(&bp->dmae_lock);
+ return rc;
+}
+
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d) using indirect\n",
+		   dst_addr, len32);
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = U64_LO(dma_addr);
+ dmae.src_addr_hi = U64_HI(dma_addr);
+ dmae.dst_addr_lo = dst_addr >> 2;
+ dmae.dst_addr_hi = 0;
+ dmae.len = len32;
+
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+ int i;
+
+		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d) using indirect\n",
+		   src_addr, len32);
+ for (i = 0; i < len32; i++)
+ data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = src_addr >> 2;
+ dmae.src_addr_hi = 0;
+ dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+ dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+ dmae.len = len32;
+
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
+{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
+ int offset = 0;
+
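+	/* DMAE lengths are counted in 32-bit words while addresses advance
+	 * in bytes, hence the "* 4" on the offset below.
+	 */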
+ while (len > dmae_wr_max) {
+ bnx2x_write_dmae(bp, phys_addr + offset,
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
+ }
+
+ bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
+
+/* used only for slowpath so not inlined */
+static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
+{
+ u32 wb_write[2];
+
+ wb_write[0] = val_hi;
+ wb_write[1] = val_lo;
+ REG_WR_DMAE(bp, reg, wb_write, 2);
+}
+
+#ifdef USE_WB_RD
+static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
+{
+ u32 wb_data[2];
+
+ REG_RD_DMAE(bp, reg, wb_data, 2);
+
+ return HILO_U64(wb_data[0], wb_data[1]);
+}
+#endif
+
+static int bnx2x_mc_assert(struct bnx2x *bp)
+{
+ char last_idx;
+ int i, rc = 0;
+ u32 row0, row1, row2, row3;
+
+ /* XSTORM */
+ last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* TSTORM */
+ last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* CSTORM */
+ last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* USTORM */
+ last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
+{
+ u32 addr, val;
+ u32 mark, offset;
+ __be32 data[9];
+ int word;
+ u32 trace_shmem_base;
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+ netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff));
+
+ val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+ if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+ printk("%s" "MCP PC at 0x%x\n", lvl, val);
+
+ if (BP_PATH(bp) == 0)
+ trace_shmem_base = bp->common.shmem_base;
+ else
+ trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
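+	/* The firmware keeps the trace mark just below the shmem base;
+	 * translate it into a scratchpad address (rounded up to a dword).
+	 */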
+ addr = trace_shmem_base - 0x0800 + 4;
+ mark = REG_RD(bp, addr);
+ mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+ + ((mark + 0x3) & ~0x3) - 0x08000000;
+ printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
+
+ printk("%s", lvl);
+ for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+ printk("%s" "end of fw dump\n", lvl);
+}
+
+static inline void bnx2x_fw_dump(struct bnx2x *bp)
+{
+ bnx2x_fw_dump_lvl(bp, KERN_ERR);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp)
+{
+ int i;
+ u16 j;
+ struct hc_sp_status_block_data sp_sb_data;
+ int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+ u16 start = 0, end = 0;
+ u8 cos;
+#endif
+
+ bp->stats_state = STATS_STATE_DISABLED;
+ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+ BNX2X_ERR("begin crash dump -----------------\n");
+
+ /* Indices */
+ /* Common */
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ bp->def_status_blk->atten_status_block.attn_bits,
+ bp->def_status_blk->atten_status_block.attn_bits_ack,
+ bp->def_status_blk->atten_status_block.status_block_id,
+ bp->def_status_blk->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ bp->def_status_blk->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
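+	/* Copy the slow-path status block data out of CSTORM internal
+	 * memory, one dword at a time.
+	 */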
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int loop;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.common.state_machine :
+ sb_data_e2.common.state_machine;
+ struct hc_index_data *hc_index_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.index_data :
+ sb_data_e2.index_data;
+ u8 data_size, cos;
+ u32 *sb_data_p;
+ struct bnx2x_fp_txdata txdata;
+
+ /* Rx */
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+ " rx_comp_prod(0x%x)"
+ " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
+ i, fp->rx_bd_prod, fp->rx_bd_cons,
+ fp->rx_comp_prod,
+ fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+ " fp_hc_idx(0x%x)\n",
+ fp->rx_sge_prod, fp->last_max_sge,
+ le16_to_cpu(fp->fp_hc_idx));
+
+ /* Tx */
+		for_each_cos_in_tx_queue(fp, cos) {
+ txdata = fp->txdata[cos];
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
+ i, txdata.tx_pkt_prod,
+ txdata.tx_pkt_cons, txdata.tx_bd_prod,
+ txdata.tx_bd_cons,
+ le16_to_cpu(*txdata.tx_cons_sb));
+ }
+
+ loop = CHIP_IS_E1x(bp) ?
+ HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
+
+ /* host sb data */
+
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ continue;
+#endif
+ BNX2X_ERR(" run indexes (");
+ for (j = 0; j < HC_SB_MAX_SM; j++)
+ pr_cont("0x%x%s",
+ fp->sb_running_index[j],
+ (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+ BNX2X_ERR(" indexes (");
+ for (j = 0; j < loop; j++)
+ pr_cont("0x%x%s",
+ fp->sb_index_values[j],
+ (j == loop - 1) ? ")" : " ");
+ /* fw sb data */
+ data_size = CHIP_IS_E1x(bp) ?
+ sizeof(struct hc_status_block_data_e1x) :
+ sizeof(struct hc_status_block_data_e2);
+ data_size /= sizeof(u32);
+ sb_data_p = CHIP_IS_E1x(bp) ?
+ (u32 *)&sb_data_e1x :
+ (u32 *)&sb_data_e2;
+ /* copy sb data in here */
+ for (j = 0; j < data_size; j++)
+ *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+ j * sizeof(u32));
+
+ if (!CHIP_IS_E1x(bp)) {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
+ sb_data_e2.common.p_func.pf_id,
+ sb_data_e2.common.p_func.vf_id,
+ sb_data_e2.common.p_func.vf_valid,
+ sb_data_e2.common.p_func.vnic_id,
+ sb_data_e2.common.same_igu_sb_1b,
+ sb_data_e2.common.state);
+ } else {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
+ sb_data_e1x.common.p_func.pf_id,
+ sb_data_e1x.common.p_func.vf_id,
+ sb_data_e1x.common.p_func.vf_valid,
+ sb_data_e1x.common.p_func.vnic_id,
+ sb_data_e1x.common.same_igu_sb_1b,
+ sb_data_e1x.common.state);
+ }
+
+ /* SB_SMs data */
+ for (j = 0; j < HC_SB_MAX_SM; j++) {
+ pr_cont("SM[%d] __flags (0x%x) "
+ "igu_sb_id (0x%x) igu_seg_id(0x%x) "
+ "time_to_expire (0x%x) "
+ "timer_value(0x%x)\n", j,
+ hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
+ }
+
+		/* Indices data */
+ for (j = 0; j < loop; j++) {
+ pr_cont("INDEX[%d] flags (0x%x) "
+ "timeout (0x%x)\n", j,
+ hc_index_p[j].flags,
+ hc_index_p[j].timeout);
+ }
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Rings */
+ /* Rx */
+ for_each_rx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+ end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+ for (j = start; j != end; j = RX_BD(j + 1)) {
+ u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+ struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
+ i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+ }
+
+ start = RX_SGE(fp->rx_sge_prod);
+ end = RX_SGE(fp->last_max_sge);
+ for (j = start; j != end; j = RX_SGE(j + 1)) {
+ u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
+ struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
+
+ BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
+ i, j, rx_sge[1], rx_sge[0], sw_page->page);
+ }
+
+ start = RCQ_BD(fp->rx_comp_cons - 10);
+ end = RCQ_BD(fp->rx_comp_cons + 503);
+ for (j = start; j != end; j = RCQ_BD(j + 1)) {
+ u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+
+ BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+ i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
+ }
+ }
+
+ /* Tx */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
+ end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ struct sw_tx_bd *sw_bd =
+ &txdata->tx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, "
+ "packet[%x]=[%p,%x]\n",
+ i, cos, j, sw_bd->skb,
+ sw_bd->first_bd);
+ }
+
+ start = TX_BD(txdata->tx_bd_cons - 10);
+ end = TX_BD(txdata->tx_bd_cons + 254);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
+ "[%x:%x:%x:%x]\n",
+ i, cos, j, tx_bd[0], tx_bd[1],
+ tx_bd[2], tx_bd[3]);
+ }
+ }
+ }
+#endif
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ BNX2X_ERR("end crash dump -----------------\n");
+}
+
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC			10000	/* 10 milliseconds */
+#define FLR_WAIT_INTERAVAL 50 /* usec */
+#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+
+struct pbf_pN_buf_regs {
+ int pN;
+ u32 init_crd;
+ u32 crd;
+ u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+ int pN;
+ u32 lines_occup;
+ u32 lines_freed;
+};
+
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+ struct pbf_pN_buf_regs *regs,
+ u32 poll_count)
+{
+ u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+ u32 cur_cnt = poll_count;
+
+ crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+ crd = crd_start = REG_RD(bp, regs->crd);
+ init_crd = REG_RD(bp, regs->init_crd);
+
+ DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
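+	/* crd_freed is a free-running counter; SUB_S32() keeps the
+	 * comparison correct across 32-bit wraparound.
+	 */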
+ while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+ (init_crd - crd_start))) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ crd = REG_RD(bp, regs->crd);
+ crd_freed = REG_RD(bp, regs->crd_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
+ regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+ regs->pN, crd_freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+ struct pbf_pN_cmd_regs *regs,
+ u32 poll_count)
+{
+ u32 occup, to_free, freed, freed_start;
+ u32 cur_cnt = poll_count;
+
+ occup = to_free = REG_RD(bp, regs->lines_occup);
+ freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
+ while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ occup = REG_RD(bp, regs->lines_occup);
+ freed = REG_RD(bp, regs->lines_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
+ regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+ regs->pN, freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+ u32 expected, u32 poll_count)
+{
+ u32 cur_cnt = poll_count;
+ u32 val;
+
+ while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+ udelay(FLR_WAIT_INTERAVAL);
+
+ return val;
+}
+
+static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
+{
+ u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+ if (val != 0) {
+ BNX2X_ERR("%s usage count=%d\n", msg, val);
+ return 1;
+ }
+ return 0;
+}
+
+static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+ /* adjust polling timeout */
+ if (CHIP_REV_IS_EMUL(bp))
+ return FLR_POLL_CNT * 2000;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ return FLR_POLL_CNT * 120;
+
+ return FLR_POLL_CNT;
+}
+
+static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+ struct pbf_pN_cmd_regs cmd_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q0 :
+ PBF_REG_P0_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+ PBF_REG_P0_TQ_LINES_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q1 :
+ PBF_REG_P1_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+ PBF_REG_P1_TQ_LINES_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_LB_Q :
+ PBF_REG_P4_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+ PBF_REG_P4_TQ_LINES_FREED_CNT}
+ };
+
+ struct pbf_pN_buf_regs buf_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q0 :
+ PBF_REG_P0_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q0 :
+ PBF_REG_P0_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+ PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q1 :
+ PBF_REG_P1_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q1 :
+ PBF_REG_P1_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+ PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_LB_Q :
+ PBF_REG_P4_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_LB_Q :
+ PBF_REG_P4_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+ PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+ };
+
+ int i;
+
+ /* Verify the command queues are flushed: P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+ bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+
+ /* Verify the transmission buffers are flushed: P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+ bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
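+/* Pack the fields of the XSDM operation-generator command word used to
+ * trigger the FW final cleanup below. */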
+#define OP_GEN_PARAM(param) \
+ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+
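+/* Trigger the FW final cleanup for @clnup_func through the XSDM operation
+ * generator and wait for its completion flag in CSTORM internal memory. */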
+static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+ u32 poll_cnt)
+{
+ struct sdm_op_gen op_gen = {0};
+
+ u32 comp_addr = BAR_CSTRORM_INTMEM +
+ CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+ int ret = 0;
+
+ if (REG_RD(bp, comp_addr)) {
+ BNX2X_ERR("Cleanup complete is not 0\n");
+ return 1;
+ }
+
+ op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+ DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+
+ if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+ BNX2X_ERR("FW final cleanup did not succeed\n");
+ ret = 1;
+ }
+ /* Zero the completion for the next FLR */
+ REG_WR(bp, comp_addr, 0);
+
+ return ret;
+}
+
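+/* Report whether the Transactions Pending bit is set in the PCIe Device
+ * Status register, i.e. non-posted requests are still outstanding. */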
+static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+ int pos;
+ u16 status;
+
+ pos = pci_pcie_cap(dev);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
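+/* Wait for the per-PF usage counters in the CFC, DQ, QM, Timers and DMAE
+ * blocks to drop to zero before FLR cleanup proceeds. */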
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+
+ /* wait for CFC PF usage-counter to zero (includes all the VFs) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ CFC_REG_NUM_LCIDS_INSIDE_PF,
+ "CFC PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+
+ /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ DORQ_REG_PF_USAGE_CNT,
+ "DQ PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+ "QM PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+ "Timers VNIC usage counter timed out",
+ poll_cnt))
+ return 1;
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+ "Timers NUM_SCANS usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait DMAE PF usage counter to zero */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ dmae_reg_go_c[INIT_DMAE_C(bp)],
+ "DMAE dommand register timed out",
+ poll_cnt))
+ return 1;
+
+ return 0;
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+ DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, PBF_REG_DISABLE_PF);
+ DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+ val);
+}
+
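+/* Top-level PF FLR cleanup flow: re-enable target reads, wait for the HW
+ * usage counters to drain, run the FW final cleanup, verify the PBF TX
+ * path is flushed and no PCIe transactions are pending, then re-enable
+ * master access. */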
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+ /* Re-enable PF target read access */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Poll HW usage counters */
+ if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+ return -EBUSY;
+
+ /* Zero the igu 'trailing edge' and 'leading edge' */
+
+ /* Send the FW cleanup command */
+ if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+ return -EBUSY;
+
+ /* ATC cleanup */
+
+ /* Verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+ /* Wait 100ms (not adjusted according to platform) */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_is_pcie_pending(bp->pdev))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ /* Debug */
+ bnx2x_hw_enable_status(bp);
+
+ /*
+ * Master enable - set here because WB DMAE writes are performed
+ * before this register is re-initialized as part of the regular
+ * function init
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ return 0;
+}
+
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+ if (msix) {
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0);
+ val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else if (msi) {
+ val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else {
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ if (!CHIP_IS_E1(bp)) {
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ REG_WR(bp, addr, val);
+
+ val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+ }
+ }
+
+ if (CHIP_IS_E1(bp))
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
+
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
+ val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, addr, val);
+ /*
+ * Ensure that HC_CONFIG is written before leading/trailing edge config
+ */
+ mmiowb();
+ barrier();
+
+ if (!CHIP_IS_E1(bp)) {
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
++ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ }
+
+ /* Make sure that interrupts are indeed enabled from here on */
+ mmiowb();
+}
+
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
+{
+ u32 val;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+ val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ if (msix) {
+ val &= ~(IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+ } else if (msi) {
+ val &= ~IGU_PF_CONF_INT_LINE_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ } else {
+ val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ }
+
+ DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
+ val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+
+ barrier();
+
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
++ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+
+ /* Make sure that interrupts are indeed enabled from here on */
+ mmiowb();
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_enable(bp);
+ else
+ bnx2x_igu_int_enable(bp);
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /*
+ * In E1 we must use only the PCI configuration space to disable the
+ * MSI/MSI-X capability. It is forbidden to disable
+ * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is always left on,
+ * use the mask register to prevent the HC from sending
+ * interrupts after we exit this function
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int i, offset;
+
+ if (disable_hw)
+ /* prevent the HW from sending interrupts */
+ bnx2x_int_disable(bp);
+
+ /* make sure all ISRs are done */
+ if (msix) {
+ synchronize_irq(bp->msix_table[0].vector);
+ offset = 1;
+#ifdef BCM_CNIC
+ offset++;
+#endif
+ for_each_eth_queue(bp, i)
+ synchronize_irq(bp->msix_table[offset++].vector);
+ } else
+ synchronize_irq(bp->pdev->irq);
+
+ /* make sure sp_task is not running */
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
+ flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return false;
+ }
+
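+ /* HW lock registers start at MISC_REG_DRIVER_CONTROL_1 for functions
+ * 0-5 and at MISC_REG_DRIVER_CONTROL_7 for functions 6-7, with the
+ * per-function registers spaced 8 bytes apart.
+ */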
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp: driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this
+ * function belongs to. Currently only 2 engines are supported.
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+ if (BP_PATH(bp))
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+ else
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+#endif
+
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+ struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+
+ DP(BNX2X_MSG_SP,
+ "fp %d cid %d got ramrod #%d state is %x type is %d\n",
+ fp->index, cid, command, bp->state,
+ rr_cqe->ramrod_cqe.ramrod_type);
+
+ switch (command) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+ DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+ DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_HALT):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_HALT;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TERMINATE):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_TERMINATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_EMPTY):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_EMPTY;
+ break;
+
+ default:
+ BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+ command, fp->index);
+ return;
+ }
+
+ if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+ q_obj->complete_cmd(bp, q_obj, drv_cmd))
+ /* q_obj->complete_cmd() failure means that this was
+ * an unexpected completion.
+ *
+ * In this case we don't want to increase the bp->spq_left
+ * because apparently we haven't sent this command in the
+ * first place.
+ */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#else
+ return;
+#endif
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(&bp->cq_spq_left);
+ /* push the change in bp->spq_left and towards the memory */
+ smp_mb__after_atomic_inc();
+
+ DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
+
+ return;
+}
+
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
+{
+ u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
+
+ bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
+ start);
+}
+
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+ struct bnx2x *bp = netdev_priv(dev_instance);
+ u16 status = bnx2x_ack_int(bp);
+ u16 mask;
+ int i;
+ u8 cos;
+
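+ /* status is a bitmap: bit 0 - slowpath events, subsequent bits -
+ * per-SB indications (CNIC first when present, then the eth queues)
+ */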
+ /* Return here if interrupt is shared and it's not for us */
+ if (unlikely(status == 0)) {
+ DP(NETIF_MSG_INTR, "not our interrupt!\n");
+ return IRQ_NONE;
+ }
+ DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ mask = 0x2 << (fp->index + CNIC_PRESENT);
+ if (status & mask) {
+ /* Handle Rx or Tx according to SB id */
+ prefetch(fp->rx_cons_sb);
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ status &= ~mask;
+ }
+ }
+
+#ifdef BCM_CNIC
+ mask = 0x2;
+ if (status & (mask | 0x1)) {
+ struct cnic_ops *c_ops = NULL;
+
+ if (likely(bp->state == BNX2X_STATE_OPEN)) {
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
+
+ status &= ~mask;
+ }
+#endif
+
+ if (unlikely(status & 0x1)) {
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ status &= ~0x1;
+ if (!status)
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
+
+ return IRQ_HANDLED;
+}
+
+/* Link */
+
+/*
+ * General service functions
+ */
+
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+ int cnt;
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is not already taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit) {
+ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ lock_status, resource_bit);
+ return -EEXIST;
+ }
+
+ /* Try for 5 seconds, polling every 5 ms */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return 0;
+
+ msleep(5);
+ }
+ DP(NETIF_MSG_HW, "Timeout\n");
+ return -EAGAIN;
+}
+
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is currently taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (!(lock_status & resource_bit)) {
+ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ lock_status, resource_bit);
+ return -EFAULT;
+ }
+
+ REG_WR(bp, hw_lock_control_reg, resource_bit);
+ return 0;
+}
+
+
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+ int value;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ /* read GPIO value */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+ /* get the requested pin value */
+ if ((gpio_reg & gpio_mask) == gpio_mask)
+ value = 1;
+ else
+ value = 0;
+
+ DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
+
+ return value;
+}
+
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+ gpio_num, gpio_shift);
+ /* set FLOAT */
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+ u32 gpio_reg = 0;
+ int rc = 0;
+
+ /* Any port swapping should be handled by caller. */
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+ /* set CLR */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+ /* set SET */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+ /* set FLOAT */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc == 0)
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return rc;
+}
+
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO int */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+ DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
+ "output low\n", gpio_num, gpio_shift);
+ /* clear SET and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+ DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
+ "output high\n", gpio_num, gpio_shift);
+ /* clear CLR and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+{
+ u32 spio_mask = (1 << spio_num);
+ u32 spio_reg;
+
+ if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+ (spio_num > MISC_REGISTERS_SPIO_7)) {
+ BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+ /* read SPIO and mask except the float bits */
+ spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_SPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+ /* clear FLOAT and set CLR */
+ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+ /* clear FLOAT and set SET */
+ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+ /* set FLOAT */
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_SPIO, spio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+
+ return 0;
+}
+
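+/* Translate the negotiated IEEE pause advertisement bits into the ethtool
+ * ADVERTISED_Pause/ADVERTISED_Asym_Pause flags for this configuration. */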
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+ u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ switch (bp->link_vars.ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+ bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
+ break;
+
+ default:
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+ }
+}
+
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+ if (!BP_NOMCP(bp)) {
+ u8 rc;
+ int cfx_idx = bnx2x_get_link_cfg_idx(bp);
+ u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
+ /*
+ * Initialize link parameters structure variables
+ * It is recommended to turn off RX FC for jumbo frames
+ * for better performance
+ */
+ if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+ else
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+
+ bnx2x_acquire_phy_lock(bp);
+
+ if (load_mode == LOAD_DIAG) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_XGXS;
+ /* do PHY loopback at 10G speed, if possible */
+ if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+ if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_10000;
+ else
+ lp->req_line_speed[cfx_idx] =
+ SPEED_1000;
+ }
+ }
+
+ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+
+ if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ bnx2x_link_report(bp);
+ } else
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
+ return rc;
+ }
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
+{
+ u8 rc = 0;
+
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+ is_serdes);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+ return rc;
+}
+
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
+{
+ u32 r_param = bp->link_vars.line_speed / 8;
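+ /* line_speed is in Mbps, so r_param is roughly the port rate in
+ * bytes per usec */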
+ u32 fair_periodic_timeout_usec;
+ u32 t_fair;
+
+ memset(&(bp->cmng.rs_vars), 0,
+ sizeof(struct rate_shaping_vars_per_port));
+ memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
+
+ /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+ bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
+
+ /* this is the threshold below which no timer arming will occur.
+ The 1.25 coefficient makes the threshold a little bigger than
+ the real time, to compensate for timer inaccuracy */
+ bp->cmng.rs_vars.rs_threshold =
+ (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
+
+ /* resolution of fairness timer */
+ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+ /* for 10G it is 1000usec. for 1G it is 10000usec. */
+ t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
+
+ /* this is the threshold below which we won't arm the timer anymore */
+ bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
+
+ /* we multiply by 1e3/8 to get bytes/msec.
+ We don't want the credits to exceed a credit of
+ t_fair*FAIR_MEM (the algorithm resolution) */
+ bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+ /* since each tick is 4 usec */
+ bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
+}
+
+/* Calculates the sum of vn_min_rates.
+ It's needed for further normalizing of the min_rates.
+ Returns:
+ sum of vn_min_rates.
+ or
+ 0 - if all the min_rates are 0.
+ In the latter case the fairness algorithm should be deactivated.
+ If not all min_rates are zero then those that are zero will be set to 1.
+ */
+static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+{
+ int all_zero = 1;
+ int vn;
+
+ bp->vn_weight_sum = 0;
- int func = 2*vn + BP_PORT(bp);
++ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ u32 vn_cfg = bp->mf_config[vn];
+ u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+ /* Skip hidden vns */
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ /* If min rate is zero - set it to 1 */
+ if (!vn_min_rate)
+ vn_min_rate = DEF_MIN_RATE;
+ else
+ all_zero = 0;
+
+ bp->vn_weight_sum += vn_min_rate;
+ }
+
+ /* if ETS or all min rates are zeros - disable fairness */
+ if (BNX2X_IS_ETS_ENABLED(bp)) {
+ bp->cmng.flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+ } else if (all_zero) {
+ bp->cmng.flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+ " fairness will be disabled\n");
+ } else
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+}
+
++/* returns func by VN for current port */
++static inline int func_by_vn(struct bnx2x *bp, int vn)
++{
++ return 2 * vn + BP_PORT(bp);
++}
++
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+{
+ struct rate_shaping_vars_per_vn m_rs_vn;
+ struct fairness_vars_per_vn m_fair_vn;
+ u32 vn_cfg = bp->mf_config[vn];
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
++ int func = func_by_vn(bp, vn);
+ u16 vn_min_rate, vn_max_rate;
+ int i;
+
+ /* If function is hidden - set min and max to zeroes */
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+ vn_min_rate = 0;
+ vn_max_rate = 0;
+
+ } else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
+ vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+ /* If fairness is enabled (not all min rates are zeroes) and
+ if current min rate is zero - set it to 1.
+ This is a requirement of the algorithm. */
+ if (bp->vn_weight_sum && (vn_min_rate == 0))
+ vn_min_rate = DEF_MIN_RATE;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg in percents of linkspeed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
+ }
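+ /* e.g. in SD mode a maxCfg of 50 (100Mb units) yields a vn_max_rate
+ * of 5000 Mbps, while in SI mode maxCfg is read as a percentage of
+ * the current link speed */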
+
+ DP(NETIF_MSG_IFUP,
+ "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
+ func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
+
+ memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
+ memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
+
+ /* global vn counter - maximal Mbps for this vn */
+ m_rs_vn.vn_counter.rate = vn_max_rate;
+
+ /* quota - number of bytes transmitted in this period */
+ m_rs_vn.vn_counter.quota =
+ (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+ if (bp->vn_weight_sum) {
+ /* credit for each period of the fairness algorithm:
+ number of bytes in T_FAIR (the vns share the port rate).
+ vn_weight_sum should not be larger than 10000, thus
+ T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
+ than zero */
+ m_fair_vn.vn_credit_delta =
+ max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+ (8 * bp->vn_weight_sum))),
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
+ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
+ m_fair_vn.vn_credit_delta);
+ }
+
+ /* Store it to internal memory */
+ for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
+ ((u32 *)(&m_rs_vn))[i]);
+
+ for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
+ ((u32 *)(&m_fair_vn))[i]);
+}
+
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+ if (CHIP_REV_IS_SLOW(bp))
+ return CMNG_FNS_NONE;
+ if (IS_MF(bp))
+ return CMNG_FNS_MINMAX;
+
+ return CMNG_FNS_NONE;
+}
+
+void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+ int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
+
+ if (BP_NOMCP(bp))
+ return; /* what should be the default value in this case? */
+
+ /* For 2 port configuration the absolute function number formula
+ * is:
+ * abs_func = 2 * vn + BP_PORT + BP_PATH
+ *
+ * and there are 4 functions per port
+ *
+ * For 4 port configuration it is
+ * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+ *
+ * and there are 2 functions per port
+ */
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
++ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+ if (func >= E1H_FUNC_MAX)
+ break;
+
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ }
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+
+ if (cmng_type == CMNG_FNS_MINMAX) {
+ int vn;
+
+ /* clear cmng_enables */
+ bp->cmng.flags.cmng_enables = 0;
+
+ /* read mf conf from shmem */
+ if (read_cfg)
+ bnx2x_read_mf_cfg(bp);
+
+ /* Init rate shaping and fairness contexts */
+ bnx2x_init_port_minmax(bp);
+
+ /* vn_weight_sum and enable fairness if not 0 */
+ bnx2x_calc_vn_weight_sum(bp);
+
+ /* calculate and set min-max rate for each vn */
+ if (bp->port.pmf)
- int port = BP_PORT(bp);
++ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
+ bnx2x_init_vn_minmax(bp, vn);
+
+ /* always enable rate shaping and fairness */
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+ if (!bp->vn_weight_sum)
+ DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+ " fairness will be disabled\n");
+ return;
+ }
+
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "rate shaping and fairness are disabled\n");
+}
+
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- if (vn == BP_E1HVN(bp))
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
- func = ((vn << 1) | port);
++ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
++ if (vn == BP_VN(bp))
+ continue;
+
- val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
++ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
+{
+ /* Make sure that we are synced with the current statistics */
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ bnx2x_link_update(&bp->link_params, &bp->link_vars);
+
+ if (bp->link_vars.link_up) {
+
+ /* dropless flow control */
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
+ int port = BP_PORT(bp);
+ u32 pause_enabled = 0;
+
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
+ pause_enabled);
+ }
+
+ if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
+ struct host_port_stats *pstats;
+
+ pstats = bnx2x_sp(bp, port_stats);
+ /* reset old mac stats */
+ memset(&(pstats->mac_stx[0]), 0,
+ sizeof(struct mac_stx));
+ }
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ }
+
+ if (bp->link_vars.link_up && bp->link_vars.line_speed) {
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+
+ __bnx2x_link_report(bp);
+
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+ if (bp->state != BNX2X_STATE_OPEN)
+ return;
+
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* indicate link status */
+ bnx2x_link_report(bp);
+}
+
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ bp->port.pmf = 1;
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+ /*
+ * We need the mb() to ensure the ordering between the writing to
+ * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+ */
+ smp_mb();
+
+ /* queue a periodic task */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+ bnx2x_dcbx_pmf_update(bp);
+
+ /* enable nig attention */
- pause->sge_th_hi = 250;
- pause->sge_th_lo = 150;
++ val = (0xff0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+ }
+
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
+{
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 seq;
+ u32 rc = 0;
+ u32 cnt = 1;
+ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+ mutex_lock(&bp->fw_mb_mutex);
+ seq = ++bp->fw_seq;
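+ /* the FW echoes this sequence number back in fw_mb_header */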
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+ DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+ (command | seq), param);
+
+ do {
+ /* let the FW do its magic ... */
+ msleep(delay);
+
+ rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*10ms) */
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt*delay, rc, seq);
+
+ /* is this a reply to our command? */
+ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+ rc &= FW_MSG_CODE_MASK;
+ else {
+ /* FW BUG! */
+ BNX2X_ERR("FW failed to respond!\n");
+ bnx2x_fw_dump(bp);
+ rc = 0;
+ }
+ mutex_unlock(&bp->fw_mb_mutex);
+
+ return rc;
+}
+
+static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+{
+#ifdef BCM_CNIC
+ /* Statistics are not supported for CNIC Clients at the moment */
+ if (IS_FCOE_FP(fp))
+ return false;
+#endif
+ return true;
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+ if (CHIP_IS_E1x(bp)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
+
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
+
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
+}
+
+/**
+ * bnx2x_get_common_flags - Return common flags
+ *
+ * @bp device handle
+ * @fp queue handle
+ * @zero_stats TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common to Tx-only and regular connections.
+ */
+static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool zero_stats)
+{
+ unsigned long flags = 0;
+
+ /* PF driver will always initialize the Queue to an ACTIVE state */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
+
+ /* tx only connections collect statistics (on the same index as the
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
+ */
+ if (stat_counter_valid(bp, fp)) {
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ }
+
+ return flags;
+}
+
+static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ unsigned long flags = 0;
+
+ /* calculate other queue flags */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, &flags);
+
+ if (IS_FCOE_FP(fp))
+ __set_bit(BNX2X_Q_FLG_FCOE, &flags);
+
+ if (!fp->disable_tpa) {
+ __set_bit(BNX2X_Q_FLG_TPA, &flags);
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+ }
+
+ if (leading) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &flags);
+ }
+
+ /* Always set HW VLAN stripping */
+ __set_bit(BNX2X_Q_FLG_VLAN, &flags);
+
+
+ return flags | bnx2x_get_common_flags(bp, fp, true);
+}
+
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+ u8 cos)
+{
+ gen_init->stat_id = bnx2x_stats_id(fp);
+ gen_init->spcl_id = fp->cl_id;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ gen_init->mtu = bp->dev->mtu;
+
+ gen_init->cos = cos;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+ struct bnx2x_rxq_setup_params *rxq_init)
+{
+ u8 max_sge = 0;
+ u16 sge_sz = 0;
+ u16 tpa_agg_size = 0;
+
+ if (!fp->disable_tpa) {
- pause->bd_th_hi = 350;
- pause->bd_th_lo = 250;
- pause->rcq_th_hi = 350;
- pause->rcq_th_lo = 250;
++ pause->sge_th_lo = SGE_TH_LO(bp);
++ pause->sge_th_hi = SGE_TH_HI(bp);
++
++ /* validate SGE ring has enough to cross high threshold */
++ WARN_ON(bp->dropless_fc &&
++ pause->sge_th_hi + FW_PREFETCH_CNT >
++ MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
++
+ tpa_agg_size = min_t(u32,
+ (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+ SGE_PAGE_SHIFT;
+ max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+ 0xffff);
+ }
+
+ /* pause - not for e1 */
+ if (!CHIP_IS_E1(bp)) {
- rxq_init->max_tpa_queues =
- (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
++ pause->bd_th_lo = BD_TH_LO(bp);
++ pause->bd_th_hi = BD_TH_HI(bp);
++
++ pause->rcq_th_lo = RCQ_TH_LO(bp);
++ pause->rcq_th_hi = RCQ_TH_HI(bp);
++ /*
++ * validate that rings have enough entries to cross
++ * high thresholds
++ */
++ WARN_ON(bp->dropless_fc &&
++ pause->bd_th_hi + FW_PREFETCH_CNT >
++ bp->rx_ring_size);
++ WARN_ON(bp->dropless_fc &&
++ pause->rcq_th_hi + FW_PREFETCH_CNT >
++ NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
+
+ pause->pri_map = 1;
+ }
+
+ /* rxq setup */
+ rxq_init->dscr_map = fp->rx_desc_mapping;
+ rxq_init->sge_map = fp->rx_sge_mapping;
+ rxq_init->rcq_map = fp->rx_comp_mapping;
+ rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+
+ /* This should be the maximum number of data bytes that may be
+ * placed on the BD (not including padding).
+ */
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
+ IP_HEADER_ALIGNMENT_PADDING;
+
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->tpa_agg_sz = tpa_agg_size;
+ rxq_init->sge_buf_sz = sge_sz;
+ rxq_init->max_sges_pkt = max_sge;
+ rxq_init->rss_engine_id = BP_FUNC(bp);
+
+ /* Maximum number of simultaneous TPA aggregations for this Queue.
+ *
+ * For PF Clients it should be the maximum available number.
+ * VF driver(s) may want to define it to a smaller value.
+ */
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++ rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
+
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ if (IS_FCOE_FP(fp))
+ rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+ else
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+}
+
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+ u8 cos)
+{
+ txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+
+ /*
+ * set the tss leading client id for TX classification ==
+ * leading RSS client id
+ */
+ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ if (IS_FCOE_FP(fp)) {
+ txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+ }
+}
+
+static void bnx2x_pf_init(struct bnx2x *bp)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ struct event_ring_data eq_data = { {0} };
+ u16 flags;
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset IGU PF statistics: MSIX + ATTN */
+ /* PF */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ /* ATTN */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ }
+
+ /* function setup flags */
+ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+ /* This flag is relevant for E1x only.
+ * E2 doesn't have a TPA configuration at the function level.
+ */
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = BP_FUNC(bp);
+ func_init.spq_map = bp->spq_mapping;
+ func_init.spq_prod = bp->spq_prod_idx;
+
+ bnx2x_func_init(bp, &func_init);
+
+ memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+ * Congestion management values depend on the link rate.
+ * There is no active link yet, so the initial link rate is set to
+ * 10 Gbps. When the link comes up, the congestion management values
+ * are re-calculated according to the actual link rate.
+ */
+ bp->link_vars.line_speed = SPEED_10000;
+ bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+ /* Only the PMF sets the HW */
+ if (bp->port.pmf)
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+ /* init Event Queue */
+ eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+ eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+ eq_data.producer = bp->eq_prod;
+ eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+ eq_data.sb_id = DEF_SB_ID;
+ storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ bnx2x_tx_disable(bp);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+
+ /* Tx queues should only be re-enabled */
+ netif_tx_wake_all_queues(bp->dev);
+
+ /*
+ * Do not call netif_carrier_on() here; it will be called when the
+ * link state is checked and the link is up
+ */
+}
+
+/* called due to MCP event (on pmf):
+ * reread new bandwidth configuration
+ * configure FW
+ * notify other functions about the change
+ */
+static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+ if (bp->link_vars.link_up) {
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ }
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+ bnx2x_config_mf_bw(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
+static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+{
+ DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+
+ if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+
+ /*
+ * This is the only place besides the function initialization
+ * where the bp->flags can change so it is done without any
+ * locks
+ */
+ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+ DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+ bp->flags |= MF_FUNC_DIS;
+
+ bnx2x_e1h_disable(bp);
+ } else {
+ DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+ bp->flags &= ~MF_FUNC_DIS;
+
+ bnx2x_e1h_enable(bp);
+ }
+ dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+ }
+ if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+ bnx2x_config_mf_bw(bp);
+ dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+ }
+
+ /* Report results to MCP */
+ if (dcc_event)
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
+ else
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
+}
+
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+ struct eth_spe *next_spe = bp->spq_prod_bd;
+
+ if (bp->spq_prod_bd == bp->spq_last_bd) {
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_prod_idx = 0;
+ DP(NETIF_MSG_TIMER, "end of spq\n");
+ } else {
+ bp->spq_prod_bd++;
+ bp->spq_prod_idx++;
+ }
+ return next_spe;
+}
+
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+
+ /*
+ * Make sure that BD data is updated before writing the producer:
+ * BD data is written to the memory, the producer is read from the
+ * memory, thus we need a full memory barrier to ensure the ordering.
+ */
+ mb();
+
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
+ mmiowb();
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd: command to check
+ * @cmd_type: command type
+ */
+static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+ if ((cmd_type == NONE_CONNECTION_TYPE) ||
+ (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+ (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+ (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
+ return true;
+ else
+ return false;
+
+}
+
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp: driver handle
+ * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid: SW CID the command is related to
+ * @data_hi: command private data address (high 32 bits)
+ * @data_lo: command private data address (low 32 bits)
+ * @cmd_type: command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type)
+{
+ struct eth_spe *spe;
+ u16 type;
+ bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EIO;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+
+ /* CID needs port number to be encoded in it */
+ spe->hdr.conn_and_cmd_data =
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
+
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+
+ spe->hdr.type = cpu_to_le16(type);
+
+ spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+ spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+ /*
+ * It's ok if the actual decrement is issued towards the memory
+ * somewhere between the spin_lock and spin_unlock. Thus no
+ * further explicit memory barrier is needed.
+ */
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+
+
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
+ "type(0x%x) left (CQ, EQ) (%x,%x)\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
+ HW_CID(bp, cid), data_hi, data_lo, type,
+ atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
+
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+ return 0;
+}
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
+{
+ u32 j, val;
+ int rc = 0;
+
+ might_sleep();
+ for (j = 0; j < 1000; j++) {
+ val = (1UL << 31);
+ REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+ val = REG_RD(bp, GRCBASE_MCP + 0x9c);
+ if (val & (1L << 31))
+ break;
+
+ msleep(5);
+ }
+ if (!(val & (1L << 31))) {
+ BNX2X_ERR("Cannot acquire MCP access lock register\n");
+ rc = -EBUSY;
+ }
+
+ return rc;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
+{
+ REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+}
+
+#define BNX2X_DEF_SB_ATT_IDX 0x0001
+#define BNX2X_DEF_SB_IDX 0x0002
+
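+/* Compare the default status block indices against the driver's cached
+ * copies and return a mask of those the chip has advanced. */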
+static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ u16 rc = 0;
+
+ barrier(); /* status block is written to by the chip */
+ if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+ bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+ rc |= BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ if (bp->def_idx != def_sb->sp_sb.running_index) {
+ bp->def_idx = def_sb->sp_sb.running_index;
+ rc |= BNX2X_DEF_SB_IDX;
+ }
+
+ /* Do not reorder: reading the indices should complete before handling */
+ barrier();
+ return rc;
+}
+
+/*
+ * slow path service functions
+ */
+
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+ int port = BP_PORT(bp);
+ u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+ u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+ NIG_REG_MASK_INTERRUPT_PORT0;
+ u32 aeu_mask;
+ u32 nig_mask = 0;
+ u32 reg_addr;
+
+ if (bp->attn_state & asserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, aeu_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
+ aeu_mask, asserted);
+ aeu_mask &= ~(asserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, aeu_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state |= asserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+ if (asserted & ATTN_HARD_WIRED_MASK) {
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+
+ bnx2x_acquire_phy_lock(bp);
+
+ /* save nig interrupt mask */
+ nig_mask = REG_RD(bp, nig_int_mask_addr);
+
+ /* If nig_mask is not set, no need to call the update
+ * function.
+ */
+ if (nig_mask) {
+ REG_WR(bp, nig_int_mask_addr, 0);
+
+ bnx2x_link_attn(bp);
+ }
+
+ /* handle unicore attn? */
+ }
+ if (asserted & ATTN_SW_TIMER_4_FUNC)
+ DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+
+ if (asserted & GPIO_2_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+
+ if (asserted & GPIO_3_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+
+ if (asserted & GPIO_4_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+
+ if (port == 0) {
+ if (asserted & ATTN_GENERAL_ATTN_1) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_2) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_3) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+ }
+ } else {
+ if (asserted & ATTN_GENERAL_ATTN_4) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_5) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_6) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+ }
+ }
+
+ } /* if hardwired */
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_SET);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
+
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, asserted);
+
+ /* now set back the mask */
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+ REG_WR(bp, nig_int_mask_addr, nig_mask);
+ bnx2x_release_phy_lock(bp);
+ }
+}
+
+static inline void bnx2x_fan_failure(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 ext_phy_config;
+ /* mark the failure */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+ ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+ SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+ ext_phy_config);
+
+ /* log the failure */
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+ " the driver to shutdown the card to prevent permanent"
+ " damage. Please contact OEM Support for assistance\n");
+}
+
+static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+ int port = BP_PORT(bp);
+ int reg_offset;
+ u32 val;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+ if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("SPIO5 hw attention\n");
+
+ /* Fan failure attention */
+ bnx2x_hw_reset_phy(&bp->link_params);
+ bnx2x_fan_failure(bp);
+ }
+
+ if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_handle_module_detect_int(&bp->link_params);
+ bnx2x_release_phy_lock(bp);
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_0) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+
+ val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+ BNX2X_ERR("DB hw attention 0x%x\n", val);
+ /* DORQ discard attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from DORQ\n");
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_1) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+ val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+ BNX2X_ERR("CFC hw attention 0x%x\n", val);
+ /* CFC error attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from CFC\n");
+ }
+
+ if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+ BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
+ /* RQ_USDMDP_FIFO_OVERFLOW */
+ if (val & 0x18000)
+ BNX2X_ERR("FATAL error from PXP\n");
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
+ BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
+ }
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_2) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+ if (attn & BNX2X_PMF_LINK_ASSERT) {
+ int func = BP_FUNC(bp);
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+ bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
+ func_mf_config[BP_ABS_FUNC(bp)].config);
+ val = SHMEM_RD(bp,
+ func_mb[BP_FW_MB_IDX(bp)].drv_status);
+ if (val & DRV_STATUS_DCC_EVENT_MASK)
+ bnx2x_dcc_event(bp,
+ (val & DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & DRV_STATUS_SET_MF_BW)
+ bnx2x_set_mf_bw(bp);
+
+ if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+ bnx2x_pmf_update(bp);
+
+ if (bp->port.pmf &&
+ (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
+ bp->dcbx_enabled > 0)
+ /* start dcbx state machine */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
+ if (bp->link_vars.periodic_flags &
+ PERIODIC_FLAGS_LINK_EVENT) {
+ /* sync with link */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_vars.periodic_flags &=
+ ~PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(bp);
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+ bnx2x_link_report(bp);
+ }
+ /* Always call it here: bnx2x_link_report() will
+ * prevent duplicate link indications.
+ */
+ bnx2x__link_status_update(bp);
+ } else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+ BNX2X_ERR("MC assert!\n");
+ bnx2x_mc_assert(bp);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+ bnx2x_panic();
+
+ } else if (attn & BNX2X_MCP_ASSERT) {
+
+ BNX2X_ERR("MCP assert!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+ bnx2x_fw_dump(bp);
+
+ } else
+ BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+ }
+
+ if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+ BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+ if (attn & BNX2X_GRC_TIMEOUT) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
+ BNX2X_ERR("GRC time-out 0x%08x\n", val);
+ }
+ if (attn & BNX2X_GRC_RSV) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
+ BNX2X_ERR("GRC reserved 0x%08x\n", val);
+ }
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+ }
+}
+
+/*
+ * Bits map:
+ * 0-7 - Engine0 load counter.
+ * 8-15 - Engine1 load counter.
+ * 16 - Engine0 RESET_IN_PROGRESS bit.
+ * 17 - Engine1 RESET_IN_PROGRESS bit.
+ * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
+ * on the engine
+ * 19 - Engine1 ONE_IS_LOADED.
+ * 20 - Chip reset flow bit. When set, a non-leader must wait for both
+ * engines' leaders to complete (check both RESET_IN_PROGRESS bits,
+ * not just the one belonging to its engine).
+ *
+ */
+#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
+#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
+#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
+#define BNX2X_GLOBAL_RESET_BIT 0x00040000
+
+/*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* Clear the bit */
+ val &= ~bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* Set the bit */
+ val |= bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = engine ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* return false if bit is set */
+ return (val & bit) ? false : true;
+}
+
+/*
+ * Increment the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* increment... */
+ val1++;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/**
+ * bnx2x_dec_load_cnt - decrement the load counter
+ *
+ * @bp: driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * the new counter value.
+ */
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* decrement... */
+ val1--;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+
+ return val1;
+}
+
+/*
+ * Read the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+{
+ u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+ u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+
+ val = (val & mask) >> shift;
+
+ DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+
+ return val;
+}
+
+/*
+ * Reset the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+ pr_cont("%s%s", idx ? ", " : "", blk);
+}
+
+static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
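+ /* walk sig bit by bit: report the block behind each set bit and
+ * clear it until no bits remain
+ */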
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XPB");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+ bool *global, bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PBF");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "NIG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "VAUX PCI CORE");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "UCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CCM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "DMAE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+ bool *global, bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ if (print)
+ _print_next_block(par_num++, "MCP ROM");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP RX");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP TX");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP SCPAD");
+ *global = true;
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PGLUE_B");
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "ATC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ u32 *sig)
+{
+ if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+ int par_num = 0;
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
+ "[4]:0x%08x\n",
+ sig[0] & HW_PRTY_ASSERT_SET_0,
+ sig[1] & HW_PRTY_ASSERT_SET_1,
+ sig[2] & HW_PRTY_ASSERT_SET_2,
+ sig[3] & HW_PRTY_ASSERT_SET_3,
+ sig[4] & HW_PRTY_ASSERT_SET_4);
+ if (print)
+ netdev_err(bp->dev,
+ "Parity errors detected in blocks: ");
+ par_num = bnx2x_check_blocks_with_parity0(
+ sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity1(
+ sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity2(
+ sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity3(
+ sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity4(
+ sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+
+ if (print)
+ pr_cont("\n");
+
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp: driver handle
+ * @global: true if there was a global attention
+ * @print: show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
+{
+ struct attn_route attn = { {0} };
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, global, print, attn.sig);
+}
+
+
+static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+ if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+
+ val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+ BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "ADDRESS_ERROR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "INCORRECT_RCV_BEHAVIOR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "WAS_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_LENGTH_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_GRC_SPACE_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_MSIX_BAR_VIOLATION_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_IN_TWO_RCBS_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "CSSNOOP_FIFO_OVERFLOW\n");
+ }
+ if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+ val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+ BNX2X_ERR("ATC hw attention 0x%x\n", val);
+ if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG"
+ "_ATC_TCPL_TO_NOT_PEND\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_GPA_MULTIPLE_HITS\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_RCPL_TO_EMPTY_CNT\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_IREQ_LESS_THAN_STU\n");
+ }
+
+ if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+ BNX2X_ERR("FATAL parity attention set4 0x%x\n",
+ (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+ }
+
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
+ int port = BP_PORT(bp);
+ int index;
+ u32 reg_addr;
+ u32 val;
+ u32 aeu_mask;
+ bool global = false;
+
+ /* need to take HW lock because MCP or other port might also
+ try to handle this event */
+ bnx2x_acquire_alr(bp);
+
+ if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+ /* In case of parity errors don't handle attentions so that
+ * the other function can "see" the parity errors too.
+ */
+#else
+ bnx2x_panic();
+#endif
+ bnx2x_release_alr(bp);
+ return;
+ }
+
+ attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+ attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+ attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+ attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] =
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
+ else
+ attn.sig[4] = 0;
+
+ DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
+ attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
+
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ if (deasserted & (1 << index)) {
+ group_mask = &bp->attn_group[index];
+
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
+ "%08x %08x %08x\n",
+ index,
+ group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3],
+ group_mask->sig[4]);
+
+ bnx2x_attn_int_deasserted4(bp,
+ attn.sig[4] & group_mask->sig[4]);
+ bnx2x_attn_int_deasserted3(bp,
+ attn.sig[3] & group_mask->sig[3]);
+ bnx2x_attn_int_deasserted1(bp,
+ attn.sig[1] & group_mask->sig[1]);
+ bnx2x_attn_int_deasserted2(bp,
+ attn.sig[2] & group_mask->sig[2]);
+ bnx2x_attn_int_deasserted0(bp,
+ attn.sig[0] & group_mask->sig[0]);
+ }
+ }
+
+ bnx2x_release_alr(bp);
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_CLR);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
+
+ val = ~deasserted;
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, val);
+
+ if (~bp->attn_state & deasserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, reg_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+ aeu_mask, deasserted);
+ aeu_mask |= (deasserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, reg_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state &= ~deasserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
+
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+ /* read local copy of bits */
+ u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits);
+ u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits_ack);
+ u32 attn_state = bp->attn_state;
+
+ /* look for changed bits */
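+ /* newly asserted: set in HW but not yet acked or tracked;
+ * newly deasserted: clear in HW but still acked and tracked
+ */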
+ u32 asserted = attn_bits & ~attn_ack & ~attn_state;
+ u32 deasserted = ~attn_bits & attn_ack & attn_state;
+
+ DP(NETIF_MSG_HW,
+ "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
+ attn_bits, attn_ack, asserted, deasserted);
+
+ if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+ BNX2X_ERR("BAD attention state\n");
+
+ /* handle bits that were raised */
+ if (asserted)
+ bnx2x_attn_int_asserted(bp, asserted);
+
+ if (deasserted)
+ bnx2x_attn_int_deasserted(bp, deasserted);
+}
+
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+
+ bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+ igu_addr);
+}
+
+static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+ /* No memory barriers */
+ storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+ mmiowb(); /* keep prod updates ordered */
+}
+
+#ifdef BCM_CNIC
+static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+ union event_ring_elem *elem)
+{
+ u8 err = elem->message.error;
+
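+ /* completions for CIDs outside the CNIC range are not ours -
+ * tell the caller to handle them
+ */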
+ if (!bp->cnic_eth_dev.starting_cid ||
+ (cid < bp->cnic_eth_dev.starting_cid &&
+ cid != bp->cnic_eth_dev.iscsi_l2_cid))
+ return 1;
+
+ DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+ if (unlikely(err)) {
+
+ BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+ cid);
+ bnx2x_panic_dump(bp);
+ }
+ bnx2x_cnic_cfc_comp(bp, cid, err);
+ return 0;
+}
+#endif
+
+static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+ struct bnx2x_mcast_ramrod_params rparam;
+ int rc;
+
+ memset(&rparam, 0, sizeof(rparam));
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ netif_addr_lock_bh(bp->dev);
+
+ /* Clear pending state for the last command */
+ bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+ u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
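+
+ /* the echo field packs the pending command type in its high bits
+ * and the connection ID in its low bits
+ */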
+
+ /* Always push next commands out, don't wait here */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+#ifdef BCM_CNIC
+ if (cid == BNX2X_ISCSI_ETH_CID)
+ vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+ else
+#endif
+ vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+ break;
+
+ case BNX2X_FILTER_MCAST_PENDING:
+ /* This is only relevant for 57710 where multicast MACs are
+ * configured as unicast MACs using the same ramrod.
+ */
+ bnx2x_handle_mcast_eqe(bp);
+ return;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+
+ rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+#endif
+
+static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+ netif_addr_lock_bh(bp->dev);
+
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ /* Send rx_mode command again if was requested */
+ if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+ bnx2x_set_storm_rx_mode(bp);
+#ifdef BCM_CNIC
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+#endif
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+ struct bnx2x *bp, u32 cid)
+{
+ DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+#ifdef BCM_CNIC
+ if (cid == BNX2X_FCOE_ETH_CID)
+ return &bnx2x_fcoe(bp, q_obj);
+ else
+#endif
+ return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+}
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+ u16 hw_cons, sw_cons, sw_prod;
+ union event_ring_elem *elem;
+ u32 cid;
+ u8 opcode;
+ int spqe_cnt = 0;
+ struct bnx2x_queue_sp_obj *q_obj;
+ struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+ struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
+
+ hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+ /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+ * When we get the next-page element we need to adjust so the loop
+ * condition below will be met. The next element is the size of a
+ * regular element and hence incrementing by 1.
+ */
+ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+ hw_cons++;
+
+ /* This function may never run in parallel with itself for a
+ * specific bp, thus there is no need for a "paired" read memory
+ * barrier here.
+ */
+ sw_cons = bp->eq_cons;
+ sw_prod = bp->eq_prod;
+
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
+ hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
+
+ for (; sw_cons != hw_cons;
+ sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+
+ elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
+
+
+ /* handle eq element */
+ switch (opcode) {
+ case EVENT_RING_OPCODE_STAT_QUERY:
+ DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+ bp->stats_comp++);
+ /* nothing to do with stats comp */
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_CFC_DEL:
+ /* handle according to cid range */
+ /*
+ * we may want to verify here that the bp state is
+ * HALTING
+ */
+ DP(BNX2X_MSG_SP,
+ "got delete ramrod for MULTI[%d]\n", cid);
+#ifdef BCM_CNIC
+ if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+ goto next_spqe;
+#endif
+ q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+ if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_STOP_TRAFFIC:
+ DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_STOP))
+ break;
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_START_TRAFFIC:
+ DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_START))
+ break;
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+ goto next_spqe;
+ case EVENT_RING_OPCODE_FUNCTION_START:
+ DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_STOP:
+ DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+ break;
+
+ goto next_spqe;
+ }
+
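+ /* the remaining opcodes are only valid in specific driver states,
+ * so dispatch on the (opcode | state) combination
+ */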
+ switch (opcode | bp->state) {
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPENING_WAIT4_PORT):
+ cid = elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK;
+ DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
+ cid);
+ rss_raw->clear_pending(rss_raw);
+ break;
+
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_SET_MAC |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+ bnx2x_handle_classification_eqe(bp, elem);
+ break;
+
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got mcast ramrod\n");
+ bnx2x_handle_mcast_eqe(bp);
+ break;
+
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
+ bnx2x_handle_rx_mode_eqe(bp);
+ break;
+ default:
+ /* unknown event: log the error and continue */
+ BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+ elem->message.opcode, bp->state);
+ }
+next_spqe:
+ spqe_cnt++;
+ } /* for */
+
+ smp_mb__before_atomic_inc();
+ atomic_add(spqe_cnt, &bp->eq_spq_left);
+
+ bp->eq_cons = sw_cons;
+ bp->eq_prod = sw_prod;
+ /* Make sure the above memory writes were issued before updating
+ * the producer
+ */
+ smp_wmb();
+
+ /* update producer */
+ bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
+static void bnx2x_sp_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+ u16 status;
+
+ status = bnx2x_update_dsb_idx(bp);
+/* if (status == 0) */
+/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+#ifdef BCM_CNIC
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+
+ if ((!NO_FCOE(bp)) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /*
+ * Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+#endif
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+}
+
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct bnx2x *bp = netdev_priv(dev);
+
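+ /* ack and mask the default SB interrupt; bnx2x_sp_task()
+ * re-enables it once the work is done
+ */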
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+#ifdef BCM_CNIC
+ {
+ struct cnic_ops *c_ops;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
+#endif
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ return IRQ_HANDLED;
+}
+
+/* end of slow path */
+
+
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+ SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+ bp->fw_drv_pulse_wr_seq);
+}
+
+
+static void bnx2x_timer(unsigned long data)
+{
+ u8 cos;
+ struct bnx2x *bp = (struct bnx2x *) data;
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (poll) {
+ struct bnx2x_fastpath *fp = &bp->fp[0];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ bnx2x_tx_int(bp, &fp->txdata[cos]);
+ bnx2x_rx_int(fp, 1000);
+ }
+
+ if (!BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 drv_pulse;
+ u32 mcp_pulse;
+
+ ++bp->fw_drv_pulse_wr_seq;
+ bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+ /* TBD - add SYSTEM_TIME */
+ drv_pulse = bp->fw_drv_pulse_wr_seq;
+ bnx2x_drv_pulse(bp);
+
+ mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
+ MCP_PULSE_SEQ_MASK);
+ /* The delta between driver pulse and mcp response
+ * should be 1 (before mcp response) or 0 (after mcp response)
+ */
+ if ((drv_pulse != mcp_pulse) &&
+ (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+ /* someone lost a heartbeat... */
+ BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ drv_pulse, mcp_pulse);
+ }
+ }
+
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+/* nic init */
+
+/*
+ * nic init service functions
+ */
+
+static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+{
+ u32 i;
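+ /* use dword writes when both address and length are dword-aligned,
+ * otherwise fall back to byte-wide writes
+ */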
+ if (!(len%4) && !(addr%4))
+ for (i = 0; i < len; i += 4)
+ REG_WR(bp, addr + i, fill);
+ else
+ for (i = 0; i < len; i++)
+ REG_WR8(bp, addr + i, fill);
+
+}
+
+/* helper: writes FP SP data to FW - data_size in dwords */
+static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+ int fw_sb_id,
+ u32 *sb_data_p,
+ u32 data_size)
+{
+ int index;
+ for (index = 0; index < data_size; index++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ sizeof(u32)*index,
+ *(sb_data_p + index));
+}
+
+static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+{
+ u32 *sb_data_p;
+ u32 data_size = 0;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+
+ /* disable the function first */
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_DISABLED;
+ sb_data_e2.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_DISABLED;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ }
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_SYNC_BLOCK_SIZE);
+}
+
+/* helper: writes SP SB data to FW */
+static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+ struct hc_sp_status_block_data *sp_sb_data)
+{
+ int func = BP_FUNC(bp);
+ int i;
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32),
+ *((u32 *)sp_sb_data + i));
+}
+
+static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ sp_sb_data.state = SB_DISABLED;
+ sp_sb_data.p_func.vf_valid = false;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_SYNC_BLOCK_SIZE);
+
+}
+
+
+static inline
+void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+ int igu_sb_id, int igu_seg_id)
+{
+ hc_sm->igu_sb_id = igu_sb_id;
+ hc_sm->igu_seg_id = igu_seg_id;
+ hc_sm->timer_value = 0xFF;
+ hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id)
+{
+ int igu_seg_id;
+
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p;
+ int data_size;
+ u32 *sb_data_p;
+
+ if (CHIP_INT_MODE_IS_BC(bp))
+ igu_seg_id = HC_SEG_ACCESS_NORM;
+ else
+ igu_seg_id = IGU_SEG_ACCESS_NORM;
+
+ bnx2x_zero_fp_sb(bp, fw_sb_id);
+
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_ENABLED;
+ sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e2.common.p_func.vf_id = vfid;
+ sb_data_e2.common.p_func.vf_valid = vf_valid;
+ sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e2.common.same_igu_sb_1b = true;
+ sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e2.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_ENABLED;
+ sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e1x.common.p_func.vf_id = 0xff;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e1x.common.same_igu_sb_1b = true;
+ sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e1x.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
+ }
+
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
+ igu_sb_id, igu_seg_id);
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
+ igu_sb_id, igu_seg_id);
+
+ DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
+
+ /* write indices to HW */
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+}
+
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
+ u16 tx_usec, u16 rx_usec)
+{
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
+ false, rx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
+ tx_usec);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ dma_addr_t mapping = bp->def_status_blk_mapping;
+ int igu_sp_sb_index;
+ int igu_seg_id;
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int reg_offset;
+ u64 section;
+ int index;
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ igu_sp_sb_index = DEF_SB_IGU_ID;
+ igu_seg_id = HC_SEG_ACCESS_DEF;
+ } else {
+ igu_sp_sb_index = bp->igu_dsb_id;
+ igu_seg_id = IGU_SEG_ACCESS_DEF;
+ }
+
+ /* ATTN */
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ atten_status_block);
+ def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
+
+ bp->attn_state = 0;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ int sindex;
+ /* take care of sig[0]..sig[4] */
+ for (sindex = 0; sindex < 4; sindex++)
+ bp->attn_group[index].sig[sindex] =
+ REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
+
+ if (!CHIP_IS_E1x(bp))
+ /*
+ * enable5 is separate from the rest of the registers,
+ * and therefore the address skip is 4
+ * and not 16 between the different groups
+ */
+ bp->attn_group[index].sig[4] = REG_RD(bp,
+ reg_offset + 0x10 + 0x4*index);
+ else
+ bp->attn_group[index].sig[4] = 0;
+ }
+
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+ HC_REG_ATTN_MSG0_ADDR_L);
+
+ REG_WR(bp, reg_offset, U64_LO(section));
+ REG_WR(bp, reg_offset + 4, U64_HI(section));
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+ }
+
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ sp_sb);
+
+ bnx2x_zero_sp_sb(bp);
+
+ sp_sb_data.state = SB_ENABLED;
+ sp_sb_data.host_sb_addr.lo = U64_LO(section);
+ sp_sb_data.host_sb_addr.hi = U64_HI(section);
+ sp_sb_data.igu_sb_id = igu_sp_sb_index;
+ sp_sb_data.igu_seg_id = igu_seg_id;
+ sp_sb_data.p_func.pf_id = func;
+ sp_sb_data.p_func.vnic_id = BP_VN(bp);
+ sp_sb_data.p_func.vf_id = 0xff;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
+ bp->tx_ticks, bp->rx_ticks);
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+ spin_lock_init(&bp->spq_lock);
+ atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
+
+ bp->spq_prod_idx = 0;
+ bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+}
+
+static void bnx2x_init_eq_ring(struct bnx2x *bp)
+{
+ int i;
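+ /* chain the last element of each page to the first element of the
+ * next page, closing the descriptor ring
+ */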
+ for (i = 1; i <= NUM_EQ_PAGES; i++) {
+ union event_ring_elem *elem =
+ &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
+
+ elem->next_page.addr.hi =
+ cpu_to_le32(U64_HI(bp->eq_mapping +
+ BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
+ elem->next_page.addr.lo =
+ cpu_to_le32(U64_LO(bp->eq_mapping +
+ BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
+ }
+ bp->eq_cons = 0;
+ bp->eq_prod = NUM_EQ_DESC;
+ bp->eq_cons_sb = BNX2X_EQ_INDEX;
+ /* we want a warning message before it gets rough... */
+ atomic_set(&bp->eq_spq_left,
+ min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
+}
+
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
+{
+ struct bnx2x_rx_mode_ramrod_params ramrod_param;
+ int rc;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Prepare ramrod parameters */
+ ramrod_param.cid = 0;
+ ramrod_param.cl_id = cl_id;
+ ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+ ramrod_param.func_id = BP_FUNC(bp);
+
+ ramrod_param.pstate = &bp->sp_state;
+ ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+ ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.rx_mode_flags = rx_mode_flags;
+
+ ramrod_param.rx_accept_flags = rx_accept_flags;
+ ramrod_param.tx_accept_flags = tx_accept_flags;
+
+ rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+ if (rc < 0) {
+ BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
+ return;
+ }
+}
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+
+#ifdef BCM_CNIC
+ /* Configure rx_mode of FCoE Queue */
+ if (!NO_FCOE(bp))
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+#endif
+
+ switch (bp->rx_mode) {
+ case BNX2X_RX_MODE_NONE:
+ /*
+ * 'drop all' supersedes any accept flags that may have been
+ * passed to the function.
+ */
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ /* According to the definition of SI mode, an iface in promisc mode
+ * should receive matched and unmatched (in resolution of port)
+ * unicast packets.
+ */
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ if (IS_MF_SI(bp))
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ else
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+
+ break;
+ default:
+ BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
+ return;
+ }
+
+ if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ }
+
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ __set_bit(RAMROD_TX, &ramrod_flags);
+
+ bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
+ tx_accept_flags, ramrod_flags);
+}
+
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+ int i;
+
+ if (IS_MF_SI(bp))
+ /*
+ * In switch independent mode, the TSTORM needs to accept
+ * packets that failed classification, since approximate match
+ * mac addresses aren't written to NIG LLH
+ */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+ else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
+
+ /* Zero this manually as its initialization is
+ currently missing in the initTool */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_AGG_DATA_OFFSET + i * 4, 0);
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
+ CHIP_INT_MODE_IS_BC(bp) ?
+ HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
+ }
+}
+
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ bnx2x_init_internal_common(bp);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ /* nothing to do */
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ /* internal memory per function is
+ initialized inside bnx2x_pf_init */
+ break;
+
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ break;
+ }
+}
+
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return BP_L_ID(fp->bp) + fp->index;
+ else /* We want Client ID to be the same as IGU SB ID for 57712 */
+ return bnx2x_fp_igu_sb_id(fp);
+}
+
+static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 cos;
+ unsigned long q_type = 0;
+ u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
+
+ fp->cid = fp_idx;
+ fp->cl_id = bnx2x_fp_cl_id(fp);
+ fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+ fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
+ /* qZone id equals to FW (per path) client id */
+ fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
+
+ /* init shortcut */
+ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+ /* Setup SB indices */
+ fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
+
+ /* init tx data */
+ for_each_cos_in_tx_queue(fp, cos) {
+ bnx2x_init_txdata(bp, &fp->txdata[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
+ FP_COS_TO_TXQ(fp, cos),
+ BNX2X_TX_SB_INDEX_BASE + cos);
+ cids[cos] = fp->txdata[cos].cid;
+ }
+
+ bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
+ BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ /* Configure classification DBs: Always enable Tx switching */
+ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
+
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
+ "cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+
+ bnx2x_update_fpsb_idx(fp);
+}
+
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_init_eth_fp(bp, i);
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ bnx2x_init_fcoe_fp(bp);
+
+ bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+ BNX2X_VF_ID_INVALID, false,
+ bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
+
+#endif
+
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base, bp->common.shmem2_base,
+ BP_PORT(bp));
+ /* ensure status block indices were read */
+ rmb();
+
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+ bnx2x_init_sp_ring(bp);
+ bnx2x_init_eq_ring(bp);
+ bnx2x_init_internal(bp, load_code);
+ bnx2x_pf_init(bp);
+ bnx2x_stats_init(bp);
+
+ /* flush all before enabling interrupts */
+ mb();
+ mmiowb();
+
+ bnx2x_int_enable(bp);
+
+ /* Check for SPIO5 */
+ bnx2x_attn_int_deasserted0(bp,
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+ AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+/* end of nic init */
+
+/*
+ * gzip service functions
+ */
+
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
+ if (bp->gunzip_buf == NULL)
+ goto gunzip_nomem1;
+
+ bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+ if (bp->strm == NULL)
+ goto gunzip_nomem2;
+
+ bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
+ if (bp->strm->workspace == NULL)
+ goto gunzip_nomem3;
+
+ return 0;
+
+gunzip_nomem3:
+ kfree(bp->strm);
+ bp->strm = NULL;
+
+gunzip_nomem2:
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+ " un-compression\n");
+ return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+ if (bp->strm) {
+ vfree(bp->strm->workspace);
+ kfree(bp->strm);
+ bp->strm = NULL;
+ }
+
+ if (bp->gunzip_buf) {
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+ }
+}
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+{
+ int n, rc;
+
+ /* check gzip header */
+ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+ BNX2X_ERR("Bad gzip header\n");
+ return -EINVAL;
+ }
+
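+	/* Per RFC 1952 the fixed gzip header is 10 bytes: 2 magic bytes,
+	 * method, flags, a 4-byte mtime, extra flags and OS; an optional
+	 * null-terminated file name follows when the FNAME flag is set.
+	 */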
+ n = 10;
+
+#define FNAME 0x8
+
+ if (zbuf[3] & FNAME)
+ while ((zbuf[n++] != 0) && (n < len));
+
+ bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+ bp->strm->avail_in = len - n;
+ bp->strm->next_out = bp->gunzip_buf;
+ bp->strm->avail_out = FW_BUF_SIZE;
+
+ rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+ if (rc != Z_OK)
+ return rc;
+
+ rc = zlib_inflate(bp->strm, Z_FINISH);
+ if ((rc != Z_OK) && (rc != Z_STREAM_END))
+ netdev_err(bp->dev, "Firmware decompression error: %s\n",
+ bp->strm->msg);
+
+ bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+ if (bp->gunzip_outlen & 0x3)
+ netdev_err(bp->dev, "Firmware decompression error:"
+ " gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
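+	/* the decompressed length is consumed in dwords from here on */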
+ bp->gunzip_outlen >>= 2;
+
+ zlib_inflateEnd(bp->strm);
+
+ if (rc == Z_STREAM_END)
+ return 0;
+
+ return rc;
+}
+
+/* nic load/unload */
+
+/*
+ * General service functions
+ */
+
+/* send a NIG loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+ u32 wb_write[3];
+
+ /* Ethernet source and destination addresses */
+ wb_write[0] = 0x55555555;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x20; /* SOP */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+
+ /* NON-IP protocol */
+ wb_write[0] = 0x09000000;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
+
+/* some of the internal memories
+ * are not directly readable from the driver
+ * to test them we send debug packets
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+ int factor;
+ int count, i;
+ u32 val = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ factor = 120;
+ else if (CHIP_REV_IS_EMUL(bp))
+ factor = 200;
+ else
+ factor = 1;
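+
+	/* emulation and FPGA platforms run much slower than real silicon,
+	 * so all poll budgets and delays below are scaled by this factor
+	 */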
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send Ethernet packet */
+ bnx2x_lb_pckt(bp);
+
+	/* TODO: do we need to reset the NIG statistics here? */
+ /* Wait until NIG register shows 1 packet of size 0x10 */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0x10)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0x10) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -1;
+ }
+
+ /* Wait until PRS register shows 1 packet */
+ count = 1000 * factor;
+ while (count) {
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val == 1)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0x1) {
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+ return -2;
+ }
+
+ /* Reset and init BRB, PRS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+
+ DP(NETIF_MSG_HW, "part2\n");
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send 10 Ethernet packets */
+ for (i = 0; i < 10; i++)
+ bnx2x_lb_pckt(bp);
+
+ /* Wait until NIG register shows 10 + 1
+ packets of size 11*0x10 = 0xb0 */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0xb0)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0xb0) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -3;
+ }
+
+ /* Wait until PRS register shows 2 packets */
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val != 2)
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* Write 1 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+	/* Wait until PRS register shows 3 packets */
+	msleep(10 * factor);
+	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+	if (val != 3)
+		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* clear NIG EOP FIFO */
+ for (i = 0; i < 11; i++)
+ REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+ val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+ if (val != 1) {
+ BNX2X_ERR("clear of NIG failed\n");
+ return -4;
+ }
+
+ /* Reset and init BRB, PRS, NIG */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+#ifndef BCM_CNIC
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
+
+ /* Enable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
+
+ DP(NETIF_MSG_HW, "done\n");
+
+ return 0; /* OK */
+}
+
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
+{
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
+ else
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+ /*
+ * mask read length error interrupts in brb for parser
+ * (parsing unit and 'checksum and crc' unit)
+ * these errors are legal (PU reads fixed length and CAC can cause
+ * read length error on truncated packets)
+ */
+ REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
+ REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+ REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+ REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+ REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+ REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+ REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
+ if (CHIP_REV_IS_FPGA(bp))
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
+ else if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
+ (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
+ else
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+ REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+
+ if (!CHIP_IS_E1x(bp))
+ /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+ REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
+ REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+ REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
+}
+
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+ u32 val = 0x1400;
+
+ /* reset_common */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffff7f);
+
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+ bp->dmae_ready = 0;
+ spin_lock_init(&bp->dmae_lock);
+}
+
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+ u16 devctl;
+ int r_order, w_order;
+
+ pci_read_config_word(bp->pdev,
+ pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
+ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+ if (bp->mrrs == -1)
+ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ else {
+ DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+ r_order = bp->mrrs;
+ }
+
+ bnx2x_init_pxp_arb(bp, r_order, w_order);
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+ int is_required;
+ u32 val;
+ int port;
+
+ if (BP_NOMCP(bp))
+ return;
+
+ is_required = 0;
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_FAN_FAILURE_MASK;
+
+	/*
+	 * The fan failure mechanism is usually related to the PHY type since
+	 * the power consumption of the board is affected by the PHY. Currently,
+	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
+	 */
+	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+		is_required = 1;
+	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+ for (port = PORT_0; port < PORT_MAX; port++) {
+ is_required |=
+ bnx2x_fan_failure_det_req(
+ bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base,
+ port);
+ }
+
+ DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
+
+ if (is_required == 0)
+ return;
+
+ /* Fan failure is indicated by SPIO 5 */
+ bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+ MISC_REGISTERS_SPIO_INPUT_HI_Z);
+
+ /* set to active low mode */
+ val = REG_RD(bp, MISC_REG_SPIO_INT);
+ val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+ /* enable interrupt to signal the IGU */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ val |= (1 << MISC_REGISTERS_SPIO_5);
+ REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+}
+
+static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
+{
+ u32 offset = 0;
+
+ if (CHIP_IS_E1(bp))
+ return;
+ if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
+ return;
+
+ switch (BP_ABS_FUNC(bp)) {
+ case 0:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ break;
+ case 1:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
+ break;
+ case 2:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
+ break;
+ case 3:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
+ break;
+ case 4:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
+ break;
+ case 5:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
+ break;
+ case 6:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
+ break;
+ case 7:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
+ break;
+ default:
+ return;
+ }
+
+ REG_WR(bp, offset, pretend_func_num);
+ REG_RD(bp, offset);
+ DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
+}
+
+void bnx2x_pf_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+ val &= ~IGU_PF_CONF_FUNC_EN;
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static inline void bnx2x__common_init_phy(struct bnx2x *bp)
+{
+ u32 shmem_base[2], shmem2_base[2];
+ shmem_base[0] = bp->common.shmem_base;
+ shmem2_base[0] = bp->common.shmem2_base;
+ if (!CHIP_IS_E1x(bp)) {
+ shmem_base[1] =
+ SHMEM2_RD(bp, other_shmem_base_addr);
+ shmem2_base[1] =
+ SHMEM2_RD(bp, other_shmem2_base_addr);
+ }
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+ bp->common.chip_id);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
+
+ /*
+	 * take the RESET lock to protect the undi_unload flow from accessing
+ * registers while we're resetting the chip
+ */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_reset_common(bp);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+
+ val = 0xfffc;
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+
- dsb_idx = BP_E1HVN(bp);
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ u8 abs_func_id;
+
+		/*
+		 * In 4-port or 2-port mode we need to turn off master-enable
+		 * for everyone; after that, turn it back on for self. So we
+		 * disregard multi-function or not, and always disable for all
+		 * functions on the given path, this means 0,2,4,6 for
+		 * path 0 and 1,3,5,7 for path 1
+		 */
+ for (abs_func_id = BP_PATH(bp);
+ abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+ if (abs_func_id == BP_ABS_FUNC(bp)) {
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+ 1);
+ continue;
+ }
+
+ bnx2x_pretend_func(bp, abs_func_id);
+ /* clear pf enable */
+ bnx2x_pf_disable(bp);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
+ if (CHIP_IS_E1(bp)) {
+ /* enable HW interrupt from PXP on USDM overflow
+ bit 16 on INT_MASK_0 */
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
+ bnx2x_init_pxp(bp);
+
+#ifdef __BIG_ENDIAN
+ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+ /* make sure this value is 0 */
+ REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
+ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+#endif
+
+ bnx2x_ilt_init_page_size(bp, INITOP_SET);
+
+ if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+
+	/* let the HW do its magic ... */
+ msleep(100);
+ /* finish PXP init */
+ val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 CFG failed\n");
+ return -EBUSY;
+ }
+ val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 RD_INIT failed\n");
+ return -EBUSY;
+ }
+
+	/* Timers bug workaround, E2 only. We need to set the entire ILT to
+ * have entries with value "0" and valid bit on.
+ * This needs to be done by the first PF that is loaded in a path
+ * (i.e. common phase)
+ */
+ if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". Real-life scenario for example: if a driver is being
+ * load-unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ * 1. First PF driver which loads on a path will:
+ * a. After taking the chip out of reset, by using pretend,
+ * it will write "0" to the following registers of
+ * the other vnics.
+ * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ * And for itself it will write '1' to
+ * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ * dmae-operations (writing to pram for example.)
+ *	  note: this could be done only for functions 6 and 7, but it
+ *	  is cleaner this way.
+ * b. Write zero+valid to the entire ILT.
+ * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ * VNIC3 (of that port). The range allocated will be the
+ * entire ILT. This is needed to prevent ILT range error.
+ * 2. Any PF driver load flow:
+ * a. ILT update with the physical addresses of the allocated
+ * logical pages.
+ * b. Wait 20msec. - note that this timeout is needed to make
+ * sure there are no requests in one of the PXP internal
+ * queues with "old" ILT addresses.
+ * c. PF enable in the PGLC.
+ * d. Clear the was_error of the PF in the PGLC. (could have
+ *	  occurred while driver was down)
+ * e. PF enable in the CFC (WEAK + STRONG)
+ * f. Timers scan enable
+ * 3. PF driver unload flow:
+ * a. Clear the Timers scan_en.
+ * b. Polling for scan_on=0 for that PF.
+ * c. Clear the PF enable bit in the PXP.
+ * d. Clear the PF enable in the CFC (WEAK + STRONG)
+ * e. Write zero+valid to all ILT entries (The valid bit must
+ * stay set)
+ * f. If this is VNIC 3 of a port then also init
+ * first_timers_ilt_entry to zero and last_timers_ilt_entry
+ *	  to the last entry in the ILT.
+ *
+ * Notes:
+ * Currently the PF error in the PGLC is non-recoverable.
+ * In the future there will be a recovery routine for this error.
+ * Currently attention is masked.
+ * Having an MCP lock on the load/unload process does not guarantee that
+ * there is no Timer disable during Func6/7 enable. This is because the
+ * Timers scan is currently being cleared by the MCP on FLR.
+ * Step 2.d can be done only for PF6/7 and the driver can also check if
+ * there is an error before clearing it. But the flow above is simpler and
+ * more general.
+ * All ILT entries are written by zero+valid and not just PF6/7
+ * ILT entries since in the future the ILT entries allocation for
+ * PF-s might be dynamic.
+ */
+ struct ilt_client_info ilt_cli;
+ struct bnx2x_ilt ilt;
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ memset(&ilt, 0, sizeof(struct bnx2x_ilt));
+
+ /* initialize dummy TM client */
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ /* Step 1: set zeroes to all ilt page entries with valid bit on
+ * Step 2: set the timers first/last ilt entry to point
+ * to the entire range to prevent ILT range error for 3rd/4th
+		 * vnic (this code assumes existence of the vnic)
+ *
+ * both steps performed by call to bnx2x_ilt_client_init_op()
+ * with dummy TM client
+ *
+ * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+		 * and its counterpart are split registers
+ */
+ bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
+ bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+ }
+
+
+ REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+ REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
+ (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
+
+		/* let the HW do its magic ... */
+ do {
+ msleep(200);
+ val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
+ } while (factor-- && (val != 1));
+
+ if (val != 1) {
+ BNX2X_ERR("ATC_INIT failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+
+ /* clean the DMAE memory */
+ bp->dmae_ready = 1;
+ bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
+
+ bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
+
+ bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+ bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
+
+
+ /* QM queues pointers table */
+ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
+
+ /* soft reset pulse */
+ REG_WR(bp, QM_REG_SOFT_RESET, 1);
+ REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
+#endif
+
+ bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
+ REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+ if (!CHIP_REV_IS_SLOW(bp))
+ /* enable hw interrupt from doorbell Q */
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+ REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
+
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
+ /* Bit-map indicating which L2 hdrs may appear
+ * after the basic Ethernet header
+ */
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+
+ bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset VFC memories */
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+
+ msleep(20);
+ }
+
+ bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
+
+ /* sync semi rtc */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0x80000000);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ 0x80000000);
+
+ bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+
+ REG_WR(bp, SRC_REG_SOFT_RST, 1);
+
+ bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
+#ifdef BCM_CNIC
+ REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+ REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+ REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+ REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+ REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+ REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+ REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+ REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
+ REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+ if (sizeof(union cdu_context) != 1024)
+ /* we currently assume that a context is 1024 bytes */
+ dev_alert(&bp->pdev->dev, "please adjust the size "
+ "of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
+
+ bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
+ val = (4 << 24) + (0 << 12) + 1024;
+ REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+ bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
+ REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+ /* enable context validation interrupt from CFC */
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+ /* set the thresholds to prevent CFC/CDU race */
+ REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+ bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
+
+ bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2814, 0xffffffff);
+ REG_WR(bp, 0x3820, 0xffffffff);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+ (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+ PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+ (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+ (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+ }
+
+ bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
+ if (!CHIP_IS_E1(bp)) {
+		/* in E3 this is done in the per-port section */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (CHIP_IS_E1H(bp))
+ /* not applicable for E2 (and above ...) */
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
+
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(200);
+
+ /* finish CFC init */
+ val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC LL_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC AC_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC CAM_INIT failed\n");
+ return -EBUSY;
+ }
+ REG_WR(bp, CFC_REG_DEBUG0, 0);
+
+ if (CHIP_IS_E1(bp)) {
+		/* read the NIG statistic
+		   to see if this is our first load since power-up */
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+
+ /* do internal memory self test */
+ if ((val == 0) && bnx2x_int_mem_test(bp)) {
+ BNX2X_ERR("internal mem self test failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_setup_fan_failure_detection(bp);
+
+ /* clear PXP2 attentions */
+ REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+ bnx2x_enable_blocks_attention(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ if (!BP_NOMCP(bp)) {
+ if (CHIP_IS_E1x(bp))
+ bnx2x__common_init_phy(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
+ return 0;
+}
+
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+ int rc = bnx2x_init_hw_common(bp);
+
+ if (rc)
+ return rc;
+
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
+ if (!BP_NOMCP(bp))
+ bnx2x__common_init_phy(bp);
+
+ return 0;
+}
+
+static int bnx2x_init_hw_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
+ u32 low, high;
+ u32 val;
+
+ bnx2x__link_reset(bp);
+
+ DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+	/* Timers bug workaround: the common phase disables the pf_master
+	 * bit in pglue, so we need to enable it here before any dmae
+	 * accesses are attempted. Therefore we manually add the
+	 * enable-master in the port phase (it also happens in the
+	 * function phase)
+	 */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+
+ /* QM cid (connection) count */
+ bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+#endif
+
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
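+	/* BRB pause thresholds are programmed in units of 256 bytes; the
+	 * constants below are the byte budgets from the inline comments
+	 * divided by 256
+	 */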
+ if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
+ if (IS_MF(bp))
+ low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+ else if (bp->dev->mtu > 4096) {
+ if (bp->flags & ONE_PORT_FLAG)
+ low = 160;
+ else {
+ val = bp->dev->mtu;
+ /* (24*1024 + val*4)/256 */
+ low = 96 + (val/64) +
+ ((val % 64) ? 1 : 0);
+ }
+ } else
+ low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+ high = low + 56; /* 14*1024/256 */
+ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+ }
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ REG_WR(bp, (BP_PORT(bp) ?
+ BRB1_REG_MAC_GUARANTIED_1 :
+ BRB1_REG_MAC_GUARANTIED_0), 40);
+
+
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ if (CHIP_IS_E3B0(bp))
+		/* Ovlan exists only if we are in multi-function +
+		 * switch-dependent mode; in switch-independent mode
+		 * there are no ovlan headers
+		 */
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+ PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+ (bp->path_has_ovlan ? 7 : 6));
+
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+
+ if (CHIP_IS_E1x(bp)) {
+		/* configure PBF to work without PAUSE, mtu 9000 */
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+ /* update init credit */
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
+
+ /* probe changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+ udelay(50);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+ }
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+#endif
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+ /* init aeu_mask_attn_func_0/1:
+ * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+ * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+ * bits 4-7 are used for "per vn group attention" */
+ val = IS_MF(bp) ? 0xF7 : 0x7;
+ /* Enable DCBX attention for all but E1 */
+ val |= CHIP_IS_E1(bp) ? 0 : 0x10;
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* Bit-map indicating which L2 hdrs may appear after the
+ * basic Ethernet header
+ */
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC,
+ IS_MF_SD(bp) ? 7 : 6);
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_LLH1_MF_MODE :
+ NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+ if (!CHIP_IS_E1(bp)) {
+ /* 0x2 disable mf_ov, 0x1 enable */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+ (IS_MF_SD(bp) ? 0x1 : 0x2));
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = 0;
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = 1;
+ break;
+ case MULTI_FUNCTION_SI:
+ val = 2;
+ break;
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
+ NIG_REG_LLH0_CLS_TYPE), val);
+ }
+ {
+ REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+ }
+ }
+
+
+ /* If SPIO5 is set to generate interrupts, enable it for this port */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ if (val & (1 << MISC_REGISTERS_SPIO_5)) {
+ u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ val = REG_RD(bp, reg_addr);
+ val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_addr, val);
+ }
+
+ return 0;
+}
+
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
+{
+ int reg;
+
+ if (CHIP_IS_E1(bp))
+ reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+
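+	/* each ILT line is programmed with a wide-bus write of the two
+	 * on-chip address words
+	 */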
+ bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+}
+
+static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+{
+ bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
+}
+
+static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+{
+ u32 i, base = FUNC_ILT_BASE(func);
+ for (i = base; i < base + ILT_PER_FUNC; i++)
+ bnx2x_ilt_wr(bp, i, 0);
+}
+
+static int bnx2x_init_hw_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int init_phase = PHASE_PF0 + func;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 cdu_ilt_start;
+ u32 addr, val;
+ u32 main_mem_base, main_mem_size, main_mem_prty_clr;
+ int i, main_mem_width;
+
+ DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
+
+	/* FLR cleanup */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_flr_clnup(bp);
+
+ /* set MSI reconfigure capability */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+ val = REG_RD(bp, addr);
+ val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+ REG_WR(bp, addr, val);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+ ilt = BP_ILT(bp);
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+
+ for (i = 0; i < L2_ILT_LINES(bp); i++) {
+ ilt->lines[cdu_ilt_start + i].page =
+ bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page_mapping =
+ bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
+ /* cdu ilt pages are allocated manually so there's no need to
+ set the size */
+ }
+ bnx2x_ilt_init_op(bp, INITOP_SET);
+
+#ifdef BCM_CNIC
+ bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+
+ /* T1 hash bits value determines the T1 number of entries */
+ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+#endif
+
+#ifndef BCM_CNIC
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif /* BCM_CNIC */
+
+ if (!CHIP_IS_E1x(bp)) {
+ u32 pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ /* Turn on a single ISR mode in IGU if driver is going to use
+ * INT#x or MSI
+ */
+ if (!(bp->flags & USING_MSIX_FLAG))
+ pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ /*
+ * Timers workaround bug: function init part.
+ * Need to wait 20msec after initializing ILT,
+ * needed to make sure there are no requests in
+ * one of the PXP internal queues with "old" ILT addresses
+ */
+ msleep(20);
+		/*
+		 * Master enable - needed because WB DMAE writes are performed
+		 * before this register is re-initialized as part of the
+		 * regular function init
+		 */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+ /* Enable the function in IGU */
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
+ }
+
+ bp->dmae_ready = 1;
+
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, QM_REG_PF_EN, 1);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ }
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_DISABLE_PF, 0);
+
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
+
+ if (IS_MF(bp)) {
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
+ }
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+
+ /* HC init per function */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ } else {
+ int num_segs, sb_idx, prod_offset;
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int dsb_idx = 0;
+			/*
+ * Producer memory:
+ * E2 mode: address 0-135 match to the mapping memory;
+ * 136 - PF0 default prod; 137 - PF1 default prod;
+ * 138 - PF2 default prod; 139 - PF3 default prod;
+ * 140 - PF0 attn prod; 141 - PF1 attn prod;
+ * 142 - PF2 attn prod; 143 - PF3 attn prod;
+ * 144-147 reserved.
+ *
+ * E1.5 mode - In backward compatible mode;
+ * for non default SB; each even line in the memory
+ * holds the U producer and each odd line hold
+ * the C producer. The first 128 producers are for
+ * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+ * producers are for the DSB for each PF.
+ * Each PF has five segments: (the order inside each
+ * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+ * 144-147 attn prods;
+ */
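+			/* e.g. in E2 mode the PF2 default producer from the
+			 * table above lives at
+			 * IGU_REG_PROD_CONS_MEMORY + 138 * 4
+			 */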
+ /* non-default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+ for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
+ prod_offset = (bp->igu_base_sb + sb_idx) *
+ num_segs;
+
+ for (i = 0; i < num_segs; i++) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i) * 4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with value 0 */
+ bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_igu_clear_sb(bp,
+ bp->igu_base_sb + sb_idx);
+ }
+
+ /* default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ dsb_idx = BP_FUNC(bp);
+ else
- u8 entry = (BP_E1HVN(bp) + 1)*8;
++ dsb_idx = BP_VN(bp);
+
+ prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_BASE_DSB_PROD + dsb_idx :
+ IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
++ /*
++ * igu prods come in chunks of E1HVN_MAX (4) -
++			 * it does not matter what the current chip mode is
++ */
+ for (i = 0; i < (num_segs * E1HVN_MAX);
+ i += E1HVN_MAX) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i)*4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with 0 */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ CSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ XSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ TSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ } else {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ }
+ bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
+
+ /* !!! these should become driver const once
+ rf-tool supports split-68 const */
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+ }
+ }
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2114, 0xffffffff);
+ REG_WR(bp, 0x2120, 0xffffffff);
+
+ if (CHIP_IS_E1x(bp)) {
+ main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
+ main_mem_base = HC_REG_MAIN_MEMORY +
+ BP_PORT(bp) * (main_mem_size * 4);
+ main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+ main_mem_width = 8;
+
+ val = REG_RD(bp, main_mem_prty_clr);
+ if (val)
+ DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
+ "block during "
+ "function init (0x%x)!\n", val);
+
+ /* Clear "false" parity errors in MSI-X table */
+ for (i = main_mem_base;
+ i < main_mem_base + main_mem_size * 4;
+ i += main_mem_width) {
+ bnx2x_read_dmae(bp, i, main_mem_width / 4);
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+ i, main_mem_width / 4);
+ }
+ /* Clear HC parity attention */
+ REG_RD(bp, main_mem_prty_clr);
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enable STORMs SP logging */
+ REG_WR8(bp, BAR_USTRORM_INTMEM +
+ USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
+ bnx2x_phy_probe(&bp->link_params);
+
+ return 0;
+}
+
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+ /* fastpath */
+ bnx2x_free_fp_mem(bp);
+ /* end of fastpath */
+
+ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+ BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
+ bp->context.size);
+
+ bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+ BNX2X_FREE(bp->ilt->lines);
+
+#ifdef BCM_CNIC
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+ BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+ BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+}
+
+static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups;
+
+ /* number of eth_queues */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + num_eth_queues */
+ bp->fw_stats_num = 2 + num_queue_stats;
+
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
+ (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
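+	/* i.e. num_groups = DIV_ROUND_UP(bp->fw_stats_num,
+	 *				  STATS_QUERY_CMD_COUNT)
+	 */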
+
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+	/* Data for statistics requests + stats_counter
+ *
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ return -ENOMEM;
+}
+
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ if (!CHIP_IS_E1x(bp))
+ /* size = the status block + ramrod buffers */
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ /* allocate searcher T2 table */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+
+ BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+	/* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ goto alloc_mem_err;
+
+ bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+
+ BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
+ bp->context.size);
+
+ BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+
+ if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
+
+ /* Slow path ring */
+ BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+
+ /* EQ */
+ BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+
+ /* fastpath */
+	/* needs to be done at the end, since it self-adjusts to the
+	 * amount of memory available for the RSS queues
+	 */
+ if (bnx2x_alloc_fp_mem(bp))
+ goto alloc_mem_err;
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_mem(bp);
+ return -ENOMEM;
+}
+
+/*
+ * Init service functions
+ */
+
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
+
+ __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc < 0)
+ BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+ return rc;
+}
+
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+
+	/* Wait for completion of the requested command */
+ if (wait_for_comp)
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+ /* Set the mac type of addresses we want to clear */
+ __set_bit(mac_type, &vlan_mac_flags);
+
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete MACs: %d\n", rc);
+
+ return rc;
+}
+
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
+{
+ unsigned long ramrod_flags = 0;
+
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Eth MAC is set on RSS leading client (fp[0]) */
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
+}
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+ return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+}
+
+/**
+ * bnx2x_set_int_mode - configure interrupt mode
+ *
+ * @bp: driver handle
+ *
+ * In case of MSI-X it will also try to enable MSI-X.
+ */
+static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+{
+ switch (int_mode) {
+ case INT_MODE_MSI:
+ bnx2x_enable_msi(bp);
+ /* falling through... */
+ case INT_MODE_INTx:
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+ break;
+ default:
+ /* Set number of queues according to bp->multi_mode value */
+ bnx2x_set_num_queues(bp);
+
+ DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+ bp->num_queues);
+
+		/* if we can't use MSI-X we only need one fp,
+		 * so try to enable MSI-X with the requested number of fp's
+		 * and fall back to MSI or legacy INTx with one fp
+		 */
+ if (bnx2x_enable_msix(bp)) {
+ /* failed to enable MSI-X */
+ if (bp->multi_mode)
+ DP(NETIF_MSG_IFUP,
+ "Multi requested but failed to "
+ "enable MSI-X (%d), "
+ "set number of queues to %d\n",
+ bp->num_queues,
+ 1 + NON_ETH_CONTEXT_USE);
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+
+ /* Try to enable MSI */
+ if (!(bp->flags & DISABLE_MSI_FLAG))
+ bnx2x_enable_msi(bp);
+ }
+ break;
+ }
+}
+
+/* must be called prior to any HW initializations */
+static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
+{
+ return L2_ILT_LINES(bp);
+}
+
+void bnx2x_ilt_set_info(struct bnx2x *bp)
+{
+ struct ilt_client_info *ilt_client;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 line = 0;
+
+ ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
+ DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
+
+ /* CDU */
+ ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+ ilt_client->client_num = ILT_CLIENT_CDU;
+ ilt_client->page_size = CDU_ILT_PAGE_SZ;
+ ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+ ilt_client->start = line;
+ line += bnx2x_cid_ilt_lines(bp);
+#ifdef BCM_CNIC
+ line += CNIC_ILT_LINES;
+#endif
+ ilt_client->end = line - 1;
+
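+	/* the "hw psz" in the debug prints below is the page size encoded
+	 * as log2(page_size / 4K)
+	 */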
+ DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ /* QM */
+ if (QM_INIT(bp->qm_cid_count)) {
+ ilt_client = &ilt->clients[ILT_CLIENT_QM];
+ ilt_client->client_num = ILT_CLIENT_QM;
+ ilt_client->page_size = QM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+
+ /* 4 bytes for each cid */
+ line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+ QM_ILT_PAGE_SZ);
+
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ }
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+ BUG_ON(line > ILT_MAX_LINES);
+}
+
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @init_params: pointer to parameters structure
+ *
+ * parameters configured:
+ * - HC configuration
+ * - Queue's CDU context
+ */
+static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
+{
+
+ u8 cos;
+ /* FCoE Queue uses Default SB, thus has no HC capabilities */
+ if (!IS_FCOE_FP(fp)) {
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+		/* If HC is supported, enable host coalescing in the transition
+ * to INIT state.
+ */
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+ /* HC rate */
+ init_params->rx.hc_rate = bp->rx_ticks ?
+ (1000000 / bp->rx_ticks) : 0;
+ init_params->tx.hc_rate = bp->tx_ticks ?
+ (1000000 / bp->tx_ticks) : 0;
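+		/* (the tick values are coalescing intervals in usec, hence
+		 * the conversion to an events-per-second rate)
+		 */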
+
+ /* FW SB ID */
+ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+ fp->fw_sb_id;
+
+ /*
+		 * CQ index among the SB indices: the FCoE client uses the
+		 * default SB, therefore it's different.
+ */
+ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+ }
+
+ /* set maximum number of COSs supported by this queue */
+ init_params->max_cos = fp->max_cos;
+
+ DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
+ fp->index, init_params->max_cos);
+
+ /* set the context pointers queue object */
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ init_params->cxts[cos] =
+ &bp->context.vcxt[fp->txdata[cos].cid].eth;
+}
+
+int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_queue_state_params *q_params,
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params,
+ int tx_index, bool leading)
+{
+ memset(tx_only_params, 0, sizeof(*tx_only_params));
+
+ /* Set the command */
+ q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+
+ /* Set tx-only QUEUE flags: don't zero statistics */
+ tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
+
+ /* choose the index of the cid to send the slow path on */
+ tx_only_params->cid_index = tx_index;
+
+ /* Set general TX_ONLY_SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
+
+ /* Set Tx TX_ONLY_SETUP parameters */
+ bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
+
+ DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
+ "cos %d, primary cid %d, cid %d, "
+ "client id %d, sp-client id %d, flags %lx\n",
+ tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
+ q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
+ tx_only_params->gen_params.spcl_id, tx_only_params->flags);
+
+ /* send the ramrod */
+ return bnx2x_queue_state_change(bp, q_params);
+}
+
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @leading: is leading
+ *
+ * This function performs 2 steps in a Queue state machine
+ * actually: 1) RESET->INIT 2) INIT->SETUP
+ */
+
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_setup_params *setup_params =
+ &q_params.params.setup;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &q_params.params.tx_only;
+ int rc;
+ u8 tx_index;
+
+ DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
+
+	/* reset IGU state, skip FCoE L2 queue */
+ if (!IS_FCOE_FP(fp))
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+ IGU_INT_ENABLE, 0);
+
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* Prepare the INIT parameters */
+ bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_INIT;
+
+ /* Change the state to INIT */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
+ return rc;
+ }
+
+ DP(BNX2X_MSG_SP, "init complete\n");
+
+
+ /* Now move the Queue to the SETUP state... */
+ memset(setup_params, 0, sizeof(*setup_params));
+
+ /* Set QUEUE flags */
+ setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
+
+ /* Set general SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
+ FIRST_TX_COS_INDEX);
+
+ bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
+ &setup_params->rxq_params);
+
+ bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
+ FIRST_TX_COS_INDEX);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_SETUP;
+
+ /* Change the state to SETUP */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
+ return rc;
+ }
+
+ /* loop through the relevant tx-only indices */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++) {
+
+		/* prepare and send tx-only ramrod */
+ rc = bnx2x_setup_tx_only(bp, fp, &q_params,
+ tx_only_params, tx_index, leading);
+ if (rc) {
+ BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
+ fp->index, tx_index);
+ return rc;
+ }
+ }
+
+ return rc;
+}
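
Every transition above follows the same pattern: fill q_params, set q_params.cmd, call bnx2x_queue_state_change(). A toy model of the RESET->INIT->SETUP ladder that bnx2x_setup_queue() walks (types and transitions are illustrative, not the driver's):

#include <stdio.h>

enum q_state { Q_RESET, Q_INIT, Q_SETUP };
enum q_cmd   { Q_CMD_INIT, Q_CMD_SETUP };

/* Hypothetical stand-in for bnx2x_queue_state_change(): accept a
 * command only from the state in which it is legal. */
static int queue_state_change(enum q_state *st, enum q_cmd cmd)
{
	if (cmd == Q_CMD_INIT && *st == Q_RESET) { *st = Q_INIT; return 0; }
	if (cmd == Q_CMD_SETUP && *st == Q_INIT) { *st = Q_SETUP; return 0; }
	return -1;	/* illegal transition */
}

int main(void)
{
	enum q_state st = Q_RESET;

	/* mirrors bnx2x_setup_queue(): RESET->INIT, then INIT->SETUP */
	if (queue_state_change(&st, Q_CMD_INIT) ||
	    queue_state_change(&st, Q_CMD_SETUP))
		return 1;
	printf("queue reached SETUP\n");
	return 0;
}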
+
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_txdata *txdata;
+ struct bnx2x_queue_state_params q_params = {0};
+ int rc, tx_index;
+
+ DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
+
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+
+ /* close tx-only connections */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+	     tx_index++) {
+
+		/* ascertain this is a normal queue */
+ txdata = &fp->txdata[tx_index];
+
+ DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
+ txdata->txq_index);
+
+		/* send terminate ramrod on the tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = tx_index;
+
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+		/* send CFC delete ramrod on the tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = tx_index;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ }
+ /* Stop the primary connection: */
+ /* ...halt the connection */
+ q_params.cmd = BNX2X_Q_CMD_HALT;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* ...terminate the connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ /* ...delete cfc entry */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+ return bnx2x_queue_state_change(bp, &q_params);
+}
+
+
+static void bnx2x_reset_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int i;
+
+ /* Disable the function in the FW */
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+ /* FP SBs */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+ SB_DISABLED);
+ }
+
+#ifdef BCM_CNIC
+ /* CNIC SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
+ SB_DISABLED);
+#endif
+ /* SP SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
+
+ for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
+ 0);
+
+ /* Configure IGU */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ } else {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+#ifdef BCM_CNIC
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+ /*
+ * Wait for at least 10ms and up to 2 second for the timers scan to
+ * complete
+ */
+ for (i = 0; i < 200; i++) {
+ msleep(10);
+ if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+ break;
+ }
+#endif
+ /* Clear ILT */
+ bnx2x_clear_func_ilt(bp, func);
+
+	/* Timers workaround for an E2 bug: if this is vnic-3,
+	 * we need to set the entire ILT range for these timers.
+ */
+ if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
+ struct ilt_client_info ilt_cli;
+ /* use dummy TM client */
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
+ }
+
+	/* this assumes that reset_port() was called before reset_func() */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ bp->dmae_ready = 0;
+}
+
+static void bnx2x_reset_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ /* Reset physical Link */
+ bnx2x__link_reset(bp);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ /* Do not rcv packets to BRB */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+ /* Do not direct rcv packets that are not for MCP to the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+ /* Configure AEU */
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+ msleep(100);
+ /* Check for BRB port occupancy */
+ val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+ if (val)
+ DP(NETIF_MSG_IFDOWN,
+ "BRB1 is not empty %d blocks are occupied\n", val);
+
+ /* TODO: Close Doorbell port? */
+}
+
+static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_RESET;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static inline int bnx2x_func_stop(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+ int rc;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_STOP;
+
+ /*
+	 * Try to stop the function the 'good way'. If it fails (in case
+	 * of a parity error during bnx2x_chip_cleanup()) and we are
+	 * not in a debug mode, perform a state transaction in order to
+	 * enable a further HW_RESET transaction.
+ */
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return rc;
+#else
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
+ "transaction\n");
+ __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+ u32 reset_code = 0;
+ int port = BP_PORT(bp);
+
+ /* Select the UNLOAD request mode */
+ if (unload_mode == UNLOAD_NORMAL)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ else if (bp->flags & NO_WOL_FLAG)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+ else if (bp->wol) {
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u8 *mac_addr = bp->dev->dev_addr;
+ u32 val;
+		/* The MAC address is written to entries 1-4 to
+		   preserve entry 0, which is used by the PMF */
- /* Check if it is the UNDI driver
++ u8 entry = (BP_VN(bp) + 1)*8;
+
+ val = (mac_addr[0] << 8) | mac_addr[1];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+ } else
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ /* Send the request to the MCP */
+ if (!BP_NOMCP(bp))
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+ else {
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]--;
+ load_count[path][1 + port]--;
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+ else if (load_count[path][1 + port] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+ else
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+ }
+
+ return reset_code;
+}
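
The WoL branch above splits the 6-byte MAC across two 32-bit MAC_MATCH register values, offsetting each function's pair by entry = (vn + 1) * 8 so that entry 0 stays reserved for the PMF. A standalone sketch of the packing:

#include <stdint.h>
#include <stdio.h>

/* Pack a MAC the way the EMAC_REG_EMAC_MAC_MATCH writes above do:
 * the first word carries bytes 0-1, the second bytes 2-5. */
static void mac_to_match_regs(const uint8_t *mac, uint32_t *hi, uint32_t *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
	      (mac[4] << 8) | mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	uint32_t hi, lo;

	mac_to_match_regs(mac, &hi, &lo);
	/* prints hi=0x0010 lo=0x18abcdef */
	printf("hi=0x%04x lo=0x%08x\n", (unsigned)hi, (unsigned)lo);
	return 0;
}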
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp)
+{
+ /* Report UNLOAD_DONE to MCP */
+ if (!BP_NOMCP(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+}
+
+static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+ int tout = 50;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+ if (!bp->port.pmf)
+ return 0;
+
+	/*
+	 * (assumption: no attention from the MCP at this stage)
+	 * The PMF is probably in the middle of a TX disable/enable transaction:
+	 * 1. Sync the IRQ for the default SB
+	 * 2. Sync the SP queue - this guarantees that attention handling started
+	 * 3. Wait until the TX disable/enable transaction completes
+	 *
+	 * 1+2 guarantee that if a DCBx attention was scheduled it has already
+	 * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
+	 * if we have already received the completion for the transaction the
+	 * state is TX_STOPPED. The state will return to STARTED after the
+	 * TX_STOPPED-->STARTED transaction completes.
+	 */
+
+ /* make sure default SB ISR is done */
+ if (msix)
+ synchronize_irq(bp->msix_table[0].vector);
+ else
+ synchronize_irq(bp->pdev->irq);
+
+ flush_workqueue(bnx2x_wq);
+
+ while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED && tout--)
+ msleep(20);
+
+ if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return -EBUSY;
+#else
+ /*
+ * Failed to complete the transaction in a "good way"
+ * Force both transactions with CLR bit
+ */
+ struct bnx2x_func_state_params func_params = {0};
+
+ DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
+ "Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+
+ func_params.f_obj = &bp->func_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
+
+		/* STARTED-->TX_STOPPED */
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ bnx2x_func_state_change(bp, &func_params);
+
+		/* TX_STOPPED-->STARTED */
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+ int port = BP_PORT(bp);
+ int i, rc = 0;
+ u8 cos;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ u32 reset_code;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (rc)
+ return;
+#endif
+ }
+
+ /* Give HW time to discard old tx messages */
+ usleep_range(1000, 1000);
+
+ /* Clean all ETH MACs */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+ /* Clean up UC list */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ true);
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
+ "%d\n", rc);
+
+ /* Disable LLH */
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+ /* Set "drop all" (stop Rx).
+ * We need to take a netif_addr_lock() here in order to prevent
+ * a race between the completion code and this code.
+ */
+ netif_addr_lock_bh(bp->dev);
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ else
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Cleanup multicast configuration */
+ rparam.mcast_obj = &bp->mcast_obj;
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+ netif_addr_unlock_bh(bp->dev);
+
+ /*
+ * Send the UNLOAD_REQUEST to the MCP. This will return if
+ * this function should perform FUNC, PORT or COMMON HW
+ * reset.
+ */
+ reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TXdisable/enable transaction
+ */
+ rc = bnx2x_func_wait_started(bp);
+ if (rc) {
+ BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Close multi and leading connections
+ * Completions for ramrods are collected in a synchronous way
+ */
+ for_each_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+	/* If the SP settings didn't get completed by now, something
+	 * has gone very wrong.
+ */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+ BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+ rc = bnx2x_func_stop(bp);
+ if (rc) {
+ BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Reset the chip */
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
+ /* #2 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
+ }
+
+ /* #3 */
+ if (CHIP_IS_E1x(bp)) {
+ /* Prevent interrupts from HC on both ports */
+ val = REG_RD(bp, HC_REG_CONFIG_1);
+ REG_WR(bp, HC_REG_CONFIG_1,
+ (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+ (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+ val = REG_RD(bp, HC_REG_CONFIG_0);
+ REG_WR(bp, HC_REG_CONFIG_0,
+ (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+ (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+ } else {
+		/* Prevent incoming interrupts in IGU */
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+ (!close) ?
+ (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+ (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ }
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/**
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of 'magic' bit.
+ *
+ * Takes care of CLP configurations.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+/**
+ * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
+ *
+ * @bp: driver handle
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+ /* special handling for emulation and FPGA,
+ wait 10 times longer */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
+{
+ int cnt = 0;
+ u32 val = 0;
+
+ do {
+ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ if (bp->common.shmem_base) {
+ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ if (val & SHR_MEM_VALIDITY_MB)
+ return 0;
+ }
+
+ bnx2x_mcp_wait_one(bp);
+
+ } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
+
+ BNX2X_ERR("BAD MCP validity signature\n");
+
+ return -ENODEV;
+}
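
bnx2x_init_shmem() is the standard bounded-poll idiom: re-read the base, test the validity bit, sleep one step, give up after MCP_TIMEOUT. A generic userspace sketch of the same loop shape (the readiness condition here is made up):

#include <stdbool.h>
#include <stdio.h>

#define POLL_TIMEOUT_MS	5000	/* mirrors MCP_TIMEOUT */
#define POLL_STEP_MS	100	/* mirrors MCP_ONE_TIMEOUT */

/* Hypothetical condition; in the driver this is the
 * SHR_MEM_VALIDITY_MB bit of the shmem validity map. */
static bool condition_ready(int iteration)
{
	return iteration >= 3;	/* pretend it comes up on the 4th poll */
}

int main(void)
{
	int cnt = 0;

	do {
		if (condition_ready(cnt)) {
			printf("ready after ~%d ms\n", cnt * POLL_STEP_MS);
			return 0;
		}
		/* msleep(POLL_STEP_MS) in the driver */
	} while (cnt++ < POLL_TIMEOUT_MS / POLL_STEP_MS);

	fprintf(stderr, "timed out\n");	/* -ENODEV in the driver */
	return 1;
}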
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ int rc = bnx2x_init_shmem(bp);
+
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ mmiowb();
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+ u32 global_bits2, stay_reset2;
+
+ /*
+ * Bits that have to be set in reset_mask2 if we want to reset 'global'
+ * (per chip) blocks.
+ */
+ global_bits2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+
+ /* Don't reset the following blocks */
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+ MISC_REGISTERS_RESET_REG_2_RST_ATC |
+ MISC_REGISTERS_RESET_REG_2_PGLC;
+
+ /*
+ * Keep the following blocks in reset:
+ * - all xxMACs are handled by the bnx2x_link code.
+ */
+ stay_reset2 =
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 |
+ MISC_REGISTERS_RESET_REG_2_UMAC1 |
+ MISC_REGISTERS_RESET_REG_2_XMAC |
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+ /* Full reset masks according to the chip */
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else if (CHIP_IS_E1H(bp))
+ reset_mask2 = 0x1ffff;
+ else if (CHIP_IS_E2(bp))
+ reset_mask2 = 0xfffff;
+ else /* CHIP_IS_E3 */
+ reset_mask2 = 0x3ffffff;
+
+ /* Don't reset global blocks unless we need to */
+ if (!global)
+ reset_mask2 &= ~global_bits2;
+
+ /*
+ * In case of attention in the QM, we need to reset PXP
+ * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+ * because otherwise QM reset would release 'close the gates' shortly
+ * before resetting the PXP, then the PSWRQ would send a write
+ * request to PGLUE. Then when PXP is reset, PGLUE would try to
+ * read the payload data from PSWWR, but PSWWR would not
+	 * respond. The write queue in PGLUE would get stuck, and DMAE commands
+ * would not return. Therefore it's important to reset the second
+ * reset register (containing the
+ * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+ * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+ * bit).
+ */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ reset_mask2 & (~stay_reset2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ mmiowb();
+}
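
The mask arithmetic above is plain set algebra: the CLEAR register is written with the full mask minus the never-reset blocks (asserting reset), and the SET register with the full mask minus the stay-in-reset blocks (releasing it). A small sketch with made-up block bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical block bits standing in for the
	 * MISC_REGISTERS_RESET_REG_* definitions. */
	const uint32_t full_mask  = 0xffffffff;
	const uint32_t not_reset  = (1u << 3) | (1u << 7);	/* keep running */
	const uint32_t stay_reset = (1u << 12);			/* leave in reset */

	uint32_t clear_val = full_mask & ~not_reset;	/* assert reset */
	uint32_t set_val   = full_mask & ~stay_reset;	/* release reset */

	printf("CLEAR=0x%08x SET=0x%08x\n",
	       (unsigned)clear_val, (unsigned)set_val);
	return 0;
}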
+
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit
+ *
+ * @bp: driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+	int cnt = 1000;
+ u32 pend_bits = 0;
+
+ do {
+ pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+ if (pend_bits == 0)
+ break;
+
+ usleep_range(1000, 1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+ pend_bits);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff))
+ break;
+ usleep_range(1000, 1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+ " are still"
+ " outstanding read requests after 1s!\n");
+ DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+ " port_is_idle_0=0x%08x,"
+ " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* Poll for IGU VQs for 57712 and newer chips */
+ if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+ return -EAGAIN;
+
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Make sure all is written to the chip before the reset */
+ mmiowb();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ usleep_range(1000, 1000);
+
+	/* Prepare for chip reset: */
+ /* MCP */
+ if (global)
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp, global);
+ barrier();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (global && bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* TBD: Add resetting the NO_MCP mode DB here */
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
+ * reset state, re-enable attentions. */
+
+ return 0;
+}
+
+int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ bool global = bnx2x_reset_is_global(bp);
+
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp, global)) {
+ netdev_err(bp->dev, "Something bad had happen on engine %d! "
+ "Aii!\n", BP_PATH(bp));
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+
+ /*
+	 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
+ * state.
+ */
+ bnx2x_set_reset_done(bp);
+ if (global)
+ bnx2x_clear_reset_global(bp);
+
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+ return rc;
+}
+
+static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+ netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+
+ /*
+	 * Block ifup for all functions on this engine until "process kill"
+ * or power cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Shut down the power */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensure that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ bool global = false;
+
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ bnx2x_chk_parity_attn(bp, &global, false);
+
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ bnx2x_set_reset_in_progress(bp);
+ /*
+				 * If there was a global attention, set the
+				 * global reset bit.
+ */
+
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ bp->is_leader = 1;
+ }
+
+ /* Stop the driver */
+ /* If interface has been removed - break */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+
+ /*
+ * Reset MCP command sequence number and MCP mail box
+ * sequence as we are going to reset the MCP.
+ */
+ if (global) {
+ bp->fw_seq = 0;
+ bp->fw_drv_pulse_wr_seq = 0;
+ }
+
+ /* Ensure "is_leader", MCP command sequence and
+ * "recovery_state" update values are seen on other
+ * CPUs.
+ */
+ smp_mb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter =
+ bnx2x_get_load_cnt(bp, other_engine);
+ u32 load_counter =
+ bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ global = bnx2x_reset_is_global(bp);
+
+ /*
+ * In case of a parity in a global block, let
+ * the first leader that performs a
+ * leader_reset() reset the global blocks in
+ * order to clear global attentions. Otherwise
+				 * the gates will remain closed for that
+ * engine.
+ */
+ if (load_counter ||
+ (global && other_load_counter)) {
+ /* Wait until all other functions get
+ * down.
+ */
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions got down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp)) {
+ bnx2x_recovery_failed(bp);
+ return;
+ }
+
+				/* If we are here, it means that the
+				 * leader has succeeded and no longer
+				 * wants to be a leader. Try to
+				 * continue as a non-leader.
+ */
+ break;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
+				/* Try to get the LEADER_LOCK HW lock,
+				 * since a former leader may have been
+				 * unloaded by the user or released
+				 * leadership for some other reason.
+ */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+
+ } else {
+ /*
+ * If there was a global attention, wait
+ * for it to be cleared.
+ */
+ if (bnx2x_reset_is_global(bp)) {
+ schedule_delayed_work(
+ &bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ }
+
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ bnx2x_recovery_failed(bp);
+ else {
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_mb();
+ }
+
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
+ * scheduled on a generic workqueue in order to prevent a deadlock.
+ */
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+
+ rtnl_lock();
+
+ if (!netif_running(bp->dev))
+ goto sp_rtnl_exit;
+
+ /* if stop on error is defined no recovery flows should be executed */
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
+ "so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
+#endif
+
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ bnx2x_parity_recover(bp);
+
+ goto sp_rtnl_exit;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+
+ goto sp_rtnl_exit;
+ }
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
+ if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+
+sp_rtnl_exit:
+ rtnl_unlock();
+}
+
+/* end of nic load/unload */
+
+static void bnx2x_period_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+ if (!netif_running(bp->dev))
+ goto period_task_exit;
+
+ if (CHIP_REV_IS_SLOW(bp)) {
+ BNX2X_ERR("period task called on emulation, ignoring\n");
+ goto period_task_exit;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ /*
+ * The barrier is needed to ensure the ordering between the writing to
+ * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+ * the reading here.
+ */
+ smp_mb();
+ if (bp->port.pmf) {
+ bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+ /* Re-queue task in 1 sec */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+ }
+
+ bnx2x_release_phy_lock(bp);
+period_task_exit:
+ return;
+}
+
+/*
+ * Init service functions
+ */
+
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+{
+ u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
+ return base + (BP_ABS_FUNC(bp)) * stride;
+}
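
bnx2x_get_pretend_reg() shows the usual per-function register bank addressing: base + func * stride, with the stride derived from two consecutive register definitions. A minimal sketch with hypothetical offsets:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical offsets standing in for PXP2_REG_PGL_PRETEND_FUNC_F0/F1. */
#define REG_FUNC_F0	0x120000u
#define REG_FUNC_F1	0x120004u

static uint32_t pretend_reg(unsigned int abs_func)
{
	uint32_t base   = REG_FUNC_F0;
	uint32_t stride = REG_FUNC_F1 - REG_FUNC_F0;

	return base + abs_func * stride;
}

int main(void)
{
	printf("func 5 -> 0x%x\n", (unsigned)pretend_reg(5));	/* 0x120014 */
	return 0;
}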
+
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
+{
+ u32 reg = bnx2x_get_pretend_reg(bp);
+
+ /* Flush all outstanding writes */
+ mmiowb();
+
+ /* Pretend to be function 0 */
+ REG_WR(bp, reg, 0);
+ REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
+
+ /* From now we are in the "like-E1" mode */
+ bnx2x_int_disable(bp);
+
+ /* Flush all outstanding writes */
+ mmiowb();
+
+ /* Restore the original function */
+ REG_WR(bp, reg, BP_ABS_FUNC(bp));
+ REG_RD(bp, reg);
+}
+
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ bnx2x_int_disable(bp);
+ else
+ bnx2x_undi_int_disable_e1h(bp);
+}
+
+static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+{
+ u32 val;
+
+ /* Check if there is any driver already loaded */
+ val = REG_RD(bp, MISC_REG_UNPREPARED);
+ if (val == 0x1) {
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
++ /*
++ * Check if it is the UNDI driver
+	 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
+ */
- /* now it's safe to release the lock */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
+ val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+ if (val == 0x7) {
+ u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+ /* save our pf_num */
+ int orig_pf_num = bp->pf_num;
+ int port;
+ u32 swap_en, swap_val, value;
+
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+
+ BNX2X_DEV_INFO("UNDI is active! reset device\n");
+
+ /* try unload UNDI on port 0 */
+ bp->pf_num = 0;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+
+ /* if UNDI is loaded on the other port */
+ if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+
+ /* send "DONE" for previous unload */
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+ /* unload UNDI on port 1 */
+ bp->pf_num = 1;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ bnx2x_fw_command(bp, reset_code, 0);
+ }
+
- } else
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ bnx2x_undi_int_disable(bp);
+ port = BP_PORT(bp);
+
+ /* close input traffic and wait for it */
+ /* Do not rcv packets to BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+ /* Do not direct rcv packets that are not for MCP to
+ * the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+ /* clear AEU */
+ REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+ msleep(10);
+
+ /* save NIG port swap info */
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ /* reset device */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffffff);
+
+ value = 0x1400;
+ if (CHIP_IS_E3(bp)) {
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ value);
+
+ /* take the NIG out of reset and restore swap values */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ MISC_REGISTERS_RESET_REG_1_RST_NIG);
+ REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+ REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+ /* send unload done to the MCP */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+ /* restore our func and fw_seq */
+ bp->pf_num = orig_pf_num;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
- int vn = BP_E1HVN(bp);
++ }
++
++ /* now it's safe to release the lock */
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ }
+}
+
+static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2, val3, val4, id;
+ u16 pmc;
+
+ /* Get the chip revision id and number. */
+ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+ val = REG_RD(bp, MISC_REG_CHIP_NUM);
+ id = ((val & 0xffff) << 16);
+ val = REG_RD(bp, MISC_REG_CHIP_REV);
+ id |= ((val & 0xf) << 12);
+ val = REG_RD(bp, MISC_REG_CHIP_METAL);
+ id |= ((val & 0xff) << 4);
+ val = REG_RD(bp, MISC_REG_BOND_ID);
+ id |= (val & 0xf);
+ bp->common.chip_id = id;
+
+ /* Set doorbell size */
+ bp->db_size = (1 << BNX2X_DB_SHIFT);
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if ((val & 1) == 0)
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
+ else
+ val = (val >> 1) & 1;
+ BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
+ "2_PORT_MODE");
+ bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
+ CHIP_2_PORT_MODE;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bp->pfid = (bp->pf_num >> 1); /* 0..3 */
+ else
+ bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
+ } else {
+ bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
+ bp->pfid = bp->pf_num; /* 0..7 */
+ }
+
+ bp->link_params.chip_id = bp->common.chip_id;
+ BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+
+ val = (REG_RD(bp, 0x2874) & 0x55);
+ if ((bp->common.chip_id & 0x1) ||
+ (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+ bp->flags |= ONE_PORT_FLAG;
+ BNX2X_DEV_INFO("single port device\n");
+ }
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+ bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
+ (val & MCPR_NVM_CFG4_FLASH_SIZE));
+ BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+ bp->common.flash_size, bp->common.flash_size);
+
+ bnx2x_init_shmem(bp);
+
+ bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
+ MISC_REG_GENERIC_CR_1 :
+ MISC_REG_GENERIC_CR_0));
+
+ bp->link_params.shmem_base = bp->common.shmem_base;
+ bp->link_params.shmem2_base = bp->common.shmem2_base;
+ BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
+ bp->common.shmem_base, bp->common.shmem2_base);
+
+ if (!bp->common.shmem_base) {
+ BNX2X_DEV_INFO("MCP not active\n");
+ bp->flags |= NO_MCP_FLAG;
+ return;
+ }
+
+ bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+ BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+
+ bp->link_params.hw_led_mode = ((bp->common.hw_config &
+ SHARED_HW_CFG_LED_MODE_MASK) >>
+ SHARED_HW_CFG_LED_MODE_SHIFT);
+
+ bp->link_params.feature_config_flags = 0;
+ val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+ if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+ bp->link_params.feature_config_flags |=
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+ else
+ bp->link_params.feature_config_flags &=
+ ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+
+ val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+ bp->common.bc_ver = val;
+ BNX2X_DEV_INFO("bc_ver %X\n", val);
+ if (val < BNX2X_BC_VER) {
+		/* for now only warn;
+		 * later we might need to enforce this */
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
+ }
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+
+ BNX2X_DEV_INFO("%sWoL capable\n",
+ (bp->flags & NO_WOL_FLAG) ? "not " : "");
+
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+ val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+ val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+ val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
+}
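
The chip id assembled at the top of bnx2x_get_common_hwinfo() follows the documented layout (num: bits 16-31, rev: 12-15, metal: 4-11, bond: 0-3). A standalone sketch of the packing, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

static uint32_t make_chip_id(uint32_t num, uint32_t rev,
			     uint32_t metal, uint32_t bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}

int main(void)
{
	/* e.g. num=0x164e, rev=1, metal=0, bond=0 -> 0x164e1000 */
	printf("chip id 0x%08x\n",
	       (unsigned)make_chip_id(0x164e, 0x1, 0x00, 0x0));
	return 0;
}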
+
+#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
+static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
+{
+ int pfid = BP_FUNC(bp);
- vn = BP_E1HVN(bp);
+ int igu_sb_id;
+ u32 val;
+ u8 fid, igu_sb_cnt = 0;
+
+ bp->igu_base_sb = 0xff;
+ if (CHIP_INT_MODE_IS_BC(bp)) {
++ int vn = BP_VN(bp);
+ igu_sb_cnt = bp->igu_sb_cnt;
+ bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
+ FP_SB_MAX_E1x;
+
+ bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
+ (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
+
+ return;
+ }
+
+ /* IGU in normal mode - read CAM */
+ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
+ igu_sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = IGU_FID(val);
+ if ((fid & IGU_FID_ENCODE_IS_PF)) {
+ if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
+ continue;
+ if (IGU_VEC(val) == 0)
+ /* default status block */
+ bp->igu_dsb_id = igu_sb_id;
+ else {
+ if (bp->igu_base_sb == 0xff)
+ bp->igu_base_sb = igu_sb_id;
+ igu_sb_cnt++;
+ }
+ }
+ }
+
+#ifdef CONFIG_PCI_MSI
+ /*
+	 * It's expected that the number of CAM entries for this function is equal
+ * to the number evaluated based on the MSI-X table size. We want a
+ * harsh warning if these values are different!
+ */
+ WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
+#endif
+
+ if (igu_sb_cnt == 0)
+ BNX2X_ERR("CAM configuration error\n");
+}
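
Walking the IGU CAM boils down to mask-and-shift field extraction on each 32-bit entry, as the IGU_FID()/IGU_VEC() macros do. A sketch with a hypothetical field layout (the real masks come from the IGU_REG_MAPPING_MEMORY_* definitions):

#include <stdint.h>
#include <stdio.h>

#define CAM_VALID	(1u << 0)
#define CAM_FID_MASK	0x7eu		/* bits 1..6, hypothetical */
#define CAM_FID_SHIFT	1
#define CAM_VEC_MASK	0x3f80u		/* bits 7..13, hypothetical */
#define CAM_VEC_SHIFT	7

static unsigned int cam_fid(uint32_t e)
{
	return (e & CAM_FID_MASK) >> CAM_FID_SHIFT;
}

static unsigned int cam_vec(uint32_t e)
{
	return (e & CAM_VEC_MASK) >> CAM_VEC_SHIFT;
}

int main(void)
{
	uint32_t entry = CAM_VALID | (3u << CAM_FID_SHIFT) |
			 (2u << CAM_VEC_SHIFT);

	if (entry & CAM_VALID)	/* skip invalid entries, as the loop above does */
		printf("fid=%u vector=%u\n", cam_fid(entry), cam_vec(entry));
	return 0;
}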
+
+static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
+ u32 switch_cfg)
+{
+ int cfg_size = 0, idx, port = BP_PORT(bp);
+
+ /* Aggregation of supported attributes of all external phys */
+ bp->port.supported[0] = 0;
+ bp->port.supported[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+ cfg_size = 1;
+ break;
+ case 2:
+ bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+ cfg_size = 1;
+ break;
+ case 3:
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ } else {
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ }
+ cfg_size = 2;
+ break;
+ }
+
+ if (!(bp->port.supported[0] || bp->port.supported[1])) {
+ BNX2X_ERR("NVRAM config error. BAD phy config."
+ "PHY1 config 0x%x, PHY2 config 0x%x\n",
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config),
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config2));
+ return;
+ }
+
+ if (CHIP_IS_E3(bp))
+ bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+ else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+ break;
+ case SWITCH_CFG_10G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+ break;
+ default:
+ BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+ bp->port.link_config[0]);
+ return;
+ }
+ }
+ BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+ /* mask what we support according to speed_cap_mask per configuration */
+ for (idx = 0; idx < cfg_size; idx++) {
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+ bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+ bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+
+ }
+
+ BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+ bp->port.supported[1]);
+}
+
+static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+ u32 link_config, idx, cfg_size = 0;
+ bp->port.advertising[0] = 0;
+ bp->port.advertising[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ case 2:
+ cfg_size = 1;
+ break;
+ case 3:
+ cfg_size = 2;
+ break;
+ }
+ for (idx = 0; idx < cfg_size; idx++) {
+ bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+ link_config = bp->port.link_config[idx];
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_AUTO:
+ if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] |=
+ bp->port.supported[idx];
+ } else {
+ /* force 10G, no AN */
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ continue;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_1G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_1000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_2500baseX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_2500;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_2500baseX_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ if (bp->port.supported[idx] &
+ SUPPORTED_10000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+ case PORT_FEATURE_LINK_SPEED_20G:
+ bp->link_params.req_line_speed[idx] = SPEED_20000;
+
+ break;
+ default:
+ BNX2X_ERR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ link_config);
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] =
+ bp->port.supported[idx];
+ break;
+ }
+
+ bp->link_params.req_flow_ctrl[idx] = (link_config &
+ PORT_FEATURE_FLOW_CONTROL_MASK);
+ if ((bp->link_params.req_flow_ctrl[idx] ==
+ BNX2X_FLOW_CTRL_AUTO) &&
+ !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
+ bp->link_params.req_flow_ctrl[idx] =
+ BNX2X_FLOW_CTRL_NONE;
+ }
+
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
+ " 0x%x advertising 0x%x\n",
+ bp->link_params.req_line_speed[idx],
+ bp->link_params.req_duplex[idx],
+ bp->link_params.req_flow_ctrl[idx],
+ bp->port.advertising[idx]);
+ }
+}
+
+static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
+ mac_hi = cpu_to_be16(mac_hi);
+ mac_lo = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
+ memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+}
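
bnx2x_set_mac_buf() leans on cpu_to_be16()/cpu_to_be32() so the buffer ends up in wire order on any host endianness. A userspace analogue using htons()/htonl():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same layout as bnx2x_set_mac_buf(): hi supplies the first two MAC
 * bytes, lo the remaining four, both byte-swapped to big endian. */
static void set_mac_buf(uint8_t *mac, uint32_t lo, uint16_t hi)
{
	uint16_t be_hi = htons(hi);
	uint32_t be_lo = htonl(lo);

	memcpy(mac, &be_hi, sizeof(be_hi));
	memcpy(mac + sizeof(be_hi), &be_lo, sizeof(be_lo));
}

int main(void)
{
	uint8_t mac[6];

	set_mac_buf(mac, 0x18abcdef, 0x0010);
	/* prints 00:10:18:ab:cd:ef regardless of host endianness */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}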
+
+static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 config;
+ u32 ext_phy_type, ext_phy_config;
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = port;
+
+ bp->link_params.lane_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+
+ bp->link_params.speed_cap_mask[0] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask);
+ bp->link_params.speed_cap_mask[1] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask2);
+ bp->port.link_config[0] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+ bp->port.link_config[1] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+
+ bp->link_params.multi_phy_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
+ /* If the device is capable of WoL, set the default state according
+ * to the HW
+ */
+ config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+ bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+ (config & PORT_FEATURE_WOL_ENABLED));
+
+ BNX2X_DEV_INFO("lane_config 0x%08x "
+ "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
+ bp->link_params.lane_config,
+ bp->link_params.speed_cap_mask[0],
+ bp->port.link_config[0]);
+
+ bp->link_params.switch_cfg = (bp->port.link_config[0] &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ bnx2x_phy_probe(&bp->link_params);
+ bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+
+ bnx2x_link_settings_requested(bp);
+
+ /*
+ * If connected directly, work with the internal PHY, otherwise, work
+ * with the external PHY
+ */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bp->mdio.prtad = bp->port.phy_addr;
+
+ else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ bp->mdio.prtad =
+ XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+ /*
+ * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
+ * In MF mode, it is set to cover self test cases
+ */
+ if (IS_MF(bp))
+ bp->port.need_hw_lock = 1;
+ else
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
+}
+
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_iscsi_conn);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+
+ /* Get the number of maximum allowed iSCSI and FCoE connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ /* Read the WWN: */
+ if (!IS_MF(bp)) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_lower);
+ } else if (!IS_MF_SD(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+
+ /*
+ * Read the WWN info only if the FCoE feature is enabled for
+ * this function.
+ */
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_lower);
+ }
+ }
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn,
+ bp->cnic_eth_dev.max_fcoe_conn);
+
+ /*
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (!bp->cnic_eth_dev.max_fcoe_conn)
+ bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+#endif
+
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ random_ether_addr(bp->dev->dev_addr);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
+		 * MAC is missing, the corresponding feature should be disabled.
+ */
+ if (IS_MF_SI(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+ if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ } else
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+ BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ fip_mac);
+
+ } else
+ bp->flags |= NO_FCOE_FLAG;
+ }
+#endif
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+#endif
+ }
+
+ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+
+#ifdef BCM_CNIC
+ /* Set the FCoE MAC in MF_SD mode */
+ if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+
+ /* Disable iSCSI if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+
+ /* Disable FCoE if MAC configuration is invalid */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
+#endif
+
+ if (!is_valid_ether_addr(bp->dev->dev_addr))
+ dev_err(&bp->pdev->dev,
+ "bad Ethernet MAC address configuration: "
+ "%pM, change it manually before bringing up "
+ "the appropriate network interface\n",
+ bp->dev->dev_addr);
+}
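For reference, the mac_upper/mac_lower shmem words carry the station address in big-endian order: the upper word holds the first two MAC bytes and the lower word the remaining four. Below is a sketch of the packing bnx2x_set_mac_buf() is expected to perform; this is an editorial illustration under that assumption, not the driver's actual helper:

/* Editorial sketch of the assumed byte layout; verify against the
 * real bnx2x_set_mac_buf() before relying on it. */
static void example_set_mac_buf(u8 *mac_buf, u32 mac_lo, u32 mac_hi)
{
	mac_buf[0] = (mac_hi >> 8) & 0xff;	/* first two MAC bytes */
	mac_buf[1] = mac_hi & 0xff;
	mac_buf[2] = (mac_lo >> 24) & 0xff;	/* remaining four bytes */
	mac_buf[3] = (mac_lo >> 16) & 0xff;
	mac_buf[4] = (mac_lo >> 8) & 0xff;
	mac_buf[5] = mac_lo & 0xff;
}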
+
+static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+ int /*abs*/func = BP_ABS_FUNC(bp);
+ int vn;
+ u32 val = 0;
+ int rc = 0;
+
+ bnx2x_get_common_hwinfo(bp);
+
+ /*
+ * initialize IGU parameters
+ */
+ if (CHIP_IS_E1x(bp)) {
+ bp->common.int_block = INT_BLOCK_HC;
+
+ bp->igu_dsb_id = DEF_SB_IGU_ID;
+ bp->igu_base_sb = 0;
+ } else {
+ bp->common.int_block = INT_BLOCK_IGU;
++
++ /* do not allow device reset during IGU info processing */
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
++
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ int tout = 5000;
+
+ BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+ val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
+ while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ tout--;
+ usleep_range(1000, 1000);
+ }
+
+ if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ dev_err(&bp->pdev->dev,
+ "FORCING Normal Mode failed!!!\n");
+ return -EPERM;
+ }
+ }
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
+ bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
+ } else
+ BNX2X_DEV_INFO("IGU Normal Mode\n");
+
+ bnx2x_get_igu_cam_info(bp);
+
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ }
+
+ /*
+ * set base FW non-default (fast path) status block id, this value is
+ * used to initialize the fw_sb_id saved on the fp/queue structure to
+ * determine the id used by the FW.
+ */
+ if (CHIP_IS_E1x(bp))
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+ else /*
+ * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+ * the same queue are indicated on the same IGU SB). So we prefer
+ * FW and IGU SBs to be the same value.
+ */
+ bp->base_fw_ndsb = bp->igu_base_sb;
+
+ BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
+ "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+ bp->igu_sb_cnt, bp->base_fw_ndsb);
+
+ /*
+ * Initialize MF configuration
+ */
+
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
-
++ vn = BP_VN(bp);
+
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+ bp->common.shmem2_base, SHMEM2_RD(bp, size),
+ (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
+ if (SHMEM2_HAS(bp, mf_cfg_addr))
+ bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+ else
+ bp->common.mf_cfg_base = bp->common.shmem_base +
+ offsetof(struct shmem_region, func_mb) +
+ E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+ /*
+ * get mf configuration:
+ * 1. existence of MF configuration
+ * 2. MAC address must be legal (check only upper bytes)
+ * for Switch-Independent mode;
+ * OVLAN must be legal for Switch-Dependent mode
+ * 3. SF_MODE configures specific MF mode
+ */
+ if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ /* get mf configuration */
+ val = SHMEM_RD(bp,
+ dev_info.shared_feature_config.config);
+ val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+ switch (val) {
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+ val = MF_CFG_RD(bp, func_mf_config[func].
+ mac_upper);
+ /* check for legal mac (upper bytes)*/
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal MAC address "
+ "for SI\n");
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+ /* get OV configuration */
+ val = MF_CFG_RD(bp,
+ func_mf_config[FUNC_0].e1hov_tag);
+ val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal OV for SD\n");
+ break;
+ default:
+ /* Unknown configuration: reset mf_config */
+ bp->mf_config[vn] = 0;
+ BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
+ }
+ }
+
+ BNX2X_DEV_INFO("%s function mode\n",
+ IS_MF(bp) ? "multi" : "single");
+
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_ov = val;
+ bp->path_has_ovlan = true;
+
+ BNX2X_DEV_INFO("MF OV for func %d is %d "
+ "(0x%04x)\n", func, bp->mf_ov,
+ bp->mf_ov);
+ } else {
+ dev_err(&bp->pdev->dev,
+ "No valid MF OV for func %d, "
+ "aborting\n", func);
+ return -EPERM;
+ }
+ break;
+ case MULTI_FUNCTION_SI:
+ BNX2X_DEV_INFO("func %d is in MF "
+ "switch-independent mode\n", func);
+ break;
+ default:
+ if (vn) {
+ dev_err(&bp->pdev->dev,
+ "VN %d is in a single function mode, "
+ "aborting\n", vn);
+ return -EPERM;
+ }
+ break;
+ }
+
+ /* Check if the other port on the path needs ovlan:
+ * since the MF configuration is shared between ports,
+ * the only possible mixed modes are
+ * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}.
+ */
+ if (CHIP_MODE_IS_4_PORT(bp) &&
+ !bp->path_has_ovlan &&
+ !IS_MF(bp) &&
+ bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ u8 other_port = !BP_PORT(bp);
+ u8 other_func = BP_PATH(bp) + 2*other_port;
+ val = MF_CFG_RD(bp,
+ func_mf_config[other_func].e1hov_tag);
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+ bp->path_has_ovlan = true;
+ }
+ }
+
+ /* adjust igu_sb_cnt to MF for E1x */
+ if (CHIP_IS_E1x(bp) && IS_MF(bp))
+ bp->igu_sb_cnt /= E1HVN_MAX;
+
+ /* port info */
+ bnx2x_get_port_hwinfo(bp);
+
- /* Clean the following indirect addresses for all functions since it
+ /* Get MAC addresses */
+ bnx2x_get_mac_hwinfo(bp);
+
+#ifdef BCM_CNIC
+ bnx2x_get_cnic_info(bp);
+#endif
+
+ /* Get current FW pulse sequence */
+ if (!BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+ }
+
+ return rc;
+}
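The "FORCING Normal Mode" sequence above is an instance of a common bounded poll-until-clear idiom: trigger the reset, then re-read the register with a retry budget before declaring failure. Factored out, the pattern looks roughly like this (hypothetical helper, not in the driver):

/* Hypothetical helper making the bounded-wait idiom used for
 * IGU_REG_RESET_MEMORIES explicit; not part of the patch. */
static int example_poll_reg_clear(struct bnx2x *bp, u32 reg, int tries)
{
	while (tries-- && REG_RD(bp, reg))
		usleep_range(1000, 2000);

	return REG_RD(bp, reg) ? -ETIMEDOUT : 0;
}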
+
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ int cnt, i, block_end, rodi;
+ char vpd_data[BNX2X_VPD_LEN+1];
+ char str_id_reg[VENDOR_ID_LEN+1];
+ char str_id_cap[VENDOR_ID_LEN+1];
+ u8 len;
+
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ if (cnt < BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&vpd_data[i]);
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (rodi < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ if (len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ /* vendor specific info */
+ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+ if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+ !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (rodi >= 0) {
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], len);
+ bp->fw_ver[len] = ' ';
+ }
+ }
+ return;
+ }
+out_not_found:
+ return;
+}
+
+static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+ u32 flags = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ SET_FLAGS(flags, MODE_FPGA);
+ else if (CHIP_REV_IS_EMUL(bp))
+ SET_FLAGS(flags, MODE_EMUL);
+ else
+ SET_FLAGS(flags, MODE_ASIC);
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ SET_FLAGS(flags, MODE_PORT4);
+ else
+ SET_FLAGS(flags, MODE_PORT2);
+
+ if (CHIP_IS_E2(bp))
+ SET_FLAGS(flags, MODE_E2);
+ else if (CHIP_IS_E3(bp)) {
+ SET_FLAGS(flags, MODE_E3);
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ SET_FLAGS(flags, MODE_E3_A0);
+ else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+ SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+ }
+
+ if (IS_MF(bp)) {
+ SET_FLAGS(flags, MODE_MF);
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ SET_FLAGS(flags, MODE_MF_SD);
+ break;
+ case MULTI_FUNCTION_SI:
+ SET_FLAGS(flags, MODE_MF_SI);
+ break;
+ }
+ } else
+ SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+ SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+ SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+ INIT_MODE_FLAGS(bp) = flags;
+}
+
+static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+{
+ int func;
+ int timer_interval;
+ int rc;
+
+ mutex_init(&bp->port.phy_mutex);
+ mutex_init(&bp->fw_mb_mutex);
+ spin_lock_init(&bp->stats_lock);
+#ifdef BCM_CNIC
+ mutex_init(&bp->cnic_mutex);
+#endif
+
+ INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+ INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+ INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+
+ bnx2x_set_modes_bitmap(bp);
+
+ rc = bnx2x_alloc_mem_bp(bp);
+ if (rc)
+ return rc;
+
+ bnx2x_read_fwinfo(bp);
+
+ func = BP_FUNC(bp);
+
+ /* need to reset chip if undi was active */
+ if (!BP_NOMCP(bp))
+ bnx2x_undi_unload(bp);
+
++ /* init fw_seq after undi_unload! */
++ if (!BP_NOMCP(bp)) {
++ bp->fw_seq =
++ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
++ DRV_MSG_SEQ_NUMBER_MASK);
++ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
++ }
++
+ if (CHIP_REV_IS_FPGA(bp))
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
+
+ if (BP_NOMCP(bp) && (func == 0))
+ dev_err(&bp->pdev->dev, "MCP disabled, "
+ "must load devices in order!\n");
+
+ bp->multi_mode = multi_mode;
+
+ /* Set TPA flags */
+ if (disable_tpa) {
+ bp->flags &= ~TPA_ENABLE_FLAG;
+ bp->dev->features &= ~NETIF_F_LRO;
+ } else {
+ bp->flags |= TPA_ENABLE_FLAG;
+ bp->dev->features |= NETIF_F_LRO;
+ }
+ bp->disable_tpa = disable_tpa;
+
+ if (CHIP_IS_E1(bp))
+ bp->dropless_fc = 0;
+ else
+ bp->dropless_fc = dropless_fc;
+
+ bp->mrrs = mrrs;
+
+ bp->tx_ring_size = MAX_TX_AVAIL;
+
+ /* make sure the coalescing timeouts are a whole multiple of BNX2X_BTR */
+ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
+ bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
+
+ timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
+ bp->current_interval = (poll ? poll : timer_interval);
+
+ init_timer(&bp->timer);
+ bp->timer.expires = jiffies + bp->current_interval;
+ bp->timer.data = (unsigned long) bp;
+ bp->timer.function = bnx2x_timer;
+
+ bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+ bnx2x_dcbx_init_params(bp);
+
+#ifdef BCM_CNIC
+ if (CHIP_IS_E1x(bp))
+ bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+ else
+ bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
+ /* multiple tx priority */
+ if (CHIP_IS_E1x(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+ if (CHIP_IS_E3B0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+
+ return rc;
+}
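Note on the coalescing setup above: the integer division rounds the 50us/25us defaults down to a whole multiple of BNX2X_BTR. For example, if BNX2X_BTR were 4 (a hypothetical value), tx_ticks would come out to (50 / 4) * 4 = 48 and rx_ticks to (25 / 4) * 4 = 24.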
+
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/*
+ * net_device service functions
+ */
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ bool global = false;
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter, load_counter;
+
+ netif_carrier_off(dev);
+
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+ load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+
+ /*
+ * If a parity error occurred during the unload, attentions
+ * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+ * want the first function loaded on the current engine to
+ * complete the recovery.
+ */
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true))
+ do {
+ /*
+ * If there are attentions and they are in global
+ * blocks, set the GLOBAL_RESET bit regardless of
+ * whether this function will be the one to
+ * complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ /*
+ * Only the first function on the current engine should
+ * try to recover in open. In case of attentions in
+ * global blocks only the first in the chip should try
+ * to recover.
+ */
+ if ((!load_counter &&
+ (!global || !other_load_counter)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev, "Recovered in open\n");
+ break;
+ }
+
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ netdev_err(bp->dev, "Recovery flow hasn't been properly"
+ " completed yet. Try again later. If u still see this"
+ " message after a few retries then power cycle is"
+ " required.\n");
+
+ return -EAGAIN;
+ } while (0);
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ return bnx2x_nic_load(bp, LOAD_OPEN);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Unload the driver, release IRQs */
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ return 0;
+}
+
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ int mc_count = netdev_mc_count(bp->dev);
+ struct bnx2x_mcast_list_elem *mc_mac =
+ kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ struct netdev_hw_addr *ha;
+
+ if (!mc_mac)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p->mcast_list);
+
+ netdev_for_each_mc_addr(ha, bp->dev) {
+ mc_mac->mac = bnx2x_mc_addr(ha);
+ list_add_tail(&mc_mac->link, &p->mcast_list);
+ mc_mac++;
+ }
+
+ p->mcast_list_len = mc_count;
+
+ return 0;
+}
+
+static inline void bnx2x_free_mcast_macs_list(
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_mcast_list_elem *mc_mac =
+ list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+ link);
+
+ WARN_ON(!mc_mac);
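+ /* The elements were allocated as one contiguous array in
+ * bnx2x_init_mcast_macs_list(), so a single kfree() of the
+ * first entry releases the whole list.
+ */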
+ kfree(mc_mac);
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new list of unicast MACs.
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ unsigned long ramrod_flags = 0;
+
+ /* First schedule a clean-up of the old configuration */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+ return rc;
+ }
+
+ netdev_for_each_uc_addr(ha, dev) {
+ rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Execute the pending commands */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ int rc = 0;
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ /* first, clear all configured multicast MACs */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clear multicast "
+ "configuration: %d\n", rc);
+ return rc;
+ }
+
+ /* then, configure a new MACs list */
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+ if (rc) {
+ BNX2X_ERR("Failed to create multicast MACs "
+ "list: %d\n", rc);
+ return rc;
+ }
+
+ /* Now add the new MACs */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_ADD);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast "
+ "configuration: %d\n", rc);
+
+ bnx2x_free_mcast_macs_list(&rparam);
+ }
+
+ return rc;
+}
+
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
+void bnx2x_set_rx_mode(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return;
+ }
+
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
+
+ if (dev->flags & IFF_PROMISC)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ else if ((dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ else {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ }
+
+ bp->rx_mode = rx_mode;
+
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ return;
+ }
+
+ bnx2x_set_storm_rx_mode(bp);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
+ int devad, u16 addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 value;
+ int rc;
+
+ DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
+ prtad, devad, addr);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
+ bnx2x_release_phy_lock(bp);
+ DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
+
+ if (!rc)
+ rc = value;
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
+ u16 addr, u16 value)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
+ " value 0x%x\n", prtad, devad, addr, value);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct mii_ioctl_data *mdio = if_mii(ifr);
+
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_bnx2x(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ disable_irq(bp->pdev->irq);
+ bnx2x_interrupt(bp->pdev->irq, dev);
+ enable_irq(bp->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops bnx2x_netdev_ops = {
+ .ndo_open = bnx2x_open,
+ .ndo_stop = bnx2x_close,
+ .ndo_start_xmit = bnx2x_start_xmit,
+ .ndo_select_queue = bnx2x_select_queue,
+ .ndo_set_rx_mode = bnx2x_set_rx_mode,
+ .ndo_set_mac_address = bnx2x_change_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = bnx2x_ioctl,
+ .ndo_change_mtu = bnx2x_change_mtu,
+ .ndo_fix_features = bnx2x_fix_features,
+ .ndo_set_features = bnx2x_set_features,
+ .ndo_tx_timeout = bnx2x_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_bnx2x,
+#endif
+ .ndo_setup_tc = bnx2x_setup_tc,
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+ .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
+#endif
+};
+
+static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+{
+ struct device *dev = &bp->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ bp->flags |= USING_DAC_FLAG;
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(dev, "dma_set_coherent_mask failed, "
+ "aborting\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(dev, "System does not support DMA, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
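A brief usage note: when the 64-bit DMA mask is accepted, the USING_DAC_FLAG set here is what later adds NETIF_F_HIGHDMA to dev->features in bnx2x_init_dev(), allowing the stack to hand the driver buffers above the 4GB boundary.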
+
+static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
+ struct net_device *dev,
+ unsigned long board_type)
+{
+ struct bnx2x *bp;
+ int rc;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ bp = netdev_priv(dev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+ bp->flags = 0;
+ bp->pf_num = PCI_FUNC(pdev->devfn);
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
+ goto err_out;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+ " base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ rc = bnx2x_set_coherency_mask(bp);
+ if (rc)
+ goto err_out_release;
+
+ dev->mem_start = pci_resource_start(pdev, 0);
+ dev->base_addr = dev->mem_start;
+ dev->mem_end = pci_resource_end(pdev, 0);
+
+ dev->irq = pdev->irq;
+
+ bp->regview = pci_ioremap_bar(pdev, 0);
+ if (!bp->regview) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err_out_release;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ /* clean indirect addresses */
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
++ /*
++ * Clean the following indirect addresses for all functions since
+ * they are not used by the driver.
+ */
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
++
++ if (CHIP_IS_E1x(bp)) {
++ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
++ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
++ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
++ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
++ }
+
+ /*
+ * Enable internal target-read (in case we are probed after PF FLR).
+ * Must be done prior to any BAR read access. Only for 57712 and up
+ */
+ if (board_type != BCM57710 &&
+ board_type != BCM57711 &&
+ board_type != BCM57711E)
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Reset the load counter */
+ bnx2x_clear_load_cnt(bp);
+
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->netdev_ops = &bnx2x_netdev_ops;
+ bnx2x_set_ethtool_ops(dev);
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+ if (bp->flags & USING_DAC_FLAG)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* Add Loopback capability to the device */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
+#ifdef BCM_DCBNL
+ dev->dcbnl_ops = &bnx2x_dcbnl_ops;
+#endif
+
+ /* get_port_hwinfo() will set prtad and mmds properly */
+ bp->mdio.prtad = MDIO_PRTAD_NONE;
+ bp->mdio.mmds = 0;
+ bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ bp->mdio.dev = dev;
+ bp->mdio.mdio_read = bnx2x_mdio_read;
+ bp->mdio.mdio_write = bnx2x_mdio_write;
+
+ return 0;
+
+err_out_release:
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+err_out:
+ return rc;
+}
+
+static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
+ int *width, int *speed)
+{
+ u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+
+ *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
+
+ /* return value of 1=2.5GHz 2=5GHz */
+ *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+}
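Worked example of the decode above: if the link register reports a negotiated width of 8 and a speed code of 2 on a non-E2 chip, the probe banner in bnx2x_init_one() prints "PCI-E x8 5GHz (Gen2)"; on E2, as the banner logic shows, speed code 1 already means Gen2.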
+
+static int bnx2x_check_firmware(struct bnx2x *bp)
+{
+ const struct firmware *firmware = bp->firmware;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ struct bnx2x_fw_file_section *sections;
+ u32 offset, len, num_ops;
+ u16 *ops_offsets;
+ int i;
+ const u8 *fw_ver;
+
+ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+ return -EINVAL;
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+ sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+ /* Make sure none of the offsets and sizes make us read beyond
+ * the end of the firmware data */
+ for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+ offset = be32_to_cpu(sections[i].offset);
+ len = be32_to_cpu(sections[i].len);
+ if (offset + len > firmware->size) {
+ dev_err(&bp->pdev->dev,
+ "Section %d length is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Likewise for the init_ops offsets */
+ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+ ops_offsets = (u16 *)(firmware->data + offset);
+ num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
+ for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+ if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+ dev_err(&bp->pdev->dev,
+ "Section offset %d is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Check FW version */
+ offset = be32_to_cpu(fw_hdr->fw_version.offset);
+ fw_ver = firmware->data + offset;
+ if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
+ (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
+ (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
+ (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ dev_err(&bp->pdev->dev,
+ "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ fw_ver[0], fw_ver[1], fw_ver[2],
+ fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION,
+ BCM_5710_FW_ENGINEERING_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ u32 *target = (u32 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/4; i++)
+ target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+ Ops array is stored in the following format:
+ {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
+static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct raw_op *target = (struct raw_op *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/8; i++, j += 2) {
+ tmp = be32_to_cpu(source[j]);
+ target[i].op = (tmp >> 24) & 0xff;
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
+ }
+}
+
+/*
+ * IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)},
+ * packed into three big-endian 32-bit words per entry:
+ * word 0 = base, word 1 = m1:m2, word 2 = m3:size.
+ */
+static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct iro *target = (struct iro *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+ target[i].base = be32_to_cpu(source[j]);
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m1 = (tmp >> 16) & 0xffff;
+ target[i].m2 = tmp & 0xffff;
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m3 = (tmp >> 16) & 0xffff;
+ target[i].size = tmp & 0xffff;
+ j++;
+ }
+}
+
+static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be16 *source = (const __be16 *)_source;
+ u16 *target = (u16 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/2; i++)
+ target[i] = be16_to_cpu(source[i]);
+}
+
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+do { \
+ u32 len = be32_to_cpu(fw_hdr->arr.len); \
+ bp->arr = kmalloc(len, GFP_KERNEL); \
+ if (!bp->arr) { \
+ pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+ goto lbl; \
+ } \
+ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+ (u8 *)bp->arr, len); \
+} while (0)
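For readability, here is roughly what one invocation below, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n), expands to once the preprocessor substitutes arr, lbl and func:

do {
	u32 len = be32_to_cpu(fw_hdr->init_data.len);

	bp->init_data = kmalloc(len, GFP_KERNEL);
	if (!bp->init_data) {
		pr_err("Failed to allocate %d bytes for init_data\n", len);
		goto request_firmware_exit;
	}
	/* byte-swap the blob out of the firmware image */
	be32_to_cpu_n(bp->firmware->data +
		      be32_to_cpu(fw_hdr->init_data.offset),
		      (u8 *)bp->init_data, len);
} while (0);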
+
+int bnx2x_init_firmware(struct bnx2x *bp)
+{
+ const char *fw_file_name;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ int rc;
+
+ if (CHIP_IS_E1(bp))
+ fw_file_name = FW_FILE_NAME_E1;
+ else if (CHIP_IS_E1H(bp))
+ fw_file_name = FW_FILE_NAME_E1H;
+ else if (!CHIP_IS_E1x(bp))
+ fw_file_name = FW_FILE_NAME_E2;
+ else {
+ BNX2X_ERR("Unsupported chip revision\n");
+ return -EINVAL;
+ }
+
+ BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+
+ rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
+ if (rc) {
+ BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ rc = bnx2x_check_firmware(bp);
+ if (rc) {
+ BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+ /* Initialize the pointers to the init arrays */
+ /* Blob */
+ BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+ /* Opcodes */
+ BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+ /* Offsets */
+ BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
+ be16_to_cpu_n);
+
+ /* STORMs firmware */
+ INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+ INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+ INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+ INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_pram_data.offset);
+ INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+ INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+ INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+ INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_pram_data.offset);
+ /* IRO */
+ BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
+
+ return 0;
+
+iro_alloc_err:
+ kfree(bp->init_ops_offsets);
+init_offsets_alloc_err:
+ kfree(bp->init_ops);
+init_ops_alloc_err:
+ kfree(bp->init_data);
+request_firmware_exit:
+ release_firmware(bp->firmware);
+
+ return rc;
+}
+
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
+}
+
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+ .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+ .init_hw_cmn = bnx2x_init_hw_common,
+ .init_hw_port = bnx2x_init_hw_port,
+ .init_hw_func = bnx2x_init_hw_func,
+
+ .reset_hw_cmn = bnx2x_reset_common,
+ .reset_hw_port = bnx2x_reset_port,
+ .reset_hw_func = bnx2x_reset_func,
+
+ .gunzip_init = bnx2x_gunzip_init,
+ .gunzip_end = bnx2x_gunzip_end,
+
+ .init_fw = bnx2x_init_firmware,
+ .release_fw = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+ /* Prepare DMAE related driver resources */
+ bnx2x_setup_dmae(bp);
+
+ bnx2x_init_func_obj(bp, &bp->func_obj,
+ bnx2x_sp(bp, func_rdata),
+ bnx2x_sp_mapping(bp, func_rdata),
+ &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
+static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+ int cid_count = BNX2X_L2_CID_COUNT(bp);
+
+#ifdef BCM_CNIC
+ cid_count += CNIC_CID_MAX;
+#endif
+ return roundup(cid_count, QM_CID_ROUND);
+}
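roundup() rounds the CID count up to the next multiple of QM_CID_ROUND; for instance, with a hypothetical QM_CID_ROUND of 1024 and an L2 + CNIC total of 1090 CIDs, the function would return 2048.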
+
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev: pci device
+ */
+static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+
+ /*
+ * If MSI-X is not supported, return the number of SBs needed to
+ * support one fast path queue: one FP queue + one SB for CNIC
+ */
+ if (!pos)
+ return 1 + CNIC_PRESENT;
+
+ /*
+ * The value in the PCI configuration space is the index of the last
+ * entry, namely one less than the actual size of the table, which is
+ * exactly what we want to return from this function: number of all SBs
+ * without the default SB.
+ */
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ return control & PCI_MSIX_FLAGS_QSIZE;
+}
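As the comment says, PCI_MSIX_FLAGS_QSIZE holds the table size minus one. A device advertising a 17-entry MSI-X vector table therefore stores 16 in that field, and this function returns 16: all SBs with the default SB excluded.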
+
+static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int pcie_width, pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count;
+ /*
+ * An estimate of the maximum number of CoSes this chip may
+ * support, used to minimize the memory allocated for Tx
+ * netdev_queues. The accurate value is calculated during the
+ * initialization of bp->max_cos in bnx2x_init_bp(), based on
+ * the chip version AND chip revision.
+ */
+ u8 max_cos_est = 0;
+
+ switch (ent->driver_data) {
+ case BCM57710:
+ case BCM57711:
+ case BCM57711E:
+ max_cos_est = BNX2X_MULTI_TX_COS_E1X;
+ break;
+
+ case BCM57712:
+ case BCM57712_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
+ break;
+
+ case BCM57800:
+ case BCM57800_MF:
+ case BCM57810:
+ case BCM57810_MF:
+ case BCM57840:
+ case BCM57840_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
+ break;
+
+ default:
+ pr_err("Unknown board_type (%ld), aborting\n",
+ ent->driver_data);
+ return -ENODEV;
+ }
+
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+
+ /* !!! FIXME !!!
+ * Do not allow the maximum SB count to grow above 16
+ * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
+ * We will use the FP_SB_MAX_E1x macro for this matter.
+ */
+ max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
+
+ WARN_ON(!max_non_def_sbs);
+
+ /* Maximum number of RSS queues: one IGU SB goes to CNIC */
+ rss_count = max_non_def_sbs - CNIC_PRESENT;
+
+ /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+ rx_count = rss_count + FCOE_PRESENT;
+
+ /*
+ * Maximum number of netdev Tx queues:
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ */
+ tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+
+ /* dev zeroed in alloc_etherdev_mqs() */
+ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
+ if (!dev) {
+ dev_err(&pdev->dev, "Cannot allocate net device\n");
+ return -ENOMEM;
+ }
+
+ bp = netdev_priv(dev);
+
+ DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
+ bp->igu_sb_cnt = max_non_def_sbs;
+ bp->msg_enable = debug;
+ pci_set_drvdata(pdev, dev);
+
+ rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ if (rc < 0) {
+ free_netdev(dev);
+ return rc;
+ }
+
+ DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
+
+ rc = bnx2x_init_bp(bp);
+ if (rc)
+ goto init_one_exit;
+
+ /*
+ * Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp().
+ */
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ min_t(u64, BNX2X_DB_SIZE(bp),
+ pci_resource_len(pdev, 2)));
+ if (!bp->doorbells) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+
+ /* calc qm_cid_count */
+ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+
+#ifdef BCM_CNIC
+ /* disable FCoE L2 queue for E1x and E3 */
+ if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+ bp->flags |= NO_FCOE_FLAG;
+
+#endif
+
+ /* Configure interrupt mode: try to enable MSI-X/MSI if
+ * needed, set bp->num_queues appropriately.
+ */
+ bnx2x_set_int_mode(bp);
+
+ /* Add all NAPI objects */
+ bnx2x_add_all_napi(bp);
+
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto init_one_exit;
+ }
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ /* Add storage MAC address */
+ rtnl_lock();
+ dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
+ bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width,
+ ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
+ (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
+ "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
+
+ return 0;
+
+init_one_exit:
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ return rc;
+}
+
+static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return;
+ }
+ bp = netdev_priv(dev);
+
+#ifdef BCM_CNIC
+ /* Delete storage MAC address */
+ if (!NO_FCOE(bp)) {
+ rtnl_lock();
+ dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
+#ifdef BCM_DCBNL
+ /* Delete app tlvs from dcbnl */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
+ unregister_netdev(dev);
+
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Power on: we can't let PCI layer write to us while we are in D3 */
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ /* Disable MSI/MSI-X */
+ bnx2x_disable_msi(bp);
+
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->sp_rtnl_task);
+
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+
+ bnx2x_free_mem_bp(bp);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+ int i;
+
+ bp->state = BNX2X_STATE_ERROR;
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
+ bnx2x_netif_stop(bp, 0);
+
+ del_timer_sync(&bp->timer);
+
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+
+ netif_carrier_off(bp->dev);
+
+ return 0;
+}
+
+static void bnx2x_eeh_recover(struct bnx2x *bp)
+{
+ u32 val;
+
+ mutex_init(&bp->port.phy_mutex);
+
+ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ bp->link_params.shmem_base = bp->common.shmem_base;
+ BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
+
+ if (!bp->common.shmem_base ||
+ (bp->common.shmem_base < 0xA0000) ||
+ (bp->common.shmem_base >= 0xC0000)) {
+ BNX2X_DEV_INFO("MCP not active\n");
+ bp->flags |= NO_MCP_FLAG;
+ return;
+ }
+
+ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ BNX2X_ERR("BAD MCP validity signature\n");
+
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+}
+
+/**
+ * bnx2x_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ netif_device_detach(dev);
+
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (netif_running(dev))
+ bnx2x_eeh_nic_unload(bp);
+
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2x_io_slot_reset - called after the PCI bus has been reset
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset\n");
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev))
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ rtnl_unlock();
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2x_io_resume - called when traffic can start flowing again
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2x_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev, "Handling parity error recovery. "
+ "Try again later\n");
+ return;
+ }
+
+ rtnl_lock();
+
+ bnx2x_eeh_recover(bp);
+
+ if (netif_running(dev))
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+
+ netif_device_attach(dev);
+
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2x_err_handler = {
+ .error_detected = bnx2x_io_error_detected,
+ .slot_reset = bnx2x_io_slot_reset,
+ .resume = bnx2x_io_resume,
+};
+
+static struct pci_driver bnx2x_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnx2x_pci_tbl,
+ .probe = bnx2x_init_one,
+ .remove = __devexit_p(bnx2x_remove_one),
+ .suspend = bnx2x_suspend,
+ .resume = bnx2x_resume,
+ .err_handler = &bnx2x_err_handler,
+};
+
+static int __init bnx2x_init(void)
+{
+ int ret;
+
+ pr_info("%s", version);
+
+ bnx2x_wq = create_singlethread_workqueue("bnx2x");
+ if (bnx2x_wq == NULL) {
+ pr_err("Cannot create workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&bnx2x_pci_driver);
+ if (ret) {
+ pr_err("Cannot register driver\n");
+ destroy_workqueue(bnx2x_wq);
+ }
+ return ret;
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+ pci_unregister_driver(&bnx2x_pci_driver);
+
+ destroy_workqueue(bnx2x_wq);
+}
+
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
+#ifdef BCM_CNIC
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp: driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if the ramrod doesn't return.
+ */
+static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+ &bp->iscsi_l2_mac_obj, true,
+ BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+ struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+ BUG_ON(bp->cnic_spq_pending < count);
+ bp->cnic_spq_pending -= count;
+
+ for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
+ u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
+ & SPE_HDR_CONN_TYPE) >>
+ SPE_HDR_CONN_TYPE_SHIFT;
+ u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+ >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
+
+ /* Set validation for iSCSI L2 client before sending SETUP
+ * ramrod
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
+ bnx2x_set_ctx_validation(bp, &bp->context.
+ vcxt[BNX2X_ISCSI_ETH_CID].eth,
+ BNX2X_ISCSI_ETH_CID);
+ }
+
+ /*
+ * There may be no more than 8 L2 and no more than 8 L5
+ * SPEs in the air. We also check that the number of outstanding
+ * COMMON ramrods is not more than the EQ and SPQ can
+ * accommodate.
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->cq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->cq_spq_left);
+ } else if (type == NONE_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->eq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->eq_spq_left);
+ } else if ((type == ISCSI_CONNECTION_TYPE) ||
+ (type == FCOE_CONNECTION_TYPE)) {
+ if (bp->cnic_spq_pending >=
+ bp->cnic_eth_dev.max_kwqe_pending)
+ break;
+ else
+ bp->cnic_spq_pending++;
+ } else {
+ BNX2X_ERR("Unknown SPE type: %d\n", type);
+ bnx2x_panic();
+ break;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+ *spe = *bp->cnic_kwq_cons;
+
+ DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+ bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+ if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_cons++;
+ }
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+ struct kwqe_16 *kwqes[], u32 count)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EIO;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ for (i = 0; i < count; i++) {
+ struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+ if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+ break;
+
+ *bp->cnic_kwq_prod = *spe;
+
+ bp->cnic_kwq_pending++;
+
+ DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+ spe->hdr.conn_and_cmd_data, spe->hdr.type,
+ spe->data.update_data_addr.hi,
+ spe->data.update_data_addr.lo,
+ bp->cnic_kwq_pending);
+
+ if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_prod++;
+ }
+
+ spin_unlock_bh(&bp->spq_lock);
+
+ if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+ bnx2x_cnic_sp_post(bp, 0);
+
+ return i;
+}
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ mutex_lock(&bp->cnic_mutex);
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ mutex_unlock(&bp->cnic_mutex);
+
+ return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ ctl.cmd = cmd;
+
+ return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ /* first we tell CNIC and only then we count this as a completion */
+ ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+ ctl.data.comp.cid = cid;
+ ctl.data.comp.error = err;
+
+ bnx2x_cnic_ctl_send_bh(bp, &ctl);
+ bnx2x_cnic_sp_post(bp, 0);
+}
+
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+ unsigned long accept_flags = 0, ramrod_flags = 0;
+ u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+ if (start) {
+ /* Start accepting on iSCSI L2 ring. Accept all multicasts
+ * because it's the only way for UIO Queue to accept
+ * multicasts (in non-promiscuous mode only one queue per
+ * function will receive multicast packets; the leading one
+ * in our case).
+ */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ /* Clear STOP_PENDING bit if START is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+ sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+ } else
+ /* Clear START_PENDING bit if STOP is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(sched_state, &bp->sp_state);
+ else {
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+ ramrod_flags);
+ }
+}
+
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (ctl->cmd) {
+ case DRV_CTL_CTXTBL_WR_CMD: {
+ u32 index = ctl->data.io.offset;
+ dma_addr_t addr = ctl->data.io.dma_addr;
+
+ bnx2x_ilt_wr(bp, index, addr);
+ break;
+ }
+
+ case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ bnx2x_cnic_sp_post(bp, count);
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_START_L2_CMD: {
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ unsigned long sp_bits = 0;
+
+ /* Configure the iSCSI classification object */
+ bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+ cp->iscsi_l2_client_id,
+ cp->iscsi_l2_cid, BP_FUNC(bp),
+ bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+ &bp->macs_pool);
+
+ /* Set iSCSI MAC address */
+ rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+ if (rc)
+ break;
+
+ mmiowb();
+ barrier();
+
+ /* Start accepting on iSCSI L2 ring */
+
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_STOP_L2_CMD: {
+ unsigned long sp_bits = 0;
+
+ /* Stop accepting on iSCSI L2 ring */
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ mmiowb();
+ barrier();
+
+ /* Unset iSCSI L2 MAC */
+ rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+ BNX2X_ISCSI_ETH_MAC, true);
+ break;
+ }
+ case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_add(count, &bp->cq_spq_left);
+ smp_mb__after_atomic_inc();
+ break;
+ }
+
+ default:
+ BNX2X_ERR("unknown command %x\n", ctl->cmd);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (bp->flags & USING_MSIX_FLAG) {
+ cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+ cp->irq_arr[0].vector = bp->msix_table[1].vector;
+ } else {
+ cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+ }
+ if (!CHIP_IS_E1x(bp))
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
+ else
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+
+ cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
+ cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
+ cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+ cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
+
+ cp->num_irq = 2;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+ void *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!bp->cnic_kwq)
+ return -ENOMEM;
+
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+ bp->cnic_spq_pending = 0;
+ bp->cnic_kwq_pending = 0;
+
+ bp->cnic_data = data;
+
+ cp->num_irq = 0;
+ cp->drv_state |= CNIC_DRV_STATE_REGD;
+ cp->iro_arr = bp->iro_arr;
+
+ bnx2x_setup_cnic_irq_info(bp);
+
+ rcu_assign_pointer(bp->cnic_ops, ops);
+
+ return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ mutex_lock(&bp->cnic_mutex);
+ cp->drv_state = 0;
+ rcu_assign_pointer(bp->cnic_ops, NULL);
+ mutex_unlock(&bp->cnic_mutex);
+ synchronize_rcu();
+ kfree(bp->cnic_kwq);
+ bp->cnic_kwq = NULL;
+
+ return 0;
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ /* If both iSCSI and FCoE are disabled - return NULL in
+ * order to indicate CNIC that it should not try to work
+ * with this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
+ cp->drv_owner = THIS_MODULE;
+ cp->chip_id = CHIP_ID(bp);
+ cp->pdev = bp->pdev;
+ cp->io_base = bp->regview;
+ cp->io_base2 = bp->doorbells;
+ cp->max_kwqe_pending = 8;
+ cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->ctx_tbl_len = CNIC_ILT_LINES;
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+ cp->drv_ctl = bnx2x_drv_ctl;
+ cp->drv_register_cnic = bnx2x_register_cnic;
+ cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->iscsi_l2_client_id =
+ bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
+ DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
+ "starting cid %d\n",
+ cp->ctx_blk_size,
+ cp->ctx_tbl_offset,
+ cp->ctx_tbl_len,
+ cp->starting_cid);
+ return cp;
+}
+EXPORT_SYMBOL(bnx2x_cnic_probe);
+
+#endif /* BCM_CNIC */
+
--- /dev/null
- #define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3)
+/* bnx2x_reg.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * The registers description starts with the register Access type followed
+ * by size in bits. For example [RW 32]. The access types are:
+ * R - Read only
+ * RC - Clear on read
+ * RW - Read/Write
+ * ST - Statistics register (clear on read)
+ * W - Write only
+ * WB - Wide bus register - the size is over 32 bits and it should be
+ * read/write in consecutive 32 bits accesses
+ * WR - Write Clear (write 1 to clear the bit)
+ *
+ */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
+/* [RW 1] Initiate the ATC array - reset all the valid bits */
+#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE 0x1100bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK 0x1101d8
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK 0x60128
+/* [R 19] Interrupt register #0 read */
+#define BRB1_REG_BRB1_INT_STS 0x6011c
+/* [RW 4] Parity mask register #0 read/write */
+#define BRB1_REG_BRB1_PRTY_MASK 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
+/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
+ * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
+ * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
+ * following reset the first rbc access to this reg must be a write; there
+ * can be no more rbc writes after the first one; there can be any number of
+ * rbc reads following the first write; rbc accesses not following these
+ * rules will result in a hang condition. */
+#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
+/* [RW 10] The number of free blocks below which the full signal to class 0
+ * is asserted */
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
+ * is de-asserted */
+#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
+ * is asserted */
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
+ * is de-asserted */
+#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
+ * port is asserted */
+#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
+/* [RW 10] The number of free blocks above which the full signal to the LB
+ * port is de-asserted */
+#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
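+/*
+ * Editorial note: each XOFF/XON pair above implements hysteresis - the
+ * full signal asserts when the free-block count drops below the XOFF
+ * threshold and only de-asserts again once it rises above the XON
+ * threshold, so XON is programmed above XOFF.
+ */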
+/* [RW 10] The number of free blocks above which the High_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
+/* [RW 10] The number of free blocks below which the High_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
+/* [RW 11] The number of blocks guarantied for the LB port */
+#define BRB1_REG_LB_GUARANTIED 0x601ec
+/* [RW 11] The hysteresis on the guarantied buffer space for the LB port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM 0x61000
+/* [RW 10] The number of free blocks above which the Low_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
+/* [RW 10] The number of free blocks below which the Low_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
+/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC 0
+ * before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258
+/* [RW 11] The number of blocks guarantied for class 0 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260
+/* [RW 11] The number of blocks guarantied for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
+#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1 0x60240
+/* [R 24] The number of full blocks. */
+#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
+/* [ST 32] The number of cycles that the write_full signal towards MAC #0
+ was asserted. */
+#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
+#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
+#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
+/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
+ asserted. */
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
+/* [RW 10] The number of free blocks below which the pause signal to class 0
+ * is asserted */
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
+ * is de-asserted */
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
+ * is asserted */
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
+ * is de-asserted */
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
+/* [RW 10] Write client 0: Assert pause threshold. */
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
+/* [R 24] The number of full blocks occupied by port. */
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
+/* [RW 1] Reset the design by software. */
+#define BRB1_REG_SOFT_RESET 0x600dc
+/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+#define CCM_REG_CAM_OCCUP 0xd0188
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CFC_IFEN 0xd003c
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CQM_IFEN 0xd000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
+ Otherwise 0 is inserted. */
+#define CCM_REG_CCM_CQM_USE_Q 0xd00c0
+/* [RW 11] Interrupt mask register #0 read/write */
+#define CCM_REG_CCM_INT_MASK 0xd01e4
+/* [R 11] Interrupt register #0 read */
+#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
+/* [R 27] Parity register #0 read */
+#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the input message Reg1WbFlg isn't set. */
+#define CCM_REG_CCM_REG0_SZ 0xd00c4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM0_IFEN 0xd0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM1_IFEN 0xd0008
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_AG_RD_IFEN 0xd0030
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define CCM_REG_CDU_AG_WR_IFEN 0xd002c
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_SM_RD_IFEN 0xd0038
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define CCM_REG_CDU_SM_WR_IFEN 0xd0034
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define CCM_REG_CFC_INIT_CRD 0xd0204
+/* [RW 2] Auxiliary counter flag Q number 1. */
+#define CCM_REG_CNT_AUX1_Q 0xd00c8
+/* [RW 2] Auxiliary counter flag Q number 2. */
+#define CCM_REG_CNT_AUX2_Q 0xd00cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define CCM_REG_CQM_CCM_HDR_P 0xd008c
+/* [RW 28] The CM header value for QM request (secondary). */
+#define CCM_REG_CQM_CCM_HDR_S 0xd0090
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CQM_CCM_IFEN 0xd0014
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define CCM_REG_CQM_INIT_CRD 0xd020c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_P_WEIGHT 0xd00b8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_S_WEIGHT 0xd00bc
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CSDM_IFEN 0xd0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CSDM_WEIGHT 0xd00b4
+/* [RW 28] The CM header for QM formatting in case of an error in the QM
+ inputs. */
+#define CCM_REG_ERR_CCM_HDR 0xd0094
+/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
+#define CCM_REG_ERR_EVNT_ID 0xd0098
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC0_INIT_CRD 0xd0210
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC1_INIT_CRD 0xd0214
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
+ ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
+ outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
+#define CCM_REG_GR_ARB_TYPE 0xd015c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is assumed that the Store channel priority is
+   the complement to 4 of the other priorities - Aggregation channel; Load
+   (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD0_PR 0xd0164
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is assumed that the Store channel priority is
+   the complement to 4 of the other priorities - Aggregation channel; Load
+   (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD1_PR 0xd0168
+/* [RW 2] General flags index. */
+#define CCM_REG_INV_DONE_Q 0xd0108
+/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM
+ context and sent to STORM; for a specific connection type. The double
+ REG-pairs are used in order to align to STORM context row size of 128
+ bits. The offset of these data in the STORM context is always 0. Index
+ _(0..15) stands for the connection type (one of 16). */
+#define CCM_REG_N_SM_CTX_LD_0 0xd004c
+#define CCM_REG_N_SM_CTX_LD_1 0xd0050
+#define CCM_REG_N_SM_CTX_LD_2 0xd0054
+#define CCM_REG_N_SM_CTX_LD_3 0xd0058
+#define CCM_REG_N_SM_CTX_LD_4 0xd005c
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_PBF_IFEN 0xd0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the pbf interface is detected. */
+#define CCM_REG_PBF_LENGTH_MIS 0xd0180
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_PBF_WEIGHT 0xd00ac
+#define CCM_REG_PHYS_QNUM1_0 0xd0134
+#define CCM_REG_PHYS_QNUM1_1 0xd0138
+#define CCM_REG_PHYS_QNUM2_0 0xd013c
+#define CCM_REG_PHYS_QNUM2_1 0xd0140
+#define CCM_REG_PHYS_QNUM3_0 0xd0144
+#define CCM_REG_PHYS_QNUM3_1 0xd0148
+#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
+#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
+#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
+#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
+#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
+#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
+#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
+#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_STORM_CCM_IFEN 0xd0010
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define CCM_REG_STORM_LENGTH_MIS 0xd016c
+/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
+   mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
+   weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
+   etc. */
+#define CCM_REG_STORM_WEIGHT 0xd009c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_TSEM_IFEN 0xd001c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_TSEM_WEIGHT 0xd00a0
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_USEM_IFEN 0xd0024
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+ the usem interface is detected. */
+#define CCM_REG_USEM_LENGTH_MIS 0xd017c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_USEM_WEIGHT 0xd00a8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_XSEM_IFEN 0xd0020
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface is detected. */
+#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_XSEM_WEIGHT 0xd00a4
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [12:6] - message
+   pointer; [18:13] - next pointer. */
+#define CCM_REG_XX_DESCR_TABLE 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE 24
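+/*
+ * Illustrative decode (editorial, not from the original header): for a
+ * 19-bit descriptor word d read through this table:
+ *
+ *	msg_len  = d & 0x3f;		bits [5:0]
+ *	msg_ptr  = (d >> 6) & 0x7f;	bits [12:6]
+ *	next_ptr = (d >> 13) & 0x3f;	bits [18:13]
+ */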
+/* [R 7] Used to read the value of XX protection Free counter. */
+#define CCM_REG_XX_FREE 0xd0184
+/* [RW 6] Initial value for the credit counter; responsible for filling
+   the Input Stage XX protection buffer with the XX protection pending
+   messages. Max credit available - 127. Write writes the initial credit
+   value; read returns the current value of the credit counter. Must be
+   initialized to maximum XX protected message size - 2 at start-up. */
+#define CCM_REG_XX_INIT_CRD 0xd0220
+/* [RW 7] The maximum number of pending messages; which may be stored in XX
+   protection. A read returns the ~ccm_registers_xx_free.xx_free counter; a
+   write sets the start value of the ~ccm_registers_xx_free.xx_free
+   counter. */
+#define CCM_REG_XX_MSG_NUM 0xd0224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
+/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
+   header pointer. */
+#define CCM_REG_XX_TABLE 0xd0280
+#define CDU_REG_CDU_CHK_MASK0 0x101000
+#define CDU_REG_CDU_CHK_MASK1 0x101004
+#define CDU_REG_CDU_CONTROL0 0x101008
+#define CDU_REG_CDU_DEBUG 0x101010
+#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
+/* [RW 7] Interrupt mask register #0 read/write */
+#define CDU_REG_CDU_INT_MASK 0x10103c
+/* [R 7] Interrupt register #0 read */
+#define CDU_REG_CDU_INT_STS 0x101030
+/* [RW 5] Parity mask register #0 read/write */
+#define CDU_REG_CDU_PRTY_MASK 0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
+/* [RC 32] logging of error data in case of a CDU load error:
+   {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
+   active_error; type_error; actual_active; actual_compressed_context}; */
+#define CDU_REG_ERROR_DATA 0x101014
+/* [WB 216] L1TT ram access. each entry has the following format :
+   {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
+   length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
+#define CDU_REG_L1TT 0x101800
+/* [WB 24] MATT ram access. each entry has the following
+   format:{RegionLength[11:0]; RegionOffset[11:0]} */
+#define CDU_REG_MATT 0x101100
+/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
+#define CDU_REG_MF_MODE 0x101050
+/* [R 1] indication that the initialization of the activity counter by the
+   hardware was done. */
+#define CFC_REG_AC_INIT_DONE 0x104078
+/* [RW 13] activity counter ram access */
+#define CFC_REG_ACTIVITY_COUNTER 0x104400
+#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
+/* [R 1] indication that the initialization of the cams by the hardware was done. */
+#define CFC_REG_CAM_INIT_DONE 0x10407c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define CFC_REG_CFC_INT_MASK 0x104108
+/* [R 2] Interrupt register #0 read */
+#define CFC_REG_CFC_INT_STS 0x1040fc
+/* [RC 2] Interrupt register #0 read clear */
+#define CFC_REG_CFC_INT_STS_CLR 0x104100
+/* [RW 4] Parity mask register #0 read/write */
+#define CFC_REG_CFC_PRTY_MASK 0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
+/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
+#define CFC_REG_CID_CAM 0x104800
+#define CFC_REG_CONTROL0 0x104028
+#define CFC_REG_DEBUG0 0x104050
+/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
+ vector) whether the cfc should be disabled upon it */
+#define CFC_REG_DISABLE_ON_ERROR 0x104044
+/* [RC 14] CFC error vector. when the CFC detects an internal error it will
+ set one of these bits. the bit description can be found in CFC
+ specifications */
+#define CFC_REG_ERROR_VECTOR 0x10403c
+/* [WB 93] LCID info ram access */
+#define CFC_REG_INFO_RAM 0x105000
+#define CFC_REG_INFO_RAM_SIZE 1024
+#define CFC_REG_INIT_REG 0x10404c
+#define CFC_REG_INTERFACES 0x104058
+/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
+ field allows changing the priorities of the weighted-round-robin arbiter
+ which selects which CFC load client should be served next */
+#define CFC_REG_LCREQ_WEIGHTS 0x104084
+/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
+#define CFC_REG_LINK_LIST 0x104c00
+#define CFC_REG_LINK_LIST_SIZE 256
+/* [R 1] indication that the initialization of the link list by the hardware was done. */
+#define CFC_REG_LL_INIT_DONE 0x104074
+/* [R 9] Number of allocated LCIDs which are at empty state */
+#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
+/* [R 9] Number of Arriving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120
+/* [R 9] Number of Leaving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
+#define CFC_REG_WEAK_ENABLE_PF 0x104124
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
+#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
+#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
+#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
+#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
+#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
+#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
+#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
+#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
+#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
+#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
+#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
+#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
+#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
+#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
+#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
+#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
+#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
+#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
+#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
+#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
+#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
+#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
+#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
+#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
+#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSDM_REG_CSDM_INT_MASK_0 0xc229c
+#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
+/* [R 32] Interrupt register #0 read */
+#define CSDM_REG_CSDM_INT_STS_0 0xc2290
+#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
+/* [RW 11] Parity mask register #0 read/write */
+#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
+/* [R 11] Parity register #0 read */
+#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
+#define CSDM_REG_ENABLE_IN1 0xc2238
+#define CSDM_REG_ENABLE_IN2 0xc223c
+#define CSDM_REG_ENABLE_OUT1 0xc2240
+#define CSDM_REG_ENABLE_OUT2 0xc2244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc
+/* [ST 32] The number of ACK after placement messages received */
+#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274
+/* [ST 32] The number of requests received from the pxp async if */
+#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278
+/* [ST 32] The number of commands received in queue 0 */
+#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248
+/* [ST 32] The number of commands received in queue 10 */
+#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c
+/* [ST 32] The number of commands received in queue 11 */
+#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270
+/* [ST 32] The number of commands received in queue 1 */
+#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c
+/* [ST 32] The number of commands received in queue 3 */
+#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250
+/* [ST 32] The number of commands received in queue 4 */
+#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254
+/* [ST 32] The number of commands received in queue 5 */
+#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258
+/* [ST 32] The number of commands received in queue 6 */
+#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c
+/* [ST 32] The number of commands received in queue 7 */
+#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260
+/* [ST 32] The number of commands received in queue 8 */
+#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264
+/* [ST 32] The number of commands received in queue 9 */
+#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define CSDM_REG_TIMER_TICK 0xc2000
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define CSEM_REG_ARB_CYCLE_SIZE 0x200034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+   3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2 */
+#define CSEM_REG_ARB_ELEMENT0 0x200020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+   3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~csem_registers_arb_element0.arb_element0 */
+#define CSEM_REG_ARB_ELEMENT1 0x200024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+   3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~csem_registers_arb_element0.arb_element0
+   and ~csem_registers_arb_element1.arb_element1 */
+#define CSEM_REG_ARB_ELEMENT2 0x200028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+   3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~csem_registers_arb_element0.arb_element0
+   and ~csem_registers_arb_element1.arb_element1 and
+   ~csem_registers_arb_element2.arb_element2 */
+#define CSEM_REG_ARB_ELEMENT3 0x20002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+   3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~csem_registers_arb_element0.arb_element0
+   and ~csem_registers_arb_element1.arb_element1 and
+   ~csem_registers_arb_element2.arb_element2 and
+   ~csem_registers_arb_element3.arb_element3 */
+#define CSEM_REG_ARB_ELEMENT4 0x200030
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSEM_REG_CSEM_INT_MASK_0 0x200110
+#define CSEM_REG_CSEM_INT_MASK_1 0x200120
+/* [R 32] Interrupt register #0 read */
+#define CSEM_REG_CSEM_INT_STS_0 0x200104
+#define CSEM_REG_CSEM_INT_STS_1 0x200114
+/* [RW 32] Parity mask register #0 read/write */
+#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
+#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
+/* [R 32] Parity register #0 read */
+#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
+#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
+#define CSEM_REG_ENABLE_IN 0x2000a4
+#define CSEM_REG_ENABLE_OUT 0x2000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define CSEM_REG_FAST_MEMORY 0x220000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define CSEM_REG_FIC0_DISABLE 0x200224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define CSEM_REG_FIC1_DISABLE 0x200234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define CSEM_REG_INT_TABLE 0x200400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define CSEM_REG_MSG_NUM_FIC0 0x200000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define CSEM_REG_MSG_NUM_FIC1 0x200004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define CSEM_REG_MSG_NUM_FOC0 0x200008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define CSEM_REG_MSG_NUM_FOC1 0x20000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define CSEM_REG_MSG_NUM_FOC2 0x200010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define CSEM_REG_MSG_NUM_FOC3 0x200014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define CSEM_REG_PAS_DISABLE 0x20024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define CSEM_REG_PASSIVE_BUFFER 0x202000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define CSEM_REG_PRAM 0x240000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define CSEM_REG_THREADS_LIST 0x2002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define CSEM_REG_TS_0_AS 0x200038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define CSEM_REG_TS_10_AS 0x200060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define CSEM_REG_TS_11_AS 0x200064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define CSEM_REG_TS_12_AS 0x200068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define CSEM_REG_TS_13_AS 0x20006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define CSEM_REG_TS_14_AS 0x200070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define CSEM_REG_TS_15_AS 0x200074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define CSEM_REG_TS_16_AS 0x200078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define CSEM_REG_TS_17_AS 0x20007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define CSEM_REG_TS_18_AS 0x200080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define CSEM_REG_TS_1_AS 0x20003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define CSEM_REG_TS_2_AS 0x200040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define CSEM_REG_TS_3_AS 0x200044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define CSEM_REG_TS_4_AS 0x200048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define CSEM_REG_TS_5_AS 0x20004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define CSEM_REG_TS_6_AS 0x200050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define CSEM_REG_TS_7_AS 0x200054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define CSEM_REG_TS_8_AS 0x200058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define CSEM_REG_TS_9_AS 0x20005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for the 64 VFs; values 64-67 reset the error for the 4 PFs; values
+ * 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM 0x200380
+/* [RW 1] Parity mask register #0 read/write */
+#define DBG_REG_DBG_PRTY_MASK 0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
+/* [RW 1] When set the DMAE will process the commands as in E1.5. 1. The
+ * function that is used is always SRC-PCI; 2. VF_Valid = 0; 3. VFID = 0;
+ * 4. Completion function = 0; 5. Error handling = 0 */
+#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
+/* [RW 32] Commands memory. The address of command X; row Y is calculated
+   as 14*X+Y. */
+#define DMAE_REG_CMD_MEM 0x102400
+#define DMAE_REG_CMD_MEM_SIZE 224
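+/*
+ * Illustrative example (editorial; assumes each row is one 32-bit word):
+ * row Y of command X would then live at byte offset
+ * DMAE_REG_CMD_MEM + (14 * X + Y) * 4, e.g. row 2 of command 3 at
+ * DMAE_REG_CMD_MEM + 44 * 4.
+ */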
+/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+ initial value is all ones. */
+#define DMAE_REG_CRC16C_INIT 0x10201c
+/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
+ CRC-16 T10 initial value is all ones. */
+#define DMAE_REG_CRC16T10_INIT 0x102020
+/* [RW 2] Interrupt mask register #0 read/write */
+#define DMAE_REG_DMAE_INT_MASK 0x102054
+/* [RW 4] Parity mask register #0 read/write */
+#define DMAE_REG_DMAE_PRTY_MASK 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
+/* [RW 1] Command 0 go. */
+#define DMAE_REG_GO_C0 0x102080
+/* [RW 1] Command 1 go. */
+#define DMAE_REG_GO_C1 0x102084
+/* [RW 1] Command 10 go. */
+#define DMAE_REG_GO_C10 0x102088
+/* [RW 1] Command 11 go. */
+#define DMAE_REG_GO_C11 0x10208c
+/* [RW 1] Command 12 go. */
+#define DMAE_REG_GO_C12 0x102090
+/* [RW 1] Command 13 go. */
+#define DMAE_REG_GO_C13 0x102094
+/* [RW 1] Command 14 go. */
+#define DMAE_REG_GO_C14 0x102098
+/* [RW 1] Command 15 go. */
+#define DMAE_REG_GO_C15 0x10209c
+/* [RW 1] Command 2 go. */
+#define DMAE_REG_GO_C2 0x1020a0
+/* [RW 1] Command 3 go. */
+#define DMAE_REG_GO_C3 0x1020a4
+/* [RW 1] Command 4 go. */
+#define DMAE_REG_GO_C4 0x1020a8
+/* [RW 1] Command 5 go. */
+#define DMAE_REG_GO_C5 0x1020ac
+/* [RW 1] Command 6 go. */
+#define DMAE_REG_GO_C6 0x1020b0
+/* [RW 1] Command 7 go. */
+#define DMAE_REG_GO_C7 0x1020b4
+/* [RW 1] Command 8 go. */
+#define DMAE_REG_GO_C8 0x1020b8
+/* [RW 1] Command 9 go. */
+#define DMAE_REG_GO_C9 0x1020bc
+/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the
+   acknowledge input is disregarded; valid is deasserted; all other signals
+   are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_GRC_IFEN 0x102008
+/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
+   acknowledge input is disregarded; valid is deasserted; full is asserted;
+   all other signals are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_PCI_IFEN 0x102004
+/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the
+ initial value to the credit counter; related to the address. Read returns
+ the current value of the counter. */
+#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD0 0x170060
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD1 0x170064
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD2 0x170068
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD3 0x17006c
+/* [RW 28] UCM Header. */
+#define DORQ_REG_CMHEAD_RX 0x170050
+/* [RW 32] Doorbell address for RBC doorbells (function 0). */
+#define DORQ_REG_DB_ADDR0 0x17008c
+/* [RW 5] Interrupt mask register #0 read/write */
+#define DORQ_REG_DORQ_INT_MASK 0x170180
+/* [R 5] Interrupt register #0 read */
+#define DORQ_REG_DORQ_INT_STS 0x170174
+/* [RC 5] Interrupt register #0 read clear */
+#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
+/* [RW 2] Parity mask register #0 read/write */
+#define DORQ_REG_DORQ_PRTY_MASK 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
+/* [RW 8] The address to write the DPM CID to STORM. */
+#define DORQ_REG_DPM_CID_ADDR 0x170044
+/* [RW 5] The DPM mode CID extraction offset. */
+#define DORQ_REG_DPM_CID_OFST 0x170030
+/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
+#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
+/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
+#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
+/* [R 13] Current value of the DQ FIFO fill level according to the
+   following pointer. The range is 0 - 256 FIFO rows; where each row stands
+   for one doorbell. */
+#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
+/* [R 1] DQ FIFO full status. Set when the FIFO filling level is greater
+   than or equal to the full threshold; reset on full clear. */
+#define DORQ_REG_DQ_FULL_ST 0x1700c0
+/* [RW 28] The value sent to CM header in the case of CFC load error. */
+#define DORQ_REG_ERR_CMHEAD 0x170058
+#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MODE_ACT 0x170008
+/* [RW 5] The normal mode CID extraction offset. */
+#define DORQ_REG_NORM_CID_OFST 0x17002c
+/* [RW 28] TCM Header when only TCP context is loaded. */
+#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
+/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
+ Interface. */
+#define DORQ_REG_OUTST_REQ 0x17003c
+#define DORQ_REG_PF_USAGE_CNT 0x1701d0
+#define DORQ_REG_REGN 0x170038
+/* [R 4] Current value of response A counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
+/* [R 4] Current value of response B counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+/* [RW 4] The initial credit at the Doorbell Response Interface. The write
+ writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
+ read reads this written value. */
+#define DORQ_REG_RSP_INIT_CRD 0x170048
+/* [RW 4] Initial activity counter value on the load request; when the
+ shortcut is done. */
+#define DORQ_REG_SHRT_ACT_CNT 0x170070
+/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
+#define DORQ_REG_SHRT_CMHEAD 0x170054
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define HC_REG_AGG_INT_0 0x108050
+#define HC_REG_AGG_INT_1 0x108054
+#define HC_REG_ATTN_BIT 0x108120
+#define HC_REG_ATTN_IDX 0x108100
+#define HC_REG_ATTN_MSG0_ADDR_L 0x108018
+#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
+#define HC_REG_ATTN_NUM_P0 0x108038
+#define HC_REG_ATTN_NUM_P1 0x10803c
+#define HC_REG_COMMAND_REG 0x108180
+#define HC_REG_CONFIG_0 0x108000
+#define HC_REG_CONFIG_1 0x108004
+#define HC_REG_FUNC_NUM_P0 0x1080ac
+#define HC_REG_FUNC_NUM_P1 0x1080b0
+/* [RW 3] Parity mask register #0 read/write */
+#define HC_REG_HC_PRTY_MASK 0x1080a0
+/* [R 3] Parity register #0 read */
+#define HC_REG_HC_PRTY_STS 0x108094
+/* [RC 3] Parity register #0 read clear */
+#define HC_REG_HC_PRTY_STS_CLR 0x108098
+#define HC_REG_INT_MASK 0x108108
+#define HC_REG_LEADING_EDGE_0 0x108040
+#define HC_REG_LEADING_EDGE_1 0x108048
+#define HC_REG_MAIN_MEMORY 0x108800
+#define HC_REG_MAIN_MEMORY_SIZE 152
+#define HC_REG_P0_PROD_CONS 0x108200
+#define HC_REG_P1_PROD_CONS 0x108400
+#define HC_REG_PBA_COMMAND 0x108140
+#define HC_REG_PCI_CONFIG_0 0x108010
+#define HC_REG_PCI_CONFIG_1 0x108014
+#define HC_REG_STATISTIC_COUNTERS 0x109000
+#define HC_REG_TRAILING_EDGE_0 0x108044
+#define HC_REG_TRAILING_EDGE_1 0x10804c
+#define HC_REG_UC_RAM_ADDR_0 0x108028
+#define HC_REG_UC_RAM_ADDR_1 0x108030
+#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
+#define HC_REG_VQID_0 0x108008
+#define HC_REG_VQID_1 0x10800c
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS 0x130108
+/* [R 4] Debug: attn_fsm */
+#define IGU_REG_ATTN_FSM 0x130054
+#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
+#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
+/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
+ * 1-pending). [2:0] = PFID. Pending means the attention message was sent
+ * but write done was not received. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
+#define IGU_REG_COMMAND_REG_CTRL 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this register are set and cleared via the producer
+ * command. Data valid only in addresses 0-4; all the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM 0x130064
+/* [R 1] data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE 136
+#define IGU_REG_PBA_STATUS_LSB 0x130138
+#define IGU_REG_PBA_STATUS_MSB 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
+/* [WB_R 32] Each bit represents the pending bits status for that SB. 0 =
+ * no pending; 1 = pending. Pending means the interrupt was asserted and
+ * write done was not received. Data valid only in addresses 0-4; all the
+ * rest are zero. */
+#define IGU_REG_PENDING_BITS_STATUS 0x130300
+#define IGU_REG_PF_CONFIGURATION 0x130154
+/* [RW 20] producers only. E2 mode: addresses 0-135 map to the mapping
+ * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
+ * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
+ * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for non default SB; each even line in the
+ * memory holds the U producer and each odd line holds the C producer. The
+ * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
+ * last 20 producers are for the DSB for each PF. Each PF has five segments
+ * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
+#define IGU_REG_PROD_CONS_MEMORY 0x132000
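+/*
+ * Illustrative example (editorial; assumes one 32-bit producer per line):
+ * in E2 mode the PF1 default producer (line 137) would live at
+ * IGU_REG_PROD_CONS_MEMORY + 137 * 4.
+ */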
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM 0x130068
+/* [RW 6] Writing one to each bit will reset the appropriate memory. When
+ * the memory reset has finished the appropriate bit will be cleared. Bit 0
+ * - mapping memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask
+ * register; Bit 3 - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES 0x130158
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
+#define IGU_REG_SB_MASK_LSB 0x130164
+#define IGU_REG_SB_MASK_MSB 0x130168
+/* [RW 16] Number of commands that were dropped without causing an
+ * interrupt due to: read access for a WO BAR address; or write access for
+ * a RO BAR address or any access for a reserved address or PCI function
+ * error is set and address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
+/* [RW 32] Number of cycles the timer mask masks the IGU interrupt after a
+ * timer mask command arrives. Value must be bigger than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
+#define IGU_REG_VF_CONFIGURATION 0x130170
+/* [WB_R 32] Each bit represents the write done pending status for that SB
+ * (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data valid only in addresses 0-4; all the rest are zero. */
+#define IGU_REG_WRITE_DONE_PENDING 0x130480
+#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
+#define MCP_REG_MCPR_NVM_ADDR 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND 0x86400
+#define MCP_REG_MCPR_NVM_READ 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
+#define MCP_REG_MCPR_NVM_WRITE 0x86408
+#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
+/* [R 32] read first 32 bit after inversion of function 0. mapped as
+ follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
+ [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
+ GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
+ glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
+ [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
+ MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
+ Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
+ interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
+ error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
+ interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
+ Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
+/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434
+/* [R 32] read second 32 bit after inversion of function 0. mapped as
+ follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c
+/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
+ PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
+ [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
+ [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
+ XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440
+/* [R 32] read third 32 bit after inversion of function 0. mapped as
+ follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
+ error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
+ PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448
+/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
+ CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
+ Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
+ Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
+ error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
+ interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
+ MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
+ Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
+ timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
+ func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
+ func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
+ timers attn_4 func1; [30] General attn0; [31] General attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c
+/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
+ follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454
+/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
+ General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
+ [4] General attn6; [5] General attn7; [6] General attn8; [7] General
+ attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
+ General attn13; [12] General attn14; [13] General attn15; [14] General
+ attn16; [15] General attn17; [16] General attn18; [17] General attn19;
+ [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
+ RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
+ RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
+ attention; [27] GRC Latched reserved access attention; [28] MCP Latched
+ rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
+ ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
+/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
+ * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
+/* [W 14] a write to this register results in the clear of the latched
+   signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
+   d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
+   latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
+   GRC Latched reserved access attention; one in d7 clears Latched
+   rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
+   Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
+   ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
+   pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; reads
+   from this register return zero */
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
+/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
+/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
+ 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
+/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
+#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
+/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc
+#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c
+/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080
+/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
+/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
+#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
+/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
+#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
+/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084
+/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
+/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
+#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
+/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
+#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
+/* [RW 32] fourth 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
+/* [RW 32] fourth 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
+/* [RW 32] fourth 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
+#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
+/* [RW 32] fourth 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
+#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
+/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
+ 128 bit vector */
+#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
+#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004
+#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
+#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
+#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
+#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
+#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c
+#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
+#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
+#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
+#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
+#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
+#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
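+/* Illustrative sketch (an assumption, not code from this patch): since the
+ * general attention registers are 1-bit set/clr controls, software can
+ * pulse a general attention line by writing 1 and then 0, assuming the
+ * driver's REG_WR() GRC accessor and a valid struct bnx2x *bp:
+ *
+ *	static void example_pulse_gen_attn(struct bnx2x *bp)
+ *	{
+ *		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12, 1);
+ *		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12, 0);
+ *	}
+ */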
+#define MISC_REG_AEU_GENERAL_MASK 0xa61c
+/* [RW 32] first 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
+ function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
+ [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
+ [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
+ for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
+ Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
+ interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
+ Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
+ Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
+#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
+/* [RW 32] second 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
+ error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
+ interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
+ Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
+ interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
+#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
+/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
+ [9:8] = reserved. Zero = mask; one = unmask */
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
+/* [RW 1] If set a system kill occurred */
+#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
+/* [RW 32] Represents the status of the input vector to the AEU when a
+ system kill occurred. The register is reset by por reset. Mapped as
+ follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
+#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
+#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
+#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
+/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
+ Port. */
+#define MISC_REG_BOND_ID 0xa400
+/* [R 8] These bits indicate the metal revision of the chip. This value
+ starts at 0x00 for each all-layer tape-out and increments by one for each
+ tape-out. */
+#define MISC_REG_CHIP_METAL 0xa404
+/* [R 16] These bits indicate the part number for the chip. */
+#define MISC_REG_CHIP_NUM 0xa408
+/* [R 4] These bits indicate the base revision of the chip. This value
+ starts at 0x0 for the A0 tape-out and increments by one for each
+ all-layer tape-out. */
+#define MISC_REG_CHIP_REV 0xa40c
+/* [RW 32] The following driver registers (1...16) represent 16 drivers and
+ 32 clients. Each client can be controlled by one driver only. A one in a
+ bit position means that this driver controls the corresponding client
+ (e.g. bit 5 set means this driver controls client number 5). addr1 = set;
+ addr0 = clear; a read from either address returns the same result =
+ status. A write to address 1 requests control of every client whose bit
+ is set in the write data. If the client is free (the corresponding bit in
+ all the other drivers is clear) a one is written to that driver register;
+ if the client is not free the bit remains zero. If the corresponding bit
+ is already set (the driver requests control of a client it already
+ controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt is asserted. A
+ write to address 0 requests the release of every client whose bit is set
+ in the write data. If the corresponding bit is clear (the driver requests
+ release of a client it does not control) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt is asserted. */
+#define MISC_REG_DRIVER_CONTROL_1 0xa510
+#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
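+/* Illustrative sketch (an assumption, not the driver's canonical code):
+ * claiming a client through the set page (addr 1, i.e. base + 4 for these
+ * 32-bit registers) and checking success via the status read-back; release
+ * writes the same bit to the clear page (addr 0). Assumes the driver's
+ * REG_RD()/REG_WR() accessors.
+ *
+ *	static bool example_claim_client(struct bnx2x *bp, u32 client_bit)
+ *	{
+ *		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, client_bit);
+ *		return REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & client_bit;
+ *	}
+ *
+ *	static void example_release_client(struct bnx2x *bp, u32 client_bit)
+ *	{
+ *		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1, client_bit);
+ *	}
+ */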
+/* [RW 1] e1hmf for WOL. If clear, the WOL signal to the PXP will be sent
+ on bit 0 only. */
+#define MISC_REG_E1HMF_MODE 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c
+/* [RW 2] 4 port path swap overwrite. [0] - Overwrite control; if it is 0 -
+ the path_swap output is equal to 4 port mode path swap input pin; if it
+ is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754
+/* [RW 2] 4 port port swap overwrite. [0] - Overwrite control; if it is 0 -
+ the port_swap output is equal to 4 port mode port swap input pin; if it
+ is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734
+/* [RW 32] Debug only: spare RW register reset by core reset */
+#define MISC_REG_GENERIC_CR_0 0xa460
+#define MISC_REG_GENERIC_CR_1 0xa464
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+ use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+ can not be configured as an output. Each output has its output enable in
+ the MCP register space; but this bit needs to be set to make use of that.
+ Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+ set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+ When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+ the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+ spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG 0xa9a0
+/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
+ these bits is written as a '1'; the corresponding GPIO bit will turn off
+ its drivers and become an input. This is the reset state of all GPIO
+ pins. The read value of these bits will be a '1' if the last command
+ (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+ [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
+ as a '1'; the corresponding GPIO bit will drive low. The read value of
+ these bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for
+ this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port
+ 0; When any of these bits is written as a '1'; the corresponding GPIO bit
+ will drive high (if it has that capability). The read value of these bits
+ will be a '1' if the last command (#SET; #CLR; or #FLOAT) for this bit
+ was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0; RO;
+ These bits indicate the read value of each of the eight GPIO pins. This
+ is the result value of the pin; not the drive value. Writing these bits
+ will have no effect. */
+#define MISC_REG_GPIO 0xa490
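+/* Illustrative sketch (an assumption): the SET/CLR/FLOAT fields act only
+ * on bits written as '1', so driving port-0 GPIO pin n (0-3) high needs a
+ * single-bit write into the SET field ([11-8] for port 0), and floating it
+ * again writes the matching FLOAT bit ([27-24] for port 0). Assumes the
+ * driver's REG_WR() accessor.
+ *
+ *	static void example_gpio0_drive_high(struct bnx2x *bp, int n)
+ *	{
+ *		REG_WR(bp, MISC_REG_GPIO, 1 << (8 + n));
+ *	}
+ *
+ *	static void example_gpio0_float(struct bnx2x *bp, int n)
+ *	{
+ *		REG_WR(bp, MISC_REG_GPIO, 1 << (24 + n));
+ *	}
+ */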
+/* [RW 8] These bits enable the GPIO_INTs to signal events to the IGU/MCP
+ according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
+ p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
+ [7] p1_gpio_3; */
+#define MISC_REG_GPIO_EVENT_EN 0xa2bc
+/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing
+ a '1' to these bits clears the corresponding bit in the #OLD_VALUE
+ register. This will acknowledge an interrupt on the falling edge of the
+ corresponding GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16]
+ OLD_SET port0; Writing a '1' to these bits sets the corresponding bit in
+ the #OLD_VALUE register. This will acknowledge an interrupt on the rising
+ edge of the corresponding GPIO input (reset value 0). [15-12] OLD_VALUE
+ port1; [11-8] OLD_VALUE port0; RO; These bits indicate the old value of
+ the GPIO input value. When the ~INT_STATE bit is set; this bit indicates
+ the OLD value of the pin such that if ~INT_STATE is set and this bit is
+ '0'; then the interrupt is due to a low to high edge. If ~INT_STATE is
+ set and this bit is '1'; then the interrupt is due to a high to low edge
+ (reset value 0). [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These
+ bits indicate the current GPIO interrupt state for each GPIO pin. This
+ bit is cleared when the appropriate #OLD_SET or #OLD_CLR command bit is
+ written. This bit is set when the GPIO input does not match the current
+ value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_GPIO_INT 0xa494
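+/* Illustrative sketch (an assumption, relying on the field layout given
+ * above): acknowledging a rising-edge interrupt on port-0 GPIO pin n by
+ * writing the matching OLD_SET bit ([19-16] for port 0), assuming the
+ * driver's REG_WR() accessor.
+ *
+ *	static void example_gpio0_ack_rising(struct bnx2x *bp, int n)
+ *	{
+ *		REG_WR(bp, MISC_REG_GPIO_INT, 1 << (16 + n));
+ *	}
+ */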
+/* [R 28] this field holds the last information that caused reserved
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_RSV_ATTN 0xa3c0
+/* [R 28] this field holds the last information that caused timeout
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
+/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
+ access that does not finish within
+ ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
+ cleared; this timeout is disabled. If this timeout occurs; the GRC shall
+ assert its attention output. */
+#define MISC_REG_GRC_TIMEOUT_EN 0xa280
+/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
+ the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
+ 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
+ (reset value 001) Charge pump current control; 111 for 720u; 011 for
+ 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
+ Global bias control; When bit 7 is high bias current will be 10 0gh; When
+ bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
+ Pll_observe (reset value 010) Bits to control observability. bit 10 is
+ for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
+ (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
+ and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
+ sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
+ internally). [14] reserved (reset value 0) Reset for VCO sequencer is
+ connected to RESET input directly. [15] capRetry_en (reset value 0)
+ enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
+ value 0) bit to continuously monitor vco freq (inverted). [17]
+ freqDetRestart_en (reset value 0) bit to enable restart when not freq
+ locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
+ retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
+ 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
+ pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
+ (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
+ 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
+ bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
+ enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
+ capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
+ restart. [27] capSelectM_en (reset value 0) bit to enable cap select
+ register bits. */
+#define MISC_REG_LCPLL_CTRL_1 0xa2a4
+#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 4] Interrupt mask register #0 read/write */
+#define MISC_REG_MISC_INT_MASK 0xa388
+/* [RW 1] Parity mask register #0 read/write */
+#define MISC_REG_MISC_PRTY_MASK 0xa398
+/* [R 1] Parity register #0 read */
+#define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
+#define MISC_REG_NIG_WOL_P0 0xa270
+#define MISC_REG_NIG_WOL_P1 0xa274
+/* [R 1] If set; indicates that pcie_rst_b was asserted without perst
+ assertion */
+#define MISC_REG_PCIE_HOT_RESET 0xa618
+/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x071d2911.
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
+ divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
+ divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
+ divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
+ divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
+ freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
+ (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
+ 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
+ Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
+ value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
+ 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
+ [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
+ Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
+ testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
+ testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
+ testa_en (reset value 0); */
+#define MISC_REG_PLL_STORM_CTRL_1 0xa294
+#define MISC_REG_PLL_STORM_CTRL_2 0xa298
+#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
+#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
+/* [R 1] Status of 4 port mode enable input pin. */
+#define MISC_REG_PORT4MODE_EN 0xa750
+/* [RW 2] 4 port mode enable overwrite. [0] - Overwrite control; if it is 0
+ * - the port4mode_en output is equal to 4 port mode input pin; if it is 1
+ * - the port4mode_en output is equal to bit[1] of this register; [1] -
+ * Overwrite value. If bit[0] of this register is 1 this is the value that
+ * receives the port4mode_en output. */
+#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
+/* [RW 32] reset reg#2; write/read one = the specific block is out of
+ reset; write/read zero = the specific block is in reset; addr 0-wr- the
+ write value will be written to the register; addr 1-set - one will be
+ written to all the bits that have the value of one in the data written
+ (bits that have the value of zero will not be changed); addr 2-clear -
+ zero will be written to all the bits that have the value of one in the
+ data written (bits that have the value of zero will not be changed); addr
+ 3-ignore; read ignore from all addr except addr 00; inside order of the
+ bits is: [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4]
+ rst_grc; [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b;
+ [7] rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
+ [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
+ Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; [16]
+ rst_pxp_rq_rd_wr; [31:17] reserved */
+#define MISC_REG_RESET_REG_2 0xa590
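+/* Illustrative sketch (assumption: addr 1/addr 2 correspond to byte
+ * offsets +4/+8 for these 32-bit registers): the set/clear pages let one
+ * block's reset bit be toggled without a read-modify-write of the other
+ * bits. Here bit [2] (rst_emac0) is taken out of reset and put back,
+ * assuming the driver's REG_WR() accessor.
+ *
+ *	static void example_emac0_out_of_reset(struct bnx2x *bp)
+ *	{
+ *		REG_WR(bp, MISC_REG_RESET_REG_2 + 4, 1 << 2);
+ *	}
+ *
+ *	static void example_emac0_into_reset(struct bnx2x *bp)
+ *	{
+ *		REG_WR(bp, MISC_REG_RESET_REG_2 + 8, 1 << 2);
+ *	}
+ */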
+/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
+ shared with the driver resides */
+#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+ the corresponding SPIO bit will turn off its drivers and become an input.
+ This is the reset state of all SPIO pins. The read value of these bits
+ will be a '1' if the last command (#SET; #CLR; or #FLOAT) for this bit
+ was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits is
+ written as a '1'; the corresponding SPIO bit will drive low. The read
+ value of these bits will be a '1' if the last command (#SET; #CLR; or
+ #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+ these bits is written as a '1'; the corresponding SPIO bit will drive
+ high (if it has that capability). The read value of these bits will be a
+ '1' if the last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+ (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+ each of the eight SPIO pins. This is the result value of the pin; not the
+ drive value. Writing these bits will have no effect. Each 8 bit field is
+ divided as follows: [0] VAUX Enable; when pulsed low; enables supply from
+ VAUX. (This is an output pin only; the FLOAT field is not applicable for
+ this pin); [1] VAUX Disable; when pulsed low; disables supply from VAUX.
+ (This is an output pin only; the FLOAT field is not applicable for this
+ pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+ select VAUX supply. (This is an output pin only; it is not controlled by
+ the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+ field is not applicable for this pin; only the VALUE field is relevant -
+ it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
+ Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+ device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO 0xa4fc
+/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP
+ according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
+ [7:6] reserved */
+#define MISC_REG_SPIO_EVENT_EN 0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
+ corresponding bit in the #OLD_VALUE register. This will acknowledge an
+ interrupt on the falling edge of the corresponding SPIO input (reset
+ value 0). [23-16] OLD_SET Writing a '1' to these bits sets the
+ corresponding bit in the #OLD_VALUE register. This will acknowledge an
+ interrupt on the rising edge of the corresponding SPIO input (reset value
+ 0). [15-8] OLD_VALUE RO; These bits indicate the old value of the SPIO
+ input value. When the ~INT_STATE bit is set; this bit indicates the OLD
+ value of the pin such that if ~INT_STATE is set and this bit is '0'; then
+ the interrupt is due to a low to high edge. If ~INT_STATE is set and this
+ bit is '1'; then the interrupt is due to a high to low edge (reset value
+ 0). [7-0] INT_STATE RO; These bits indicate the current SPIO interrupt
+ state for each SPIO pin. This bit is cleared when the appropriate
+ #OLD_SET or #OLD_CLR command bit is written. This bit is set when the
+ SPIO input does not match the current value in #OLD_VALUE (reset value
+ 0). */
+#define MISC_REG_SPIO_INT 0xa500
+/* [RW 32] reload value for counter 4 if reload; the value will be reloaded
+ if the counter reached zero and the reload bit
+ (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1]) is set */
+#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
+/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
+ timer 8 */
+#define MISC_REG_SW_TIMER_VAL 0xa5c0
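+/* Illustrative sketch (assumption: the eight timer addresses advance in
+ * 4-byte steps from this base): reading the current value of software
+ * timer t (1-8), assuming the driver's REG_RD() accessor.
+ *
+ *	static u32 example_read_sw_timer(struct bnx2x *bp, int t)
+ *	{
+ *		return REG_RD(bp, MISC_REG_SW_TIMER_VAL + 4 * (t - 1));
+ *	}
+ */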
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758
+/* [RW 2] 2 port swap overwrite. [0] - Overwrite control; if it is 0 - the
+ path_swap output is equal to 2 port mode path swap input pin; if it is 1
+ - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+ loaded; 0 - prepare; 1 - unprepare */
+#define MISC_REG_UNPREPARED 0xa424
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+ side. This should be less than or equal to phy_port_mode if some of the
+ ports are not used. This enables reduction of frequency on the core side.
+ This is a strap input for the XMAC_MP core; 00 - Single Port Mode; 01 -
+ Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. It should be
+ changed only while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+ Core. This is a strap input for the XMAC_MP core; 00 - Single Port Mode;
+ 01 - Dual Port Mode; 1x - Quad Port Mode. It should be changed only while
+ reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960
+/* [RW 32] 1 [47] Packet Size = 64 Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
+/* [RW 1] Input enable for RX_BMAC0 IF */
+#define NIG_REG_BMAC0_IN_EN 0x100ac
+/* [RW 1] output enable for TX_BMAC0 IF */
+#define NIG_REG_BMAC0_OUT_EN 0x100e0
+/* [RW 1] output enable for TX BMAC pause port 0 IF */
+#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
+/* [RW 1] output enable for RX_BMAC0_REGS IF */
+#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
+/* [RW 1] output enable for RX BRB1 port0 IF */
+#define NIG_REG_BRB0_OUT_EN 0x100f8
+/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
+#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
+/* [RW 1] output enable for RX BRB1 port1 IF */
+#define NIG_REG_BRB1_OUT_EN 0x100fc
+/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
+#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
+/* [RW 1] output enable for RX BRB1 LP IF */
+#define NIG_REG_BRB_LB_OUT_EN 0x10100
+/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
+ error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
+ [73:72] vnic_num; [81:74] sideband_info */
+#define NIG_REG_DEBUG_PACKET_LB 0x10800
+/* [RW 1] Input enable for TX Debug packet */
+#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
+/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
+ packets from the PBF are not forwarded to the MAC and are simply deleted
+ from the FIFO. The first packet may be deleted from the middle; the last
+ packet will always be deleted to the end. */
+#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
+/* [RW 1] Output enable to EMAC0 */
+#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
+/* [RW 1] MAC configuration for packets of port0. If 1 - all packets are
+ output to the EMAC for port0; otherwise to the BMAC for port0 */
+#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
+/* [RW 1] Input enable for TX PBF user packet port0 IF */
+#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
+/* [RW 1] Input enable for TX PBF user packet port1 IF */
+#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
+/* [RW 1] Input enable for TX UMP management packet port0 IF */
+#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
+/* [RW 1] Input enable for RX_EMAC0 IF */
+#define NIG_REG_EMAC0_IN_EN 0x100a4
+/* [RW 1] output enable for TX EMAC pause port 0 IF */
+#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
+/* [R 1] status from emac0. This bit is set when MDINT from either the
+ EXT_MDINT pin or from the Copper PHY is driven low. This condition must
+ be cleared in the attached PHY device that is driving the MDINT pin. */
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
+/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC0 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
+/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC1 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
+/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
+#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
+/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
+ packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
+#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
+/* [RW 27] 0 - must be active for Everest A0; 1 - for Everest B0 when latch
+ logic for interrupts must be used. Enable per bit of interrupt of
+ ~latch_status.latch_status */
+#define NIG_REG_LATCH_BC_0 0x16210
+/* [RW 27] Latch for each interrupt from Unicore. b[0]
+ status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
+ b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
+ b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
+ b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
+ b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
+ b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
+ b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
+ b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
+ b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
+ b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
+ b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
+ b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
+#define NIG_REG_LATCH_STATUS_0 0x18000
+/* [RW 1] led 10g for port 0 */
+#define NIG_REG_LED_10G_P0 0x10320
+/* [RW 1] led 10g for port 1 */
+#define NIG_REG_LED_10G_P1 0x10324
+/* [RW 1] Port0: This bit is set to enable the use of the
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+ defined below. If this bit is cleared; then the blink rate will be about
+ 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
+/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
+ the Traffic LED in milliseconds. Must be a non-zero value. This 12-bit
+ field is reset to 0x080; giving a default blink rate of approximately
+ 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
+/* [RW 1] Port0: If set along with the
+ ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+ bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+ bit; the Traffic LED will blink with the blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
+/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
+ Traffic LED will then be controlled via bit
+ ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 and bit
+ ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
+ turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
+ set; the LED will blink with the blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
+/* [RW 4] led mode for port0: 0 - MAC; 1-3 - PHY1; 4 - MAC2; 5-7 - PHY4;
+ 8 - MAC3; 9-11 - PHY7; 12 - MAC4; 13-15 - PHY10; */
+#define NIG_REG_LED_MODE_P0 0x102f0
+/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1 -
+ tsdm enable; b2 - usdm enable */
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
+/* [RW 1] SAFC enable for port0. This register may get 1 only when
+ ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable = 0 for the same
+ port */
+#define NIG_REG_LLFC_ENABLE_0 0x16208
+#define NIG_REG_LLFC_ENABLE_1 0x1620c
+/* [RW 16] bit-mask of the classes that are high-priority for port0 */
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
+/* [RW 16] bit-mask of the classes that are low-priority for port0 */
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
+/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
+#define NIG_REG_LLFC_OUT_EN_0 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1 0x160cc
+#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
+#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
+#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
+/* [RW 1] send to BRB1 if no match on any of the RMP rules. */
+#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
+/* [RW 2] Determine the classification participants. 0: no classification.
+ 1: classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH0_CLS_TYPE 0x16080
+/* [RW 32] cm header for llh0 */
+#define NIG_REG_LLH0_CM_HEADER 0x1007c
+#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
+#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
+/* [RW 16] destination TCP port 1. The LLH will look for this port in all
+ incoming packets. */
+#define NIG_REG_LLH0_DEST_TCP_0 0x10220
+/* [RW 16] destination UDP port 1. The LLH will look for this port in all
+ incoming packets. */
+#define NIG_REG_LLH0_DEST_UDP_0 0x10214
+#define NIG_REG_LLH0_ERROR_MASK 0x1008c
+/* [RW 8] event id for llh0 */
+#define NIG_REG_LLH0_EVENT_ID 0x10084
+#define NIG_REG_LLH0_FUNC_EN 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
+#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
+/* [RW 1] Determine the IP version to look for in
+ ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1 - IPv4 */
+#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
+/* [RW 1] t bit for llh0 */
+#define NIG_REG_LLH0_T_BIT 0x10074
+/* [RW 12] VLAN ID 1. In case of a VLAN packet the LLH will look for this
+ ID. */
+#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
+/* [RW 8] init credit counter for port0 in LLH */
+#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
+#define NIG_REG_LLH0_XCM_MASK 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
+/* [RW 1] send to BRB1 if no match on any of the RMP rules. */
+#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
+/* [RW 2] Determine the classification participants. 0: no classification.
+ 1: classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH1_CLS_TYPE 0x16084
+/* [RW 32] cm header for llh1 */
+#define NIG_REG_LLH1_CM_HEADER 0x10080
+#define NIG_REG_LLH1_ERROR_MASK 0x10090
+/* [RW 8] event id for llh1 */
+#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_MEM 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE 0x18614
+/* [RW 8] init credit counter for port1 in LLH */
+#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
+#define NIG_REG_LLH1_XCM_MASK 0x10134
+/* [RW 1] When this bit is set; the LLH will expect all packets to carry an
+ e1hov */
+#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ sending it to the BRB or calculating WoL on it. */
+#define NIG_REG_LLH_MF_MODE 0x16024
+#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
+#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
+/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
+#define NIG_REG_NIG_EMAC0_EN 0x1003c
+/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
+#define NIG_REG_NIG_EMAC1_EN 0x10040
+/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+ EMAC0 to strip the CRC from the ingress packets. */
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
+/* [R 32] Interrupt register #0 read */
+#define NIG_REG_NIG_INT_STS_0 0x103b0
+#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1 0x183d8
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
+#define NIG_REG_NIG_PRTY_STS 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE 0x18078
+#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN of the receive
+ * packet. Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
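+/* Illustrative sketch (an assumption): packing the eight 4-bit fields,
+ * where cos[p] is the COS chosen for VLAN priority p (only COS 0/1 are
+ * valid on E2 per the note above).
+ *
+ *	static u32 example_pack_prio_to_cos(const u8 cos[8])
+ *	{
+ *		u32 val = 0;
+ *		int p;
+ *
+ *		for (p = 0; p < 8; p++)
+ *			val |= (u32)(cos[p] & 0xf) << (4 * p);
+ *		return val;
+ *	}
+ */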
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
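+/* Illustrative sketch (an assumption): a 4+4 split that maps PFC
+ * priorities 0-3 to COS 0 and 4-7 to COS 1 via the per-COS priority masks
+ * above, assuming the driver's REG_WR() accessor. More than one mask bit
+ * may be set, so other splits are equally possible.
+ *
+ *	static void example_split_prios(struct bnx2x *bp)
+ *	{
+ *		REG_WR(bp, NIG_REG_P0_RX_COS0_PRIORITY_MASK, 0x000f);
+ *		REG_WR(bp, NIG_REG_P0_RX_COS1_PRIORITY_MASK, 0x00f0);
+ *	}
+ */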
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c
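+/* Usage sketch (illustration only): the client-to-credit-register map is
+ * one 36-bit value split across the LSB/MSB pair above, 4 bits per client.
+ * Helper name is hypothetical; u64 assumed from <linux/types.h>. */
+static inline void nig_wr_p0_credit_map2(struct bnx2x *bp, u64 map)
+{
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+	       (u32)(map & 0xffffffff));
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB,
+	       (u32)(map >> 32) & 0xf);
+	/* map = 0x543210876ULL restores the documented reset default. */
+}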
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 -- management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
+ * for management at priority 0; debug traffic at priorities 1 and 2; COS0
+ * traffic at priority 3; and COS1 traffic at priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
+#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35:32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35:32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_MAC_IN_EN 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ two round-robin arbitration slots to avoid starvation. A value of 0 means
+ no strict priority cycles - the strict priority with anti-starvation
+ arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35:32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35:32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+ forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
+/* [RW 1] Pause enable for port0. This register may be set to 1 only when
+ ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable = 0 for the same
+ port */
+#define NIG_REG_PAUSE_ENABLE_0 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1 0x160c4
+/* [RW 1] Input enable for RX PBF LP IF */
+#define NIG_REG_PBF_LB_IN_EN 0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+ ~nig_registers_strap_override.strap_override = 1 */
+#define NIG_REG_PORT_SWAP 0x10394
+/* [RW 1] PPP enable for port0. This register may be set to 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0 0x160b0
+#define NIG_REG_PPP_ENABLE_1 0x160b4
+/* [RW 1] output enable for RX parser descriptor IF */
+#define NIG_REG_PRS_EOP_OUT_EN 0x10104
+/* [RW 1] Input enable for RX parser request IF */
+#define NIG_REG_PRS_REQ_IN_EN 0x100b8
+/* [RW 5] control to serdes - CL45 DEVAD */
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
+/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
+/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
+/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+ between 1024 and 1522 bytes for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+ 1523 bytes and above for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port1 */
+#define NIG_REG_STAT1_BRB_DISCARD 0x10628
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+ between 1024 and 1522 bytes for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+ 1523 bytes and above for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
+/* [WB_R 64] Rx statistics : User octets received for LP */
+#define NIG_REG_STAT2_BRB_OCTET 0x107e0
+#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
+#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
+/* [RW 1] port swap mux selection. If this register is equal to 0 then port
+ swap is equal to the SPIO pin that inputs from ifmux_serdes_swap. If 1 then
+ port swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE 0x10398
+/* [RW 1] output enable for RX_XCM0 IF */
+#define NIG_REG_XCM0_OUT_EN 0x100f0
+/* [RW 1] output enable for RX_XCM1 IF */
+#define NIG_REG_XCM1_OUT_EN 0x100f4
+/* [RW 1] control to xgxs - remote PHY in-band MDIO */
+#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
+/* [RW 5] control to xgxs - CL45 DEVAD */
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
+/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
+/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
+/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
+/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
+/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
+#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
+/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
+#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
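+/* Decode sketch (illustration only): despite its _SIZE suffix, the last
+ * constant above is the bit offset of the 4-bit XGXS0 link status field
+ * (mask 0xf << 18), so the field is extracted with mask-then-shift. */
+static inline u32 nig_xgxs0_link_status(u32 status)
+{
+	return (status &
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS) >>
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE;
+}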
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1 0x140340
+/* [RW 1] Disable processing further tasks from port 0 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
+/* [RW 1] Disable processing further tasks from port 1 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
+/* [RW 1] Disable processing further tasks from port 4 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
+#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; to which the RR output is connected to (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; to which the RR output is connected to (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset, strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED 0x15c050
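+/* Configuration sketch (illustration only): a minimal port 0 ETS bring-up
+ * with the PBF arbiter registers above. Clients 0 and 1 are made subject
+ * to WFQ and mapped to credit registers 0 and 1; the (command arbiter)
+ * enable bit is set last. All values are hypothetical examples; COS
+ * weight/upper-bound setup is omitted for brevity. */
+static inline void pbf_ets_enable_p0_sketch(struct bnx2x *bp)
+{
+	/* Client 0 -> credit reg 0 (bits 2:0); client 1 -> credit reg 1. */
+	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 1 << 3);
+	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
+	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0x3);
+	/* One strict slot between RR slots avoids starving strict clients. */
+	REG_WR(bp, PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 1);
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+}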
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
+#define PBF_REG_IF_ENABLE_REG 0x140044
+/* [RW 1] Init bit. When set the initial credits are copied to the credit
+ registers (except the port credits). Should be set and then reset after
+ the configuration of the block has ended. */
+#define PBF_REG_INIT 0x140000
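+/* Usage sketch (illustration only): per the note above, the init bit is
+ * pulsed - set and then cleared - once block configuration is complete,
+ * which latches the initial credits into the credit registers. */
+static inline void pbf_latch_init_credits(struct bnx2x *bp)
+{
+	REG_WR(bp, PBF_REG_INIT, 1);
+	REG_WR(bp, PBF_REG_INIT, 0);
+}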
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1 0x15c234
+/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P0 0x140004
+/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P1 0x140008
+/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P4 0x14000c
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
+/* [RW 1] Enable for mac interface 0. */
+#define PBF_REG_MAC_IF0_ENABLE 0x140030
+/* [RW 1] Enable for mac interface 1. */
+#define PBF_REG_MAC_IF1_ENABLE 0x140034
+/* [RW 1] Enable for the loopback interface. */
+#define PBF_REG_MAC_LB_ENABLE 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
+/* [RW 10] Port 0 threshold used by the arbiter in 16 byte lines when pause
+ is not supported. */
+#define PBF_REG_P0_ARB_THRSH 0x1400e4
+/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P0_CREDIT 0x140200
+/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P0_INIT_CRD 0x1400d0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
+#define PBF_REG_P0_TASK_CNT 0x140204
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_CREDIT 0x140208
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_INIT_CRD 0x1400d4
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
+#define PBF_REG_P1_TASK_CNT 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
+/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P4_CREDIT 0x140210
+/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P4_INIT_CRD 0x1400e0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
+#define PBF_REG_P4_TASK_CNT 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
+/* [RW 5] Interrupt mask register #0 read/write */
+#define PBF_REG_PBF_INT_MASK 0x1401d4
+/* [R 5] Interrupt register #0 read */
+#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
+/* [R 13] Number of 8 byte lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
+#define PB_REG_CONTROL 0
+/* [RW 2] Interrupt mask register #0 read/write */
+#define PB_REG_PB_INT_MASK 0x28
+/* [R 2] Interrupt register #0 read */
+#define PB_REG_PB_INT_STS 0x1c
+/* [RW 4] Parity mask register #0 read/write */
+#define PB_REG_PB_PRTY_MASK 0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
+ * bits[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
+/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
+ * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
+ * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
+ * arrived with a correctable error. Bit 3 - Configuration RW arrived with
+ * an uncorrectable error. Bit 4 - Completion with Configuration Request
+ * Retry Status. Bit 5 - Expansion ROM access received with a write request.
+ * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
+ * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
+ * and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
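+/* Decode sketch (illustration only) for the field layout documented above;
+ * the macro names are hypothetical, only the bit positions come from the
+ * comment. */
+#define RX_ERR_DETAILS_PFID(v)		((v) & 0x7)		/* [2:0] */
+#define RX_ERR_DETAILS_VF_VALID(v)	(((v) >> 3) & 0x1)	/* [3] */
+#define RX_ERR_DETAILS_VFID(v)		(((v) >> 4) & 0x3f)	/* [9:4] */
+#define RX_ERR_DETAILS_ERR_CODE(v)	(((v) >> 10) & 0x3)	/* [11:10] */
+#define RX_ERR_DETAILS_VALID(v)		(((v) >> 12) & 0x1)	/* [12] */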
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP. Reset by MCP writing 1 to
+ * sr_iov_disabled_request_clr. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
+/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
+ * completion did not return yet. 1 - tag is unused. Same functionality as
+ * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
+#define PGLUE_B_REG_TAGS_63_32 0x9244
+/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
+/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
+/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
+/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
+/* [R 32] Address [31:0] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
+/* [R 32] Address [63:32] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
+/* [R 31] Details of first read request not submitted due to error. [4:0]
+ * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
+ * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
+ * VFID. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
+/* [R 26] Details of first read request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
+/* [R 32] Address [31:0] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
+/* [R 32] Address [63:32] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
+/* [R 31] Details of first write request not submitted due to error. [4:0]
+ * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
+ * - VFID. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
+/* [R 26] Details of first write request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
+/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
+ * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any
+ * value (Byte resolution address). */
+#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
+#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
+#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
+#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
+#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
+#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
+#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
+/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
+/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
+/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
+/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
+/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
+/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
+/* [R 26] Details of first target VF request accessing VF GRC space that
+ * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
+ * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
+ * request accessing VF GRC space that failed permission check since the
+ * last time this register was cleared. Permission checks are: function
+ * permission; R/W permission; address range permission. */
+#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
+/* [R 31] Details of first target VF request with length violation (too many
+ * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
+ * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
+ * valid - indicates if there was a request with length violation since the
+ * last time this register was cleared. Length violations: length of more
+ * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
+ * length is more than 1 DW. */
+#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
+/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
+ * that there was a completion with uncorrectable error for the
+ * corresponding PF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_pf_7_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
+/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
+/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_127_96_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
+/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
+ * writes 1 to a bit in this register in order to clear the corresponding
+ * bit in was_error_vf_127_96 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_31_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
+/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_31_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
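+/* Usage sketch (illustration only): the was_error bits are dirty bits set
+ * by PXP and cleared with the usual write-1-to-clear idiom via the _CLR
+ * register, as described above. Helper name is hypothetical. */
+static inline void pglue_clr_was_error_vf_31_0(struct bnx2x *bp, int vfid)
+{
+	if (vfid < 32)	/* this sketch only covers VFs 0-31 */
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR, 1U << vfid);
+}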
+/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_63_32_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
+/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_63_32 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
+/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_95_64_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
+/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_95_64 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
+/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
+/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
+/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
+/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
+#define PRS_REG_A_PRSU_20 0x40134
+/* [R 8] debug only: CFC load request current credit. Transaction based. */
+#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
+/* [R 8] debug only: CFC search request current credit. Transaction based. */
+#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168
+/* [RW 6] The initial credit for the search message to the CFC interface.
+ Credit is transaction based. */
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
+/* [RW 24] CID for port 0 if no match */
+#define PRS_REG_CID_PORT_0 0x400fc
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is reset and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is set and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
+/* [RW 32] The CM header for a match and packet type 1 for loopback port.
+ Used in packet start message to TCM. */
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8
+/* [RW 32] The CM header for a match and packet type 0. Used in packet start
+ message to TCM. */
+#define PRS_REG_CM_HDR_TYPE_0 0x40078
+#define PRS_REG_CM_HDR_TYPE_1 0x4007c
+#define PRS_REG_CM_HDR_TYPE_2 0x40080
+#define PRS_REG_CM_HDR_TYPE_3 0x40084
+#define PRS_REG_CM_HDR_TYPE_4 0x40088
+/* [RW 32] The CM header in case there was not a match on the connection */
+#define PRS_REG_CM_NO_MATCH_HDR 0x400b8
+/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
+#define PRS_REG_E1HOV_MODE 0x401c8
+/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
+ start message to TCM. */
+#define PRS_REG_EVENT_ID_1 0x40054
+#define PRS_REG_EVENT_ID_2 0x40058
+#define PRS_REG_EVENT_ID_3 0x4005c
+/* [RW 16] The Ethernet type value for FCoE */
+#define PRS_REG_FCOE_TYPE 0x401d0
+/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
+ load request message. */
+#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
+#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008
+#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c
+#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010
+#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014
+#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
+#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
+#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PRS_REG_HDRS_AFTER_BASIC 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0
+/* [RW 4] The increment value to send in the CFC load request message */
+#define PRS_REG_INC_VALUE 0x40048
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PRS_REG_MUST_HAVE_HDRS 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac
+#define PRS_REG_NIC_MODE 0x40138
+/* [RW 8] The 8-bit event ID for cases where there is no match on the
+ connection. Used in packet start message to TCM. */
+#define PRS_REG_NO_MATCH_EVENT_ID 0x40070
+/* [ST 24] The number of input CFC flush packets */
+#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128
+/* [ST 32] The number of cycles the Parser halted its operation because it
+ could not allocate the next serial number */
+#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130
+/* [ST 24] The number of input packets */
+#define PRS_REG_NUM_OF_PACKETS 0x40124
+/* [ST 24] The number of input transparent flush packets */
+#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c
+/* [RW 8] Context region for received Ethernet packet with a match and
+ packet type 0. Used in CFC load request message */
+#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
+#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
+#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
+#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
+#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
+#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
+#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
+#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
+/* [R 2] debug only: Number of pending requests for CAC on port 0. */
+#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
+/* [R 2] debug only: Number of pending requests for header parsing. */
+#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
+/* [R 1] Interrupt register #0 read */
+#define PRS_REG_PRS_INT_STS 0x40188
+/* [RW 8] Parity mask register #0 read/write */
+#define PRS_REG_PRS_PRTY_MASK 0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
+/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
+ request message */
+#define PRS_REG_PURE_REGIONS 0x40024
+/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
+/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
+/* [R 4] debug only: SRC current credit. Transaction based. */
+#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0 0x4022c
+/* [R 8] debug only: TCM current credit. Cycle based. */
+#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
+/* [R 8] debug only: TSDM current credit. Transaction based. */
+#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+/* [R 6] Debug only: Number of used entries in the data FIFO */
+#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
+/* [R 7] Debug only: Number of used entries in the header FIFO */
+#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0 0x120534
+/* [R 32] GRC address for configuration access to PCIE config address 0x88.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_88_F1 0x120544
+#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
+/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
+#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
+/* [R 32] GRC address for configuration access to PCIE config address 0x90.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
+#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+/* [R 32] GRC address for configuration access to PCIE config address 0x94.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_94_F1 0x120550
+#define PXP2_REG_PGL_CONTROL0 0x120490
+#define PXP2_REG_PGL_CONTROL1 0x120514
+#define PXP2_REG_PGL_DEBUG 0x120520
+/* [RW 32] third dword data of expansion rom request. this register is
+ special. reading from it provides a vector of outstanding read requests.
+ if a bit is zero it means that a read request on the corresponding tag
+ did not finish yet (not all completions have arrived for it) */
+#define PXP2_REG_PGL_EXP_ROM2 0x120808
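+/* Usage sketch (illustration only): per the comment above, a zero bit
+ * means the read on that tag has not finished, so waiting for all reads
+ * to drain is a poll for all-ones. The retry bound is arbitrary; REG_RD
+ * is the driver's register read accessor. */
+static inline bool pglue_pgl_read_tags_idle(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < 1000; i++)
+		if (REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) == 0xffffffff)
+			return true;
+	return false;
+}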
+/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
+#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
+#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
+#define PXP2_REG_PGL_INT_CSDM_3 0x120500
+#define PXP2_REG_PGL_INT_CSDM_4 0x120504
+#define PXP2_REG_PGL_INT_CSDM_5 0x120508
+#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
+#define PXP2_REG_PGL_INT_CSDM_7 0x120510
+/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_TSDM_0 0x120494
+#define PXP2_REG_PGL_INT_TSDM_1 0x120498
+#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
+#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
+#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
+#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
+#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
+#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
+/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
+#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
+#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
+#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
+#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
+#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
+#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
+#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
+/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
+#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
+#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
+#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
+#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
+#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
+#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
+#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
+/* [RW 3] This field allows one function to pretend to be another function
+ when accessing any BAR mapped resource within the device. The value of
+ the field is the number of the function that will effectively be
+ accessed. After software writes this field it must read it back in order
+ to know that the new value has taken effect */
+#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674
+#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678
+#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c
+#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680
+#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684
+#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688
+#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c
+#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
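+
+/*
+ * Illustrative sketch (editor's addition): program the pretend field of
+ * function 0 and read it back, since the comment above requires a read
+ * after the write before the new value is known to be in effect. The
+ * helper and its 'regs' base are assumptions, not driver API.
+ */
+static inline void pxp2_pretend_func0(void __iomem *regs, u32 pretend_func)
+{
+	writel(pretend_func, regs + PXP2_REG_PGL_PRETEND_FUNC_F0);
+	readl(regs + PXP2_REG_PGL_PRETEND_FUNC_F0); /* mandatory read-back */
+}
+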
+/* [R 1] this bit indicates that a read request was blocked because
+ bus_master_en was deasserted */
+#define PXP2_REG_PGL_READ_BLOCKED 0x120568
+#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
+/* [R 18] debug only */
+#define PXP2_REG_PGL_TXW_CDTS 0x12052c
+/* [R 1] this bit indicates that a write request was blocked because
+ bus_master_en was deasserted */
+#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564
+#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0
+#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4
+#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8
+#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4
+#define PXP2_REG_PSWRQ_BW_ADD28 0x120228
+#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8
+#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4
+#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8
+#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc
+#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0
+#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c
+#define PXP2_REG_PSWRQ_BW_L1 0x1202b0
+#define PXP2_REG_PSWRQ_BW_L10 0x1202d4
+#define PXP2_REG_PSWRQ_BW_L11 0x1202d8
+#define PXP2_REG_PSWRQ_BW_L2 0x1202b4
+#define PXP2_REG_PSWRQ_BW_L28 0x120318
+#define PXP2_REG_PSWRQ_BW_L3 0x1202b8
+#define PXP2_REG_PSWRQ_BW_L6 0x1202c4
+#define PXP2_REG_PSWRQ_BW_L7 0x1202c8
+#define PXP2_REG_PSWRQ_BW_L8 0x1202cc
+#define PXP2_REG_PSWRQ_BW_L9 0x1202d0
+#define PXP2_REG_PSWRQ_BW_RD 0x120324
+#define PXP2_REG_PSWRQ_BW_UB1 0x120238
+#define PXP2_REG_PSWRQ_BW_UB10 0x12025c
+#define PXP2_REG_PSWRQ_BW_UB11 0x120260
+#define PXP2_REG_PSWRQ_BW_UB2 0x12023c
+#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0
+#define PXP2_REG_PSWRQ_BW_UB3 0x120240
+#define PXP2_REG_PSWRQ_BW_UB6 0x12024c
+#define PXP2_REG_PSWRQ_BW_UB7 0x120250
+#define PXP2_REG_PSWRQ_BW_UB8 0x120254
+#define PXP2_REG_PSWRQ_BW_UB9 0x120258
+#define PXP2_REG_PSWRQ_BW_WR 0x120328
+#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000
+#define PXP2_REG_PSWRQ_QM0_L2P 0x120038
+#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
+#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
+#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP2_REG_PXP2_INT_MASK_0 0x120578
+/* [R 32] Interrupt register #0 read */
+#define PXP2_REG_PXP2_INT_STS_0 0x12056c
+#define PXP2_REG_PXP2_INT_STS_1 0x120608
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
+/* [RW 32] Parity mask register #0 read/write */
+#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
+#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
+/* [R 32] Parity register #0 read */
+#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
+#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
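+
+/*
+ * Illustrative sketch (editor's addition): the INT_STS / INT_STS_CLR /
+ * INT_MASK registers follow the usual triplet pattern - STS is a plain
+ * read, STS_CLR is read-to-clear and MASK gates which bits may assert
+ * the interrupt. A handler could acknowledge block 0 like this
+ * (hypothetical helper, assumes an ioremap()ed BAR base):
+ */
+static inline u32 pxp2_ack_int_sts_0(void __iomem *regs)
+{
+	/* Reading the read-clear copy returns and clears the asserted bits. */
+	return readl(regs + PXP2_REG_PXP2_INT_STS_CLR_0);
+}
+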
+/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
+ indication about backpressure) */
+#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
+/* [R 8] Debug only: The blocks counter - number of unused block ids */
+#define PXP2_REG_RD_BLK_CNT 0x120418
+/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
+ Must be bigger than 6. Normally should not be changed. */
+#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c
+/* [RW 2] CDU byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404
+/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
+#define PXP2_REG_RD_DISABLE_INPUTS 0x120374
+/* [R 1] PSWRD internal memories initialization is done */
+#define PXP2_REG_RD_INIT_DONE 0x120370
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq10 */
+#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq11 */
+#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq17 */
+#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq18 */
+#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq19 */
+#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq22 */
+#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq25 */
+#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq6 */
+#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq9 */
+#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c
+/* [RW 2] PBF byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4
+/* [R 1] Debug only: Indication if delivery ports are idle */
+#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c
+#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420
+/* [RW 2] QM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8
+/* [R 7] Debug only: The SR counter - number of unused sub request ids */
+#define PXP2_REG_RD_SR_CNT 0x120414
+/* [RW 2] SRC byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400
+/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
+ be bigger than 1. Normally should not be changed. */
+#define PXP2_REG_RD_SR_NUM_CFG 0x120408
+/* [RW 1] Signals the PSWRD block to start initializing internal memories */
+#define PXP2_REG_RD_START_INIT 0x12036c
+/* [RW 2] TM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc
+/* [RW 10] Bandwidth addition to VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc
+/* [RW 10] Bandwidth addition to VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec
+/* [RW 10] Bandwidth addition to VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0
+/* [RW 10] Bandwidth addition to VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4
+/* [RW 10] Bandwidth addition to VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8
+/* [RW 10] Bandwidth addition to VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc
+/* [RW 10] Bandwidth addition to VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD17 0x120200
+/* [RW 10] Bandwidth addition to VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD18 0x120204
+/* [RW 10] Bandwidth addition to VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD19 0x120208
+/* [RW 10] Bandwidth addition to VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c
+/* [RW 10] Bandwidth addition to VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD22 0x120210
+/* [RW 10] Bandwidth addition to VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD23 0x120214
+/* [RW 10] Bandwidth addition to VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD24 0x120218
+/* [RW 10] Bandwidth addition to VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c
+/* [RW 10] Bandwidth addition to VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD26 0x120220
+/* [RW 10] Bandwidth addition to VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD27 0x120224
+/* [RW 10] Bandwidth addition to VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc
+/* [RW 10] Bandwidth addition to VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0
+/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
+#define PXP2_REG_RQ_BW_RD_L0 0x1202ac
+/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
+#define PXP2_REG_RQ_BW_RD_L12 0x1202dc
+/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
+#define PXP2_REG_RQ_BW_RD_L13 0x1202e0
+/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
+#define PXP2_REG_RQ_BW_RD_L14 0x1202e4
+/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
+#define PXP2_REG_RQ_BW_RD_L15 0x1202e8
+/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
+#define PXP2_REG_RQ_BW_RD_L16 0x1202ec
+/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
+#define PXP2_REG_RQ_BW_RD_L17 0x1202f0
+/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
+#define PXP2_REG_RQ_BW_RD_L18 0x1202f4
+/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
+#define PXP2_REG_RQ_BW_RD_L19 0x1202f8
+/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
+#define PXP2_REG_RQ_BW_RD_L20 0x1202fc
+/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
+#define PXP2_REG_RQ_BW_RD_L22 0x120300
+/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
+#define PXP2_REG_RQ_BW_RD_L23 0x120304
+/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
+#define PXP2_REG_RQ_BW_RD_L24 0x120308
+/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
+#define PXP2_REG_RQ_BW_RD_L25 0x12030c
+/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
+#define PXP2_REG_RQ_BW_RD_L26 0x120310
+/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
+#define PXP2_REG_RQ_BW_RD_L27 0x120314
+/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
+#define PXP2_REG_RQ_BW_RD_L4 0x1202bc
+/* [RW 10] Bandwidth Typical L for VQ5 Read requests - currently not used */
+#define PXP2_REG_RQ_BW_RD_L5 0x1202c0
+/* [RW 7] Bandwidth upper bound for VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234
+/* [RW 7] Bandwidth upper bound for VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264
+/* [RW 7] Bandwidth upper bound for VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268
+/* [RW 7] Bandwidth upper bound for VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c
+/* [RW 7] Bandwidth upper bound for VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270
+/* [RW 7] Bandwidth upper bound for VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274
+/* [RW 7] Bandwidth upper bound for VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278
+/* [RW 7] Bandwidth upper bound for VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c
+/* [RW 7] Bandwidth upper bound for VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280
+/* [RW 7] Bandwidth upper bound for VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284
+/* [RW 7] Bandwidth upper bound for VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288
+/* [RW 7] Bandwidth upper bound for VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c
+/* [RW 7] Bandwidth upper bound for VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290
+/* [RW 7] Bandwidth upper bound for VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294
+/* [RW 7] Bandwidth upper bound for VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298
+/* [RW 7] Bandwidth upper bound for VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c
+/* [RW 7] Bandwidth upper bound for VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244
+/* [RW 7] Bandwidth upper bound for VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248
+/* [RW 10] Bandwidth addition to VQ29 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c
+/* [RW 10] Bandwidth addition to VQ30 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD30 0x120230
+/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
+#define PXP2_REG_RQ_BW_WR_L29 0x12031c
+/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
+#define PXP2_REG_RQ_BW_WR_L30 0x120320
+/* [RW 7] Bandwidth upper bound for VQ29 */
+#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
+/* [RW 7] Bandwidth upper bound for VQ30 */
+#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
+/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
+#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
+/* [RW 2] Endian mode for cdu */
+#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
+#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
+#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
+/* [RW 3] page size in L2P table for CDU module; 0 - 4k; 1 - 8k; 2 - 16k;
+ 3 - 32k; 4 - 64k; 5 - 128k */
+#define PXP2_REG_RQ_CDU_P_SIZE 0x120018
+/* [R 1] '1' indicates that the requester has finished its internal
+ configuration */
+#define PXP2_REG_RQ_CFG_DONE 0x1201b4
+/* [RW 2] Endian mode for debug */
+#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4
+/* [RW 1] When '1'; requests will enter input buffers but won't get out
+ towards the glue */
+#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
+/* [RW 4] Determines alignment of write SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
+/* [RW 4] Determines alignment of read SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
+/* [RW 1] when set the new alignment method (E2) will be applied; when reset
+ * the original alignment method (E1 E1H) will be applied */
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
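+
+/*
+ * Illustrative sketch (editor's addition): decode the 4-bit alignment code
+ * documented above into a byte count; codes 1..4 double from 64B and code
+ * 0 is the 8B special case.
+ */
+static inline u32 pxp2_dram_align_bytes(u32 code)
+{
+	return code ? 32u << code : 8u;	/* 0->8B, 1->64B, ... 4->512B */
+}
+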
+/* [RW 1] If 1 an ILT failure will not result in an ELT access; an
+ interrupt will be asserted */
+#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
+/* [RW 2] Endian mode for hc */
+#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
+/* [RW 1] when '0' ILT logic will work as in A0; otherwise as in B0; for
+ backward compatibility; note that different registers are used per mode */
+#define PXP2_REG_RQ_ILT_MODE 0x1205b4
+/* [WB 53] Onchip address table */
+#define PXP2_REG_RQ_ONCHIP_AT 0x122000
+/* [WB 53] Onchip address table - B0 */
+#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
+/* [RW 13] Pending read limiter threshold; in Dwords */
+#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
+/* [RW 2] Endian mode for qm */
+#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
+#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
+#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
+/* [RW 3] page size in L2P table for QM module; 0 - 4k; 1 - 8k; 2 - 16k;
+ 3 - 32k; 4 - 64k; 5 - 128k */
+#define PXP2_REG_RQ_QM_P_SIZE 0x120050
+/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
+#define PXP2_REG_RQ_RBC_DONE 0x1201b0
+/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
+ 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS0 0x120160
+/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
+ 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS1 0x120168
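+
+/*
+ * Illustrative sketch (editor's addition): the 3-bit max burst size code
+ * doubles from 128B, so the decode is a single shift (000 -> 128B up to
+ * 101 -> 4K).
+ */
+static inline u32 pxp2_rd_mbs_bytes(u32 code)
+{
+	return 128u << code;
+}
+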
+/* [RW 2] Endian mode for src */
+#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
+#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
+#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
+/* [RW 3] page size in L2P table for SRC module; 0 - 4k; 1 - 8k; 2 - 16k;
+ 3 - 32k; 4 - 64k; 5 - 128k */
+#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
+/* [RW 2] Endian mode for tm */
+#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
+#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
+#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
+/* [RW 3] page size in L2P table for TM module; 0 - 4k; 1 - 8k; 2 - 16k;
+ 3 - 32k; 4 - 64k; 5 - 128k */
+#define PXP2_REG_RQ_TM_P_SIZE 0x120034
+/* [R 5] Number of entries in the ufifo; this fifo holds l2p completions */
+#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
+/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
+#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
+/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
+#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
+/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
+#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818
+/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
+#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820
+/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
+#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828
+/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
+#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830
+/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
+#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838
+/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
+#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840
+/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
+#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848
+/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
+#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850
+/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
+#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858
+/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
+#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860
+/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
+#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868
+/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
+#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870
+/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
+#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878
+/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
+#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880
+/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
+#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888
+/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
+#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890
+/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
+#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898
+/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
+#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0
+/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
+#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8
+/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
+#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0
+/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
+#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8
+/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
+#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0
+/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
+#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8
+/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
+#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0
+/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
+#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8
+/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
+#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0
+/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
+#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8
+/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
+#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0
+/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
+#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8
+/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
+#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900
+/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
+#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908
+/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
+ 001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS0 0x12015c
+/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
+ 001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS1 0x120164
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_CDU_MPS 0x1205f0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_CSDM_MPS 0x1205d0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_DBG_MPS 0x1205e8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_DMAE_MPS 0x1205ec
+/* [RW 10] If the number of entries in the dmae fifo is higher than this
+ threshold then the has_payload indication will be asserted; the default
+ value should be greater than the write MBS size */
+#define PXP2_REG_WR_DMAE_TH 0x120368
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_HC_MPS 0x1205c8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_QM_MPS 0x1205dc
+/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
+#define PXP2_REG_WR_REV_MODE 0x120670
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_SRC_MPS 0x1205e4
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_TM_MPS 0x1205e0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_TSDM_MPS 0x1205d4
+/* [RW 10] If the number of entries in the usdmdp fifo is higher than this
+ threshold then the has_payload indication will be asserted; the default
+ value should be greater than the write MBS size */
+#define PXP2_REG_WR_USDMDP_TH 0x120348
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_USDM_MPS 0x1205cc
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_XSDM_MPS 0x1205d8
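+
+/*
+ * Illustrative sketch (editor's addition): all of the *_MPS registers
+ * above share one 2-bit encoding that doubles from 128B, so a single
+ * decode helper covers them (0 -> 128B ... 3 -> 1024B).
+ */
+static inline u32 pxp2_wr_mps_bytes(u32 code)
+{
+	return 128u << code;
+}
+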
+/* [R 1] debug only: Indication if PSWHST arbiter is idle */
+#define PXP_REG_HST_ARB_IS_IDLE 0x103004
+/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
+ this client is waiting for the arbiter. */
+#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to the doorbell
+ queue block. Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
+/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
+ is updated according to the 'hst_discard_doorbells' register when the
+ state machine is idle */
+#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+ Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
+/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
+ means this PSWHST is discarding inputs from this client. Each bit is
+ updated according to the 'hst_discard_internal_writes' register when the
+ state machine is idle. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
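+
+/*
+ * Illustrative sketch (editor's addition): "closing the gates" means
+ * setting both discard bits and then polling the status copies, which
+ * only reflect the control bits once the state machine is idle. The
+ * helper and its polling policy are assumptions, not driver API.
+ */
+static inline void pxp_hst_close_gates(void __iomem *regs)
+{
+	writel(1, regs + PXP_REG_HST_DISCARD_DOORBELLS);
+	writel(1, regs + PXP_REG_HST_DISCARD_INTERNAL_WRITES);
+	while (!readl(regs + PXP_REG_HST_DISCARD_DOORBELLS_STATUS))
+		cpu_relax();	/* wait for the discard to take effect */
+}
+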
+/* [WB 160] Used for initialization of the inbound interrupts memory */
+#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP_REG_PXP_INT_MASK_0 0x103074
+#define PXP_REG_PXP_INT_MASK_1 0x103084
+/* [R 32] Interrupt register #0 read */
+#define PXP_REG_PXP_INT_STS_0 0x103068
+#define PXP_REG_PXP_INT_STS_1 0x103078
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
+#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
+/* [RW 27] Parity mask register #0 read/write */
+#define PXP_REG_PXP_PRTY_MASK 0x103094
+/* [R 26] Parity register #0 read */
+#define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
+/* [RW 4] The activity counter initial increment value sent in the load
+ request */
+#define QM_REG_ACTCTRINITVAL_0 0x168040
+#define QM_REG_ACTCTRINITVAL_1 0x168044
+#define QM_REG_ACTCTRINITVAL_2 0x168048
+#define QM_REG_ACTCTRINITVAL_3 0x16804c
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+ index I represents the physical queue number. The 12 lsbs are ignored and
+ considered zero so practically there are only 20 bits in this register;
+ queues 63-0 */
+#define QM_REG_BASEADDR 0x168900
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+ index I represents the physical queue number. The 12 lsbs are ignored and
+ considered zero so practically there are only 20 bits in this register;
+ queues 127-64 */
+#define QM_REG_BASEADDR_EXT_A 0x16e100
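+
+/*
+ * Illustrative sketch (editor's addition): program the base address of
+ * physical queue 'q'. Only bits [31:12] are significant (the 12 lsbs read
+ * as zero) and queues 64-127 live in the _EXT_A copy. Hypothetical
+ * helper, assumes an ioremap()ed BAR base.
+ */
+static inline void qm_set_baseaddr(void __iomem *regs, u32 q, u32 base)
+{
+	u32 reg = q < 64 ? QM_REG_BASEADDR + q * 4
+			 : QM_REG_BASEADDR_EXT_A + (q - 64) * 4;
+
+	writel(base & 0xfffff000, regs + reg);
+}
+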
+/* [RW 16] The byte credit cost for each task. This value is for both ports */
+#define QM_REG_BYTECRDCOST 0x168234
+/* [RW 16] The initial byte credit value for both ports. */
+#define QM_REG_BYTECRDINITVAL 0x168238
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 31-0 */
+#define QM_REG_BYTECRDPORT_LSB 0x168228
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 95-64 */
+#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 63-32 */
+#define QM_REG_BYTECRDPORT_MSB 0x168224
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 127-96 */
+#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
+/* [RW 16] The byte credit value above which the QM is considered almost
+ full */
+#define QM_REG_BYTECREDITAFULLTHR 0x168094
+/* [RW 4] The initial credit for interface */
+#define QM_REG_CMINITCRD_0 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
+#define QM_REG_CMINITCRD_1 0x1680d0
+#define QM_REG_CMINITCRD_2 0x1680d4
+#define QM_REG_CMINITCRD_3 0x1680d8
+#define QM_REG_CMINITCRD_4 0x1680dc
+#define QM_REG_CMINITCRD_5 0x1680e0
+#define QM_REG_CMINITCRD_6 0x1680e4
+#define QM_REG_CMINITCRD_7 0x1680e8
+/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
+ is masked */
+#define QM_REG_CMINTEN 0x1680ec
+/* [RW 12] A bit vector which indicates which of the queues are tied to
+ interface 0 */
+#define QM_REG_CMINTVOQMASK_0 0x1681f4
+#define QM_REG_CMINTVOQMASK_1 0x1681f8
+#define QM_REG_CMINTVOQMASK_2 0x1681fc
+#define QM_REG_CMINTVOQMASK_3 0x168200
+#define QM_REG_CMINTVOQMASK_4 0x168204
+#define QM_REG_CMINTVOQMASK_5 0x168208
+#define QM_REG_CMINTVOQMASK_6 0x16820c
+#define QM_REG_CMINTVOQMASK_7 0x168210
+/* [RW 20] The number of connections divided by 16 which dictates the size
+ of each queue that belongs to an even function number. */
+#define QM_REG_CONNNUM_0 0x168020
+/* [R 6] Keeps the fill level of the fifo from write client 4 */
+#define QM_REG_CQM_WRC_FIFOLVL 0x168018
+/* [RW 8] The context regions sent in the CFC load request */
+#define QM_REG_CTXREG_0 0x168030
+#define QM_REG_CTXREG_1 0x168034
+#define QM_REG_CTXREG_2 0x168038
+#define QM_REG_CTXREG_3 0x16803c
+/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
+ bypass enable */
+#define QM_REG_ENBYPVOQMASK 0x16823c
+/* [RW 32] A bit mask for each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 31-0 */
+#define QM_REG_ENBYTECRD_LSB 0x168220
+/* [RW 32] A bit mask for each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 95-64 */
+#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
+/* [RW 32] A bit mask for each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 63-32 */
+#define QM_REG_ENBYTECRD_MSB 0x16821c
+/* [RW 32] A bit mask for each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 127-96 */
+#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
+/* [RW 4] If cleared then the secondary interface will not be served by the
+ RR arbiter */
+#define QM_REG_ENSEC 0x1680f0
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_LSB 0x168230
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_MSB 0x16822c
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 31-0 */
+#define QM_REG_HWAEMPTYMASK_LSB 0x168218
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 95-64 */
+#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 63-32 */
+#define QM_REG_HWAEMPTYMASK_MSB 0x168214
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 127-96 */
+#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
+/* [RW 4] The number of outstanding request to CFC */
+#define QM_REG_OUTLDREQ 0x168804
+/* [RC 1] A flag to indicate that an overflow error occurred in one of the
+ queues. */
+#define QM_REG_OVFERROR 0x16805c
+/* [RC 7] The Q where the overflow occurred */
+#define QM_REG_OVFQNUM 0x168058
+/* [R 16] Pause state for physical queues 15-0 */
+#define QM_REG_PAUSESTATE0 0x168410
+/* [R 16] Pause state for physical queues 31-16 */
+#define QM_REG_PAUSESTATE1 0x168414
+/* [R 16] Pause state for physical queues 47-32 */
+#define QM_REG_PAUSESTATE2 0x16e684
+/* [R 16] Pause state for physical queues 63-48 */
+#define QM_REG_PAUSESTATE3 0x16e688
+/* [R 16] Pause state for physical queues 79-64 */
+#define QM_REG_PAUSESTATE4 0x16e68c
+/* [R 16] Pause state for physical queues 95-80 */
+#define QM_REG_PAUSESTATE5 0x16e690
+/* [R 16] Pause state for physical queues 111-96 */
+#define QM_REG_PAUSESTATE6 0x16e694
+/* [R 16] Pause state for physical queues 127-112 */
+#define QM_REG_PAUSESTATE7 0x16e698
+/* [RW 2] The PCI attributes field used in the PCI request. */
+#define QM_REG_PCIREQAT 0x168054
+#define QM_REG_PF_EN 0x16e70c
+/* [R 24] The number of tasks stored in the QM for the PF. Only even
+ * functions are valid in E2 (odd-indexed registers are hard-wired to 0) */
+#define QM_REG_PF_USG_CNT_0 0x16e040
+/* [R 16] NOT USED */
+#define QM_REG_PORT0BYTECRD 0x168300
+/* [R 16] The byte credit of port 1 */
+#define QM_REG_PORT1BYTECRD 0x168304
+/* [RW 3] pci function number of queues 15-0 */
+#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
+#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
+#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
+#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
+#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
+#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
+#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
+#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
+/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
+ ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
+ bank 0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL 0x168a00
+/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as
+ follows: ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer;
+ ptrtbl[5:4] read bank 0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL_EXT_A 0x16e200
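+
+/*
+ * Illustrative sketch (editor's addition): unpack one 54-bit pointer table
+ * entry according to the mapping above. How the two dwords of the
+ * wide-bus (WB) value combine into the u64 is an assumption.
+ */
+static inline void qm_ptrtbl_unpack(u64 v, u32 *rd_ptr, u32 *wr_ptr,
+				    u32 *rd_bank0, u32 *rd_bank1, u32 *wr_bank)
+{
+	*rd_ptr   = (v >> 30) & 0xffffff;	/* ptrtbl[53:30] */
+	*wr_ptr   = (v >> 6) & 0xffffff;	/* ptrtbl[29:6] */
+	*rd_bank0 = (v >> 4) & 0x3;		/* ptrtbl[5:4] */
+	*rd_bank1 = (v >> 2) & 0x3;		/* ptrtbl[3:2] */
+	*wr_bank  = v & 0x3;			/* ptrtbl[1:0] */
+}
+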
+/* [RW 2] Interrupt mask register #0 read/write */
+#define QM_REG_QM_INT_MASK 0x168444
+/* [R 2] Interrupt register #0 read */
+#define QM_REG_QM_INT_STS 0x168438
+/* [RW 12] Parity mask register #0 read/write */
+#define QM_REG_QM_PRTY_MASK 0x168454
+/* [R 12] Parity register #0 read */
+#define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
+/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
+#define QM_REG_QSTATUS_HIGH 0x16802c
+/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
+#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
+/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
+#define QM_REG_QSTATUS_LOW 0x168028
+/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
+#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
+/* [R 24] The number of tasks queued for each queue; queues 63-0 */
+#define QM_REG_QTASKCTR_0 0x168308
+/* [R 24] The number of tasks queued for each queue; queues 127-64 */
+#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
+/* [RW 4] Queue tied to VOQ */
+#define QM_REG_QVOQIDX_0 0x1680f4
+#define QM_REG_QVOQIDX_10 0x16811c
+#define QM_REG_QVOQIDX_100 0x16e49c
+#define QM_REG_QVOQIDX_101 0x16e4a0
+#define QM_REG_QVOQIDX_102 0x16e4a4
+#define QM_REG_QVOQIDX_103 0x16e4a8
+#define QM_REG_QVOQIDX_104 0x16e4ac
+#define QM_REG_QVOQIDX_105 0x16e4b0
+#define QM_REG_QVOQIDX_106 0x16e4b4
+#define QM_REG_QVOQIDX_107 0x16e4b8
+#define QM_REG_QVOQIDX_108 0x16e4bc
+#define QM_REG_QVOQIDX_109 0x16e4c0
+#define QM_REG_QVOQIDX_11 0x168120
+#define QM_REG_QVOQIDX_110 0x16e4c4
+#define QM_REG_QVOQIDX_111 0x16e4c8
+#define QM_REG_QVOQIDX_112 0x16e4cc
+#define QM_REG_QVOQIDX_113 0x16e4d0
+#define QM_REG_QVOQIDX_114 0x16e4d4
+#define QM_REG_QVOQIDX_115 0x16e4d8
+#define QM_REG_QVOQIDX_116 0x16e4dc
+#define QM_REG_QVOQIDX_117 0x16e4e0
+#define QM_REG_QVOQIDX_118 0x16e4e4
+#define QM_REG_QVOQIDX_119 0x16e4e8
+#define QM_REG_QVOQIDX_12 0x168124
+#define QM_REG_QVOQIDX_120 0x16e4ec
+#define QM_REG_QVOQIDX_121 0x16e4f0
+#define QM_REG_QVOQIDX_122 0x16e4f4
+#define QM_REG_QVOQIDX_123 0x16e4f8
+#define QM_REG_QVOQIDX_124 0x16e4fc
+#define QM_REG_QVOQIDX_125 0x16e500
+#define QM_REG_QVOQIDX_126 0x16e504
+#define QM_REG_QVOQIDX_127 0x16e508
+#define QM_REG_QVOQIDX_13 0x168128
+#define QM_REG_QVOQIDX_14 0x16812c
+#define QM_REG_QVOQIDX_15 0x168130
+#define QM_REG_QVOQIDX_16 0x168134
+#define QM_REG_QVOQIDX_17 0x168138
+#define QM_REG_QVOQIDX_21 0x168148
+#define QM_REG_QVOQIDX_22 0x16814c
+#define QM_REG_QVOQIDX_23 0x168150
+#define QM_REG_QVOQIDX_24 0x168154
+#define QM_REG_QVOQIDX_25 0x168158
+#define QM_REG_QVOQIDX_26 0x16815c
+#define QM_REG_QVOQIDX_27 0x168160
+#define QM_REG_QVOQIDX_28 0x168164
+#define QM_REG_QVOQIDX_29 0x168168
+#define QM_REG_QVOQIDX_30 0x16816c
+#define QM_REG_QVOQIDX_31 0x168170
+#define QM_REG_QVOQIDX_32 0x168174
+#define QM_REG_QVOQIDX_33 0x168178
+#define QM_REG_QVOQIDX_34 0x16817c
+#define QM_REG_QVOQIDX_35 0x168180
+#define QM_REG_QVOQIDX_36 0x168184
+#define QM_REG_QVOQIDX_37 0x168188
+#define QM_REG_QVOQIDX_38 0x16818c
+#define QM_REG_QVOQIDX_39 0x168190
+#define QM_REG_QVOQIDX_40 0x168194
+#define QM_REG_QVOQIDX_41 0x168198
+#define QM_REG_QVOQIDX_42 0x16819c
+#define QM_REG_QVOQIDX_43 0x1681a0
+#define QM_REG_QVOQIDX_44 0x1681a4
+#define QM_REG_QVOQIDX_45 0x1681a8
+#define QM_REG_QVOQIDX_46 0x1681ac
+#define QM_REG_QVOQIDX_47 0x1681b0
+#define QM_REG_QVOQIDX_48 0x1681b4
+#define QM_REG_QVOQIDX_49 0x1681b8
+#define QM_REG_QVOQIDX_5 0x168108
+#define QM_REG_QVOQIDX_50 0x1681bc
+#define QM_REG_QVOQIDX_51 0x1681c0
+#define QM_REG_QVOQIDX_52 0x1681c4
+#define QM_REG_QVOQIDX_53 0x1681c8
+#define QM_REG_QVOQIDX_54 0x1681cc
+#define QM_REG_QVOQIDX_55 0x1681d0
+#define QM_REG_QVOQIDX_56 0x1681d4
+#define QM_REG_QVOQIDX_57 0x1681d8
+#define QM_REG_QVOQIDX_58 0x1681dc
+#define QM_REG_QVOQIDX_59 0x1681e0
+#define QM_REG_QVOQIDX_6 0x16810c
+#define QM_REG_QVOQIDX_60 0x1681e4
+#define QM_REG_QVOQIDX_61 0x1681e8
+#define QM_REG_QVOQIDX_62 0x1681ec
+#define QM_REG_QVOQIDX_63 0x1681f0
+#define QM_REG_QVOQIDX_64 0x16e40c
+#define QM_REG_QVOQIDX_65 0x16e410
+#define QM_REG_QVOQIDX_69 0x16e420
+#define QM_REG_QVOQIDX_7 0x168110
+#define QM_REG_QVOQIDX_70 0x16e424
+#define QM_REG_QVOQIDX_71 0x16e428
+#define QM_REG_QVOQIDX_72 0x16e42c
+#define QM_REG_QVOQIDX_73 0x16e430
+#define QM_REG_QVOQIDX_74 0x16e434
+#define QM_REG_QVOQIDX_75 0x16e438
+#define QM_REG_QVOQIDX_76 0x16e43c
+#define QM_REG_QVOQIDX_77 0x16e440
+#define QM_REG_QVOQIDX_78 0x16e444
+#define QM_REG_QVOQIDX_79 0x16e448
+#define QM_REG_QVOQIDX_8 0x168114
+#define QM_REG_QVOQIDX_80 0x16e44c
+#define QM_REG_QVOQIDX_81 0x16e450
+#define QM_REG_QVOQIDX_85 0x16e460
+#define QM_REG_QVOQIDX_86 0x16e464
+#define QM_REG_QVOQIDX_87 0x16e468
+#define QM_REG_QVOQIDX_88 0x16e46c
+#define QM_REG_QVOQIDX_89 0x16e470
+#define QM_REG_QVOQIDX_9 0x168118
+#define QM_REG_QVOQIDX_90 0x16e474
+#define QM_REG_QVOQIDX_91 0x16e478
+#define QM_REG_QVOQIDX_92 0x16e47c
+#define QM_REG_QVOQIDX_93 0x16e480
+#define QM_REG_QVOQIDX_94 0x16e484
+#define QM_REG_QVOQIDX_95 0x16e488
+#define QM_REG_QVOQIDX_96 0x16e48c
+#define QM_REG_QVOQIDX_97 0x16e490
+#define QM_REG_QVOQIDX_98 0x16e494
+#define QM_REG_QVOQIDX_99 0x16e498
+/* [RW 1] Initialization bit command */
+#define QM_REG_SOFT_RESET 0x168428
+/* [RW 8] The credit cost per task in the QM. A value per each VOQ */
+#define QM_REG_TASKCRDCOST_0 0x16809c
+#define QM_REG_TASKCRDCOST_1 0x1680a0
+#define QM_REG_TASKCRDCOST_2 0x1680a4
+#define QM_REG_TASKCRDCOST_4 0x1680ac
+#define QM_REG_TASKCRDCOST_5 0x1680b0
+/* [R 6] Keeps the fill level of the fifo from write client 3 */
+#define QM_REG_TQM_WRC_FIFOLVL 0x168010
+/* [R 6] Keeps the fill level of the fifo from write client 2 */
+#define QM_REG_UQM_WRC_FIFOLVL 0x168008
+/* [RC 32] Credit update error register */
+#define QM_REG_VOQCRDERRREG 0x168408
+/* [R 16] The credit value for each VOQ */
+#define QM_REG_VOQCREDIT_0 0x1682d0
+#define QM_REG_VOQCREDIT_1 0x1682d4
+#define QM_REG_VOQCREDIT_4 0x1682e0
+/* [RW 16] The credit value above which the QM is considered almost full */
+#define QM_REG_VOQCREDITAFULLTHR 0x168090
+/* [RW 16] The init and maximum credit for each VOQ */
+#define QM_REG_VOQINITCREDIT_0 0x168060
+#define QM_REG_VOQINITCREDIT_1 0x168064
+#define QM_REG_VOQINITCREDIT_2 0x168068
+#define QM_REG_VOQINITCREDIT_4 0x168070
+#define QM_REG_VOQINITCREDIT_5 0x168074
+/* [RW 1] The port to which the VOQ belongs */
+#define QM_REG_VOQPORT_0 0x1682a0
+#define QM_REG_VOQPORT_1 0x1682a4
+#define QM_REG_VOQPORT_2 0x1682a8
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_0_LSB 0x168240
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_0_MSB 0x168244
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_10_LSB 0x168290
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_10_MSB 0x168294
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_11_LSB 0x168298
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_11_MSB 0x16829c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_1_LSB 0x168248
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_1_MSB 0x16824c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_2_LSB 0x168250
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_2_MSB 0x168254
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_3_LSB 0x168258
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_4_LSB 0x168260
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_4_MSB 0x168264
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_5_LSB 0x168268
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_5_MSB 0x16826c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_6_LSB 0x168270
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_6_MSB 0x168274
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_7_LSB 0x168278
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_7_MSB 0x16827c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_8_LSB 0x168280
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_8_MSB 0x168284
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_9_LSB 0x168288
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
+/* [RW 32] WRR weights */
+#define QM_REG_WRRWEIGHTS_0 0x16880c
+#define QM_REG_WRRWEIGHTS_1 0x168810
+#define QM_REG_WRRWEIGHTS_10 0x168814
+#define QM_REG_WRRWEIGHTS_11 0x168818
+#define QM_REG_WRRWEIGHTS_12 0x16881c
+#define QM_REG_WRRWEIGHTS_13 0x168820
+#define QM_REG_WRRWEIGHTS_14 0x168824
+#define QM_REG_WRRWEIGHTS_15 0x168828
+#define QM_REG_WRRWEIGHTS_16 0x16e000
+#define QM_REG_WRRWEIGHTS_17 0x16e004
+#define QM_REG_WRRWEIGHTS_18 0x16e008
+#define QM_REG_WRRWEIGHTS_19 0x16e00c
+#define QM_REG_WRRWEIGHTS_2 0x16882c
+#define QM_REG_WRRWEIGHTS_20 0x16e010
+#define QM_REG_WRRWEIGHTS_21 0x16e014
+#define QM_REG_WRRWEIGHTS_22 0x16e018
+#define QM_REG_WRRWEIGHTS_23 0x16e01c
+#define QM_REG_WRRWEIGHTS_24 0x16e020
+#define QM_REG_WRRWEIGHTS_25 0x16e024
+#define QM_REG_WRRWEIGHTS_26 0x16e028
+#define QM_REG_WRRWEIGHTS_27 0x16e02c
+#define QM_REG_WRRWEIGHTS_28 0x16e030
+#define QM_REG_WRRWEIGHTS_29 0x16e034
+#define QM_REG_WRRWEIGHTS_3 0x168830
+#define QM_REG_WRRWEIGHTS_30 0x16e038
+#define QM_REG_WRRWEIGHTS_31 0x16e03c
+#define QM_REG_WRRWEIGHTS_4 0x168834
+#define QM_REG_WRRWEIGHTS_5 0x168838
+#define QM_REG_WRRWEIGHTS_6 0x16883c
+#define QM_REG_WRRWEIGHTS_7 0x168840
+#define QM_REG_WRRWEIGHTS_8 0x168844
+#define QM_REG_WRRWEIGHTS_9 0x168848
+/* [R 6] Keeps the fill level of the fifo from write client 1 */
+#define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] Write to reset the parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
+#define SRC_REG_COUNTFREE0 0x40500
+/* [RW 1] If clear the searcher is compatible with E1 A0 - supports only
+ two ports. If set the searcher supports 8 functions. */
+#define SRC_REG_E1HMF_ENABLE 0x404cc
+#define SRC_REG_FIRSTFREE0 0x40510
+#define SRC_REG_KEYRSS0_0 0x40408
+#define SRC_REG_KEYRSS0_7 0x40424
+#define SRC_REG_KEYRSS1_9 0x40454
+#define SRC_REG_KEYSEARCH_0 0x40458
+#define SRC_REG_KEYSEARCH_1 0x4045c
+#define SRC_REG_KEYSEARCH_2 0x40460
+#define SRC_REG_KEYSEARCH_3 0x40464
+#define SRC_REG_KEYSEARCH_4 0x40468
+#define SRC_REG_KEYSEARCH_5 0x4046c
+#define SRC_REG_KEYSEARCH_6 0x40470
+#define SRC_REG_KEYSEARCH_7 0x40474
+#define SRC_REG_KEYSEARCH_8 0x40478
+#define SRC_REG_KEYSEARCH_9 0x4047c
+#define SRC_REG_LASTFREE0 0x40530
+#define SRC_REG_NUMBER_HASH_BITS0 0x40400
+/* [RW 1] Reset internal state machines. */
+#define SRC_REG_SOFT_RST 0x4049c
+/* [R 3] Interrupt register #0 read */
+#define SRC_REG_SRC_INT_STS 0x404ac
+/* [RW 3] Parity mask register #0 read/write */
+#define SRC_REG_SRC_PRTY_MASK 0x404c8
+/* [R 3] Parity register #0 read */
+#define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
+/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
+#define TCM_REG_CAM_OCCUP 0x5017c
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_AG_RD_IFEN 0x50034
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define TCM_REG_CDU_AG_WR_IFEN 0x50030
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_SM_RD_IFEN 0x5003c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input are disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define TCM_REG_CDU_SM_WR_IFEN 0x50038
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 1 at start-up. */
+#define TCM_REG_CFC_INIT_CRD 0x50204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CP_WEIGHT 0x500c0
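+
+/*
+ * Illustrative sketch (editor's addition): in the 3-bit WRR weight
+ * encoding used by the *_WEIGHT registers, 0 means weight 8 and 1..7 map
+ * directly, so encoding a desired weight is:
+ */
+static inline u32 tcm_wrr_weight_code(u32 weight)
+{
+	return weight == 8 ? 0 : weight;	/* weight must be 1..8 */
+}
+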
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_CSEM_IFEN 0x5002c
+/* [RC 1] Message length mismatch (relative to last indication) at the In#9
+ interface. */
+#define TCM_REG_CSEM_LENGTH_MIS 0x50174
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CSEM_WEIGHT 0x500bc
+/* [RW 8] The Event ID in case ErrorFlg is set in the input message. */
+#define TCM_REG_ERR_EVNT_ID 0x500a0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define TCM_REG_ERR_TCM_HDR 0x5009c
+/* [RW 8] The Event ID for Timers expiration. */
+#define TCM_REG_EXPR_EVNT_ID 0x500a4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC0_INIT_CRD 0x5020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC1_INIT_CRD 0x50210
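+
+/*
+ * Illustrative sketch (editor's addition): the credit registers write an
+ * initial value and read back the live counter, so start-up init is just
+ * the documented constants (CFC: 1; FIC0/FIC1: 64). Hypothetical helper.
+ */
+static inline void tcm_init_output_credits(void __iomem *regs)
+{
+	writel(1, regs + TCM_REG_CFC_INIT_CRD);
+	writel(64, regs + TCM_REG_FIC0_INIT_CRD);
+	writel(64, regs + TCM_REG_FIC1_INIT_CRD);
+}
+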
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
+ ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define TCM_REG_GR_ARB_TYPE 0x50114
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+ complement of the other 3 groups. */
+#define TCM_REG_GR_LD0_PR 0x5011c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+ complement of the other 3 groups. */
+#define TCM_REG_GR_LD1_PR 0x50120
+/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used to align to STORM context row size of 128 bits. The offset of these
+ data in the STORM context is always 0. Index _i stands for the connection
+ type (one of 16). */
+#define TCM_REG_N_SM_CTX_LD_0 0x50050
+#define TCM_REG_N_SM_CTX_LD_1 0x50054
+#define TCM_REG_N_SM_CTX_LD_2 0x50058
+#define TCM_REG_N_SM_CTX_LD_3 0x5005c
+#define TCM_REG_N_SM_CTX_LD_4 0x50060
+#define TCM_REG_N_SM_CTX_LD_5 0x50064
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PBF_IFEN 0x50024
+/* [RC 1] Message length mismatch (relative to last indication) at the In#7
+ interface. */
+#define TCM_REG_PBF_LENGTH_MIS 0x5016c
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PBF_WEIGHT 0x500b4
+#define TCM_REG_PHYS_QNUM0_0 0x500e0
+#define TCM_REG_PHYS_QNUM0_1 0x500e4
+#define TCM_REG_PHYS_QNUM1_0 0x500e8
+#define TCM_REG_PHYS_QNUM1_1 0x500ec
+#define TCM_REG_PHYS_QNUM2_0 0x500f0
+#define TCM_REG_PHYS_QNUM2_1 0x500f4
+#define TCM_REG_PHYS_QNUM3_0 0x500f8
+#define TCM_REG_PHYS_QNUM3_1 0x500fc
+/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PRS_IFEN 0x50020
+/* [RC 1] Message length mismatch (relative to last indication) at the In#6
+ interface. */
+#define TCM_REG_PRS_LENGTH_MIS 0x50168
+/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PRS_WEIGHT 0x500b0
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define TCM_REG_STOP_EVNT_ID 0x500a8
+/* [RC 1] Message length mismatch (relative to last indication) at the STORM
+ interface. */
+#define TCM_REG_STORM_LENGTH_MIS 0x50160
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_STORM_TCM_IFEN 0x50010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_STORM_WEIGHT 0x500ac
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_CFC_IFEN 0x50040
+/* [RW 11] Interrupt mask register #0 read/write */
+#define TCM_REG_TCM_INT_MASK 0x501dc
+/* [R 11] Interrupt register #0 read */
+#define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
+/* [R 27] Parity register #0 read */
+#define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ It is used to determine the number of AG context REG-pairs written back
+ when the input message Reg1WbFlg isn't set. */
+#define TCM_REG_TCM_REG0_SZ 0x500d8
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM0_IFEN 0x50004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM1_IFEN 0x50008
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_TQM_IFEN 0x5000c
+/* [RW 1] If set the Q index received from the QM is inserted into the
+ event ID. */
+#define TCM_REG_TCM_TQM_USE_Q 0x500d4
+/* [RW 28] The CM header for Timers expiration command. */
+#define TCM_REG_TM_TCM_HDR 0x50098
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_TM_TCM_IFEN 0x5001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TM_WEIGHT 0x500d0
+/* [RW 6] QM output initial credit. Max credit available - 32. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 32 at start-up. */
+#define TCM_REG_TQM_INIT_CRD 0x5021c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_P_WEIGHT 0x500c8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_S_WEIGHT 0x500cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define TCM_REG_TQM_TCM_HDR_P 0x50090
+/* [RW 28] The CM header value for QM request (secondary). */
+#define TCM_REG_TQM_TCM_HDR_S 0x50094
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TQM_TCM_IFEN 0x50014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TSDM_IFEN 0x50018
+/* [RC 1] Message length mismatch (relative to last indication) at the SDM
+ interface. */
+#define TCM_REG_TSDM_LENGTH_MIS 0x50164
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TSDM_WEIGHT 0x500c4
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_USEM_IFEN 0x50028
+/* [RC 1] Message length mismatch (relative to last indication) at the In#8
+ interface. */
+#define TCM_REG_USEM_LENGTH_MIS 0x50170
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_USEM_WEIGHT 0x500b8
+/* [RW 21] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - length of the message; [15:6] - message
+ pointer; [20:16] - next pointer. */
+#define TCM_REG_XX_DESCR_TABLE 0x50280
+#define TCM_REG_XX_DESCR_TABLE_SIZE 29
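+
+/* Illustrative decode of one descriptor word, assuming the field layout
+ * documented above ([5:0] length; [15:6] message pointer; [20:16] next
+ * pointer). The helper name is illustrative only.
+ */
+static inline void tcm_xx_descr_decode(u32 d, u32 *len, u32 *msg, u32 *next)
+{
+	*len  = d & 0x3f;		/* [5:0]   message length */
+	*msg  = (d >> 6) & 0x3ff;	/* [15:6]  message pointer */
+	*next = (d >> 16) & 0x1f;	/* [20:16] next pointer */
+}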
+/* [R 6] Used to read the value of the XX protection Free counter. */
+#define TCM_REG_XX_FREE 0x50178
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 127. Write writes the initial credit
+ value; read returns the current value of the credit counter. Must be
+ initialized to 19 at start-up. */
+#define TCM_REG_XX_INIT_CRD 0x50220
+/* [RW 6] Maximum link list size (messages locked) per connection in the XX
+ protection. */
+#define TCM_REG_XX_MAX_LL_SZ 0x50044
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~tcm_registers_xx_free.xx_free is read on read. */
+#define TCM_REG_XX_MSG_NUM 0x50224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define TCM_REG_XX_OVFL_EVNT_ID 0x50048
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define TCM_REG_XX_TABLE 0x50240
+/* [RW 4] Load value for cfc ac credit cnt. */
+#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
+/* [RW 4] Load value for cfc cld credit cnt. */
+#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
+/* [RW 8] Client0 context region. */
+#define TM_REG_CL0_CONT_REGION 0x164030
+/* [RW 8] Client1 context region. */
+#define TM_REG_CL1_CONT_REGION 0x164034
+/* [RW 8] Client2 context region. */
+#define TM_REG_CL2_CONT_REGION 0x164038
+/* [RW 2] Client in High priority client number. */
+#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024
+/* [RW 4] Load value for clout0 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220
+/* [RW 4] Load value for clout1 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228
+/* [RW 4] Load value for clout2 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230
+/* [RW 1] Enable client0 input. */
+#define TM_REG_EN_CL0_INPUT 0x164008
+/* [RW 1] Enable client1 input. */
+#define TM_REG_EN_CL1_INPUT 0x16400c
+/* [RW 1] Enable client2 input. */
+#define TM_REG_EN_CL2_INPUT 0x164010
+#define TM_REG_EN_LINEAR0_TIMER 0x164014
+/* [RW 1] Enable real time counter. */
+#define TM_REG_EN_REAL_TIME_CNT 0x1640d8
+/* [RW 1] Enable for Timers state machines. */
+#define TM_REG_EN_TIMERS 0x164000
+/* [RW 4] Load value for expiration credit cnt. CFC max number of
+ outstanding load requests for timers (expiration) context loading. */
+#define TM_REG_EXP_CRDCNT_VAL 0x164238
+/* [RW 32] Linear0 logic address. */
+#define TM_REG_LIN0_LOGIC_ADDR 0x164240
+/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
+#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS 0x1640a0
+/* [WB 64] Linear0 phy address. */
+#define TM_REG_LIN0_PHY_ADDR 0x164270
+/* [RW 1] Linear0 physical address valid. */
+#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
+#define TM_REG_LIN0_SCAN_ON 0x1640d0
+/* [RW 24] Linear0 array scan timeout. */
+#define TM_REG_LIN0_SCAN_TIME 0x16403c
+#define TM_REG_LIN0_VNIC_UC 0x164128
+/* [RW 32] Linear1 logic address. */
+#define TM_REG_LIN1_LOGIC_ADDR 0x164250
+/* [WB 64] Linear1 phy address. */
+#define TM_REG_LIN1_PHY_ADDR 0x164280
+/* [RW 1] Linear1 physical address valid. */
+#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
+/* [RW 6] Linear timer set_clear fifo threshold. */
+#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
+/* [RW 2] Load value for pci arbiter credit cnt. */
+#define TM_REG_PCIARB_CRDCNT_VAL 0x164260
+/* [RW 20] The amount of hardware cycles for each timer tick. */
+#define TM_REG_TIMER_TICK_SIZE 0x16401c
+/* [RW 8] Timers Context region. */
+#define TM_REG_TM_CONTEXT_REGION 0x164044
+/* [RW 1] Interrupt mask register #0 read/write */
+#define TM_REG_TM_INT_MASK 0x1640fc
+/* [R 1] Interrupt register #0 read */
+#define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_EVENT_0 0x42038
+#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
+#define TSDM_REG_AGG_INT_EVENT_2 0x42040
+#define TSDM_REG_AGG_INT_EVENT_3 0x42044
+#define TSDM_REG_AGG_INT_EVENT_4 0x42048
+/* [RW 1] The T bit for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_T_0 0x420b8
+#define TSDM_REG_AGG_INT_T_1 0x420bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c
+#define TSDM_REG_ENABLE_IN1 0x42238
+#define TSDM_REG_ENABLE_IN2 0x4223c
+#define TSDM_REG_ENABLE_OUT1 0x42240
+#define TSDM_REG_ENABLE_OUT2 0x42244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc
+/* [ST 32] The number of ACK after placement messages received */
+#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274
+/* [ST 32] The number of requests received from the pxp async if */
+#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278
+/* [ST 32] The number of commands received in queue 0 */
+#define TSDM_REG_NUM_OF_Q0_CMD 0x42248
+/* [ST 32] The number of commands received in queue 10 */
+#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c
+/* [ST 32] The number of commands received in queue 11 */
+#define TSDM_REG_NUM_OF_Q11_CMD 0x42270
+/* [ST 32] The number of commands received in queue 1 */
+#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c
+/* [ST 32] The number of commands received in queue 3 */
+#define TSDM_REG_NUM_OF_Q3_CMD 0x42250
+/* [ST 32] The number of commands received in queue 4 */
+#define TSDM_REG_NUM_OF_Q4_CMD 0x42254
+/* [ST 32] The number of commands received in queue 5 */
+#define TSDM_REG_NUM_OF_Q5_CMD 0x42258
+/* [ST 32] The number of commands received in queue 6 */
+#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c
+/* [ST 32] The number of commands received in queue 7 */
+#define TSDM_REG_NUM_OF_Q7_CMD 0x42260
+/* [ST 32] The number of commands received in queue 8 */
+#define TSDM_REG_NUM_OF_Q8_CMD 0x42264
+/* [ST 32] The number of commands received in queue 9 */
+#define TSDM_REG_NUM_OF_Q9_CMD 0x42268
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define TSDM_REG_TIMER_TICK 0x42000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSDM_REG_TSDM_INT_MASK_0 0x4229c
+#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
+/* [R 32] Interrupt register #0 read */
+#define TSDM_REG_TSDM_INT_STS_0 0x42290
+#define TSDM_REG_TSDM_INT_STS_1 0x422a0
+/* [RW 11] Parity mask register #0 read/write */
+#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
+/* [R 11] Parity register #0 read */
+#define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2 */
+#define TSEM_REG_ARB_ELEMENT0 0x180020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */
+#define TSEM_REG_ARB_ELEMENT1 0x180024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+ and ~tsem_registers_arb_element1.arb_element1 */
+#define TSEM_REG_ARB_ELEMENT2 0x180028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0 and
+ ~tsem_registers_arb_element1.arb_element1 and
+ ~tsem_registers_arb_element2.arb_element2 */
+#define TSEM_REG_ARB_ELEMENT3 0x18002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+ and ~tsem_registers_arb_element1.arb_element1 and
+ ~tsem_registers_arb_element2.arb_element2 and
+ ~tsem_registers_arb_element3.arb_element3 */
+#define TSEM_REG_ARB_ELEMENT4 0x180030
+#define TSEM_REG_ENABLE_IN 0x1800a4
+#define TSEM_REG_ENABLE_OUT 0x1800a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define TSEM_REG_FAST_MEMORY 0x1a0000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define TSEM_REG_FIC0_DISABLE 0x180224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define TSEM_REG_FIC1_DISABLE 0x180234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define TSEM_REG_INT_TABLE 0x180400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define TSEM_REG_MSG_NUM_FIC0 0x180000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define TSEM_REG_MSG_NUM_FIC1 0x180004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define TSEM_REG_MSG_NUM_FOC0 0x180008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define TSEM_REG_MSG_NUM_FOC1 0x18000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define TSEM_REG_MSG_NUM_FOC2 0x180010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define TSEM_REG_MSG_NUM_FOC3 0x180014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define TSEM_REG_PAS_DISABLE 0x18024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define TSEM_REG_PASSIVE_BUFFER 0x181000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define TSEM_REG_PRAM 0x1c0000
+/* [R 8] Valid sleeping threads indication; there is a bit per thread */
+#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
+/* [RW 8] List of free threads. There is a bit per thread. */
+#define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define TSEM_REG_TS_0_AS 0x180038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define TSEM_REG_TS_10_AS 0x180060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define TSEM_REG_TS_11_AS 0x180064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define TSEM_REG_TS_12_AS 0x180068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define TSEM_REG_TS_13_AS 0x18006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define TSEM_REG_TS_14_AS 0x180070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define TSEM_REG_TS_15_AS 0x180074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define TSEM_REG_TS_16_AS 0x180078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define TSEM_REG_TS_17_AS 0x18007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define TSEM_REG_TS_18_AS 0x180080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define TSEM_REG_TS_1_AS 0x18003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define TSEM_REG_TS_2_AS 0x180040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define TSEM_REG_TS_3_AS 0x180044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define TSEM_REG_TS_4_AS 0x180048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define TSEM_REG_TS_5_AS 0x18004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define TSEM_REG_TS_6_AS 0x180050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define TSEM_REG_TS_7_AS 0x180054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define TSEM_REG_TS_8_AS 0x180058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define TSEM_REG_TS_9_AS 0x18005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSEM_REG_TSEM_INT_MASK_0 0x180100
+#define TSEM_REG_TSEM_INT_MASK_1 0x180110
+/* [R 32] Interrupt register #0 read */
+#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
+#define TSEM_REG_TSEM_INT_STS_1 0x180104
+/* [RW 32] Parity mask register #0 read/write */
+#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
+#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
+/* [R 32] Parity register #0 read */
+#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
+#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define TSEM_REG_VFPF_ERR_NUM 0x180380
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [10:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to
+ * REG10 of LCID100; the RBC address should be 12'ha64. */
+#define UCM_REG_AG_CTX 0xe2000
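+
+/* Illustrative computation of the 12-bit RBC address described above:
+ * REG-pair offset in the high bits; LCID in [7:0]. With reg = 10 and
+ * lcid = 100 (0x64) this reproduces the documented example 12'ha64.
+ * A sketch only; the helper name is not part of the register file.
+ */
+static inline u32 ucm_ag_ctx_rbc_addr(u32 reg, u32 lcid)
+{
+	return (reg << 8) | (lcid & 0xff);
+}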
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define UCM_REG_CAM_OCCUP 0xe0170
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_AG_RD_IFEN 0xe0038
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define UCM_REG_CDU_AG_WR_IFEN 0xe0034
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_SM_RD_IFEN 0xe0040
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define UCM_REG_CDU_SM_WR_IFEN 0xe003c
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define UCM_REG_CFC_INIT_CRD 0xe0204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CP_WEIGHT 0xe00c4
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_CSEM_IFEN 0xe0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the csem interface is detected. */
+#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CSEM_WEIGHT 0xe00b8
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_DORQ_IFEN 0xe0030
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the dorq interface is detected. */
+#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_DORQ_WEIGHT 0xe00c0
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define UCM_REG_ERR_EVNT_ID 0xe00a4
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define UCM_REG_ERR_UCM_HDR 0xe00a0
+/* [RW 8] The Event ID for Timers expiration. */
+#define UCM_REG_EXPR_EVNT_ID 0xe00a8
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC0_INIT_CRD 0xe020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC1_INIT_CRD 0xe0210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
+ ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define UCM_REG_GR_ARB_TYPE 0xe0144
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+ complement to the others. */
+#define UCM_REG_GR_LD0_PR 0xe014c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+ complement to the others. */
+#define UCM_REG_GR_LD1_PR 0xe0150
+/* [RW 2] The queue index for invalidate counter flag decision. */
+#define UCM_REG_INV_CFLG_Q 0xe00e4
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define UCM_REG_N_SM_CTX_LD_0 0xe0054
+#define UCM_REG_N_SM_CTX_LD_1 0xe0058
+#define UCM_REG_N_SM_CTX_LD_2 0xe005c
+#define UCM_REG_N_SM_CTX_LD_3 0xe0060
+#define UCM_REG_N_SM_CTX_LD_4 0xe0064
+#define UCM_REG_N_SM_CTX_LD_5 0xe0068
+#define UCM_REG_PHYS_QNUM0_0 0xe0110
+#define UCM_REG_PHYS_QNUM0_1 0xe0114
+#define UCM_REG_PHYS_QNUM1_0 0xe0118
+#define UCM_REG_PHYS_QNUM1_1 0xe011c
+#define UCM_REG_PHYS_QNUM2_0 0xe0120
+#define UCM_REG_PHYS_QNUM2_1 0xe0124
+#define UCM_REG_PHYS_QNUM3_0 0xe0128
+#define UCM_REG_PHYS_QNUM3_1 0xe012c
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define UCM_REG_STOP_EVNT_ID 0xe00ac
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define UCM_REG_STORM_LENGTH_MIS 0xe0154
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_STORM_UCM_IFEN 0xe0010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_STORM_WEIGHT 0xe00b0
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define UCM_REG_TM_INIT_CRD 0xe021c
+/* [RW 28] The CM header for Timers expiration command. */
+#define UCM_REG_TM_UCM_HDR 0xe009c
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TM_UCM_IFEN 0xe001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TM_WEIGHT 0xe00d4
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TSEM_IFEN 0xe0024
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TSEM_WEIGHT 0xe00b4
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_CFC_IFEN 0xe0044
+/* [RW 11] Interrupt mask register #0 read/write */
+#define UCM_REG_UCM_INT_MASK 0xe01d4
+/* [R 11] Interrupt register #0 read */
+#define UCM_REG_UCM_INT_STS 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK 0xe01e4
+/* [R 27] Parity register #0 read */
+#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
+/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the Reg1WbFlg isn't set. */
+#define UCM_REG_UCM_REG0_SZ 0xe00dc
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM0_IFEN 0xe0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM1_IFEN 0xe0008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_UCM_TM_IFEN 0xe0020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_UQM_IFEN 0xe000c
+/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
+#define UCM_REG_UCM_UQM_USE_Q 0xe00d8
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define UCM_REG_UQM_INIT_CRD 0xe0220
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_P_WEIGHT 0xe00cc
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_S_WEIGHT 0xe00d0
+/* [RW 28] The CM header value for QM request (primary). */
+#define UCM_REG_UQM_UCM_HDR_P 0xe0094
+/* [RW 28] The CM header value for QM request (secondary). */
+#define UCM_REG_UQM_UCM_HDR_S 0xe0098
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UQM_UCM_IFEN 0xe0014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_USDM_IFEN 0xe0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define UCM_REG_USDM_LENGTH_MIS 0xe0158
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_USDM_WEIGHT 0xe00c8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_XSEM_IFEN 0xe002c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface is detected. */
+#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_XSEM_WEIGHT 0xe00bc
+/* [RW 20] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [14:6] - message
+ pointer; [19:15] - next pointer. */
+#define UCM_REG_XX_DESCR_TABLE 0xe0280
+#define UCM_REG_XX_DESCR_TABLE_SIZE 27
+/* [R 6] Used to read the XX protection Free counter. */
+#define UCM_REG_XX_FREE 0xe016c
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Write writes the initial credit value; read returns the current
+ value of the credit counter. Must be initialized to 12 at start-up. */
+#define UCM_REG_XX_INIT_CRD 0xe0224
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~ucm_registers_xx_free.xx_free is read on read. */
+#define UCM_REG_XX_MSG_NUM 0xe0228
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG 0x8
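+
+/* Illustrative enable of the UniMAC datapath using the COMMAND_CONFIG
+ * bits above. Assumes the driver's REG_RD()/REG_WR() accessors; the
+ * umac_base parameter is hypothetical. A sketch only.
+ */
+static inline void umac_enable_rx_tx(struct bnx2x *bp, u32 umac_base)
+{
+	u32 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+
+	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+	val |= UMAC_COMMAND_CONFIG_REG_RX_ENA | UMAC_COMMAND_CONFIG_REG_TX_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+}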
+/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
+ * to bit 17 of the MAC address etc. */
+#define UMAC_REG_MAC_ADDR0 0xc
+/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1
+ * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */
+#define UMAC_REG_MAC_ADDR1 0x10
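+
+/* Illustrative programming of the station address from the bit mapping
+ * documented above: MAC_ADDR0 holds MAC bits [47:16] (mac[0]..mac[3]);
+ * MAC_ADDR1 holds bits [15:0] (mac[4], mac[5]). Assumes REG_WR() and a
+ * hypothetical umac_base; a sketch only.
+ */
+static inline void umac_set_mac_addr(struct bnx2x *bp, u32 umac_base,
+				     const u8 *mac)
+{
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+	       (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3]);
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+	       (mac[4] << 8) | mac[5]);
+}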
+/* [RW 14] Defines a 14-bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR 0x14
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define USDM_REG_AGG_INT_EVENT_0 0xc4038
+#define USDM_REG_AGG_INT_EVENT_1 0xc403c
+#define USDM_REG_AGG_INT_EVENT_2 0xc4040
+#define USDM_REG_AGG_INT_EVENT_4 0xc4048
+#define USDM_REG_AGG_INT_EVENT_5 0xc404c
+#define USDM_REG_AGG_INT_EVENT_6 0xc4050
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define USDM_REG_AGG_INT_MODE_0 0xc41b8
+#define USDM_REG_AGG_INT_MODE_1 0xc41bc
+#define USDM_REG_AGG_INT_MODE_4 0xc41c8
+#define USDM_REG_AGG_INT_MODE_5 0xc41cc
+#define USDM_REG_AGG_INT_MODE_6 0xc41d0
+/* [RW 1] The T bit for aggregated interrupt 5 */
+#define USDM_REG_AGG_INT_T_5 0xc40cc
+#define USDM_REG_AGG_INT_T_6 0xc40d0
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c
+#define USDM_REG_ENABLE_IN1 0xc4238
+#define USDM_REG_ENABLE_IN2 0xc423c
+#define USDM_REG_ENABLE_OUT1 0xc4240
+#define USDM_REG_ENABLE_OUT2 0xc4244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0
+/* [ST 32] The number of ACK after placement messages received */
+#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280
+/* [ST 32] The number of packet end messages received from the parser */
+#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278
+/* [ST 32] The number of requests received from the pxp async if */
+#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c
+/* [ST 32] The number of commands received in queue 0 */
+#define USDM_REG_NUM_OF_Q0_CMD 0xc4248
+/* [ST 32] The number of commands received in queue 10 */
+#define USDM_REG_NUM_OF_Q10_CMD 0xc4270
+/* [ST 32] The number of commands received in queue 11 */
+#define USDM_REG_NUM_OF_Q11_CMD 0xc4274
+/* [ST 32] The number of commands received in queue 1 */
+#define USDM_REG_NUM_OF_Q1_CMD 0xc424c
+/* [ST 32] The number of commands received in queue 2 */
+#define USDM_REG_NUM_OF_Q2_CMD 0xc4250
+/* [ST 32] The number of commands received in queue 3 */
+#define USDM_REG_NUM_OF_Q3_CMD 0xc4254
+/* [ST 32] The number of commands received in queue 4 */
+#define USDM_REG_NUM_OF_Q4_CMD 0xc4258
+/* [ST 32] The number of commands received in queue 5 */
+#define USDM_REG_NUM_OF_Q5_CMD 0xc425c
+/* [ST 32] The number of commands received in queue 6 */
+#define USDM_REG_NUM_OF_Q6_CMD 0xc4260
+/* [ST 32] The number of commands received in queue 7 */
+#define USDM_REG_NUM_OF_Q7_CMD 0xc4264
+/* [ST 32] The number of commands received in queue 8 */
+#define USDM_REG_NUM_OF_Q8_CMD 0xc4268
+/* [ST 32] The number of commands received in queue 9 */
+#define USDM_REG_NUM_OF_Q9_CMD 0xc426c
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550
+/* [R 1] parser fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define USDM_REG_TIMER_TICK 0xc4000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USDM_REG_USDM_INT_MASK_0 0xc42a0
+#define USDM_REG_USDM_INT_MASK_1 0xc42b0
+/* [R 32] Interrupt register #0 read */
+#define USDM_REG_USDM_INT_STS_0 0xc4294
+#define USDM_REG_USDM_INT_STS_1 0xc42a4
+/* [RW 11] Parity mask register #0 read/write */
+#define USDM_REG_USDM_PRTY_MASK 0xc42c0
+/* [R 11] Parity register #0 read */
+#define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define USEM_REG_ARB_CYCLE_SIZE 0x300034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2 */
+#define USEM_REG_ARB_ELEMENT0 0x300020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
+#define USEM_REG_ARB_ELEMENT1 0x300024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 */
+#define USEM_REG_ARB_ELEMENT2 0x300028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0 and
+ ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 */
+#define USEM_REG_ARB_ELEMENT3 0x30002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0;
+ 3 - sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 and
+ ~usem_registers_arb_element3.arb_element3 */
+#define USEM_REG_ARB_ELEMENT4 0x300030
+#define USEM_REG_ENABLE_IN 0x3000a4
+#define USEM_REG_ENABLE_OUT 0x3000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define USEM_REG_FAST_MEMORY 0x320000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC0_DISABLE 0x300224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC1_DISABLE 0x300234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define USEM_REG_INT_TABLE 0x300400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define USEM_REG_MSG_NUM_FIC0 0x300000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define USEM_REG_MSG_NUM_FIC1 0x300004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define USEM_REG_MSG_NUM_FOC0 0x300008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define USEM_REG_MSG_NUM_FOC1 0x30000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define USEM_REG_MSG_NUM_FOC2 0x300010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define USEM_REG_MSG_NUM_FOC3 0x300014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define USEM_REG_PAS_DISABLE 0x30024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define USEM_REG_PASSIVE_BUFFER 0x302000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define USEM_REG_PRAM 0x340000
+/* [R 16] Valid sleeping threads indication; there is a bit per thread */
+#define USEM_REG_SLEEP_THREADS_VALID 0x30026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define USEM_REG_THREADS_LIST 0x3002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define USEM_REG_TS_0_AS 0x300038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define USEM_REG_TS_10_AS 0x300060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define USEM_REG_TS_11_AS 0x300064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define USEM_REG_TS_12_AS 0x300068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define USEM_REG_TS_13_AS 0x30006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define USEM_REG_TS_14_AS 0x300070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define USEM_REG_TS_15_AS 0x300074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define USEM_REG_TS_16_AS 0x300078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define USEM_REG_TS_17_AS 0x30007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define USEM_REG_TS_18_AS 0x300080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define USEM_REG_TS_1_AS 0x30003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define USEM_REG_TS_2_AS 0x300040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define USEM_REG_TS_3_AS 0x300044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define USEM_REG_TS_4_AS 0x300048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define USEM_REG_TS_5_AS 0x30004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define USEM_REG_TS_6_AS 0x300050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define USEM_REG_TS_7_AS 0x300054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define USEM_REG_TS_8_AS 0x300058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define USEM_REG_TS_9_AS 0x30005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USEM_REG_USEM_INT_MASK_0 0x300110
+#define USEM_REG_USEM_INT_MASK_1 0x300120
+/* [R 32] Interrupt register #0 read */
+#define USEM_REG_USEM_INT_STS_0 0x300104
+#define USEM_REG_USEM_INT_STS_1 0x300114
+/* [RW 32] Parity mask register #0 read/write */
+#define USEM_REG_USEM_PRTY_MASK_0 0x300130
+#define USEM_REG_USEM_PRTY_MASK_1 0x300140
+/* [R 32] Parity register #0 read */
+#define USEM_REG_USEM_PRTY_STS_0 0x300124
+#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define USEM_REG_VFPF_ERR_NUM 0x300380
+#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
+#define VFC_REG_MEMORIES_RST 0x1943c
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [12:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to
+ * REG10 of LCID100; the RBC address should be 13'ha64. */
+#define XCM_REG_AG_CTX 0x28000
+/* [RW 2] The queue index for registration on Aux1 counter flag. */
+#define XCM_REG_AUX1_Q 0x20134
+/* [RW 2] For each decision rule; the queue index to register to. */
+#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define XCM_REG_CAM_OCCUP 0x20244
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_AG_RD_IFEN 0x20044
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define XCM_REG_CDU_AG_WR_IFEN 0x20040
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define XCM_REG_CDU_SM_WR_IFEN 0x20048
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define XCM_REG_CFC_INIT_CRD 0x20404
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CP_WEIGHT 0x200dc
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_CSEM_IFEN 0x20028
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the csem interface. */
+#define XCM_REG_CSEM_LENGTH_MIS 0x20228
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CSEM_WEIGHT 0x200c4
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_DORQ_IFEN 0x20030
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the dorq interface. */
+#define XCM_REG_DORQ_LENGTH_MIS 0x20230
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_DORQ_WEIGHT 0x200cc
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define XCM_REG_ERR_EVNT_ID 0x200b0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define XCM_REG_ERR_XCM_HDR 0x200ac
+/* [RW 8] The Event ID for Timers expiration. */
+#define XCM_REG_EXPR_EVNT_ID 0x200b4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC0_INIT_CRD 0x2040c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC1_INIT_CRD 0x20410
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
+ ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define XCM_REG_GR_ARB_TYPE 0x2020c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD0_PR 0x20214
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD1_PR 0x20218
+/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG0_IFEN 0x20038
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig0 interface. */
+#define XCM_REG_NIG0_LENGTH_MIS 0x20238
+/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_NIG0_WEIGHT 0x200d4
+/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG1_IFEN 0x2003c
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig1 interface. */
+#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define XCM_REG_N_SM_CTX_LD_0 0x20060
+#define XCM_REG_N_SM_CTX_LD_1 0x20064
+#define XCM_REG_N_SM_CTX_LD_2 0x20068
+#define XCM_REG_N_SM_CTX_LD_3 0x2006c
+#define XCM_REG_N_SM_CTX_LD_4 0x20070
+#define XCM_REG_N_SM_CTX_LD_5 0x20074
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_PBF_IFEN 0x20034
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the pbf interface. */
+#define XCM_REG_PBF_LENGTH_MIS 0x20234
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_PBF_WEIGHT 0x200d0
+#define XCM_REG_PHYS_QNUM3_0 0x20100
+#define XCM_REG_PHYS_QNUM3_1 0x20104
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define XCM_REG_STOP_EVNT_ID 0x200b8
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the STORM interface. */
+#define XCM_REG_STORM_LENGTH_MIS 0x2021c
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_STORM_WEIGHT 0x200bc
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_STORM_XCM_IFEN 0x20010
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define XCM_REG_TM_INIT_CRD 0x2041c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TM_WEIGHT 0x200ec
+/* [RW 28] The CM header for Timers expiration command. */
+#define XCM_REG_TM_XCM_HDR 0x200a8
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TM_XCM_IFEN 0x2001c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TSEM_IFEN 0x20024
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the tsem interface. */
+#define XCM_REG_TSEM_LENGTH_MIS 0x20224
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TSEM_WEIGHT 0x200c0
+/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
+#define XCM_REG_UNA_GT_NXT_Q 0x20120
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_USEM_IFEN 0x2002c
+/* [RC 1] Message length mismatch (relative to last indication) at the usem
+ interface. */
+#define XCM_REG_USEM_LENGTH_MIS 0x2022c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_USEM_WEIGHT 0x200c8
+#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
+#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
+#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
+#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
+#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
+#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
+#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
+#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_CFC_IFEN 0x20050
+/* [RW 14] Interrupt mask register #0 read/write */
+#define XCM_REG_XCM_INT_MASK 0x202b4
+/* [R 14] Interrupt register #0 read */
+#define XCM_REG_XCM_INT_STS 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK 0x202c4
+/* [R 30] Parity register #0 read */
+#define XCM_REG_XCM_PRTY_STS 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc
+
+/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the Reg1WbFlg isn't set. */
+#define XCM_REG_XCM_REG0_SZ 0x200f4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM0_IFEN 0x20004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM1_IFEN 0x20008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_XCM_TM_IFEN 0x20020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_XQM_IFEN 0x2000c
+/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
+#define XCM_REG_XCM_XQM_USE_Q 0x200f0
+/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
+#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define XCM_REG_XQM_INIT_CRD 0x20420
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_P_WEIGHT 0x200e4
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_S_WEIGHT 0x200e8
+/* [RW 28] The CM header value for QM request (primary). */
+#define XCM_REG_XQM_XCM_HDR_P 0x200a0
+/* [RW 28] The CM header value for QM request (secondary). */
+#define XCM_REG_XQM_XCM_HDR_S 0x200a4
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XQM_XCM_IFEN 0x20014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XSDM_IFEN 0x20018
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the SDM interface. */
+#define XCM_REG_XSDM_LENGTH_MIS 0x20220
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XSDM_WEIGHT 0x200e0
+/* [RW 17] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [11:6] - message
+ pointer; [16:12] - next pointer. */
+#define XCM_REG_XX_DESCR_TABLE 0x20480
+#define XCM_REG_XX_DESCR_TABLE_SIZE 32
+/* [R 6] Used to read the XX protection Free counter. */
+#define XCM_REG_XX_FREE 0x20240
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 3. Write writes the initial credit value;
+ read returns the current value of the credit counter. Must be initialized
+ to 2 at start-up. */
+#define XCM_REG_XX_INIT_CRD 0x20424
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~xcm_registers_xx_free.xx_free is read on read. */
+#define XCM_REG_XX_MSG_NUM 0x20428
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
+#define XMAC_CTRL_REG_RX_EN (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN (0x1<<0)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60
+#define XMAC_REG_CTRL 0
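+
+/* Illustrative XMAC datapath enable using the CTRL bits above. Assumes
+ * the driver's REG_RD()/REG_WR() accessors; xmac_base is hypothetical.
+ * A sketch only, not part of the register file.
+ */
+static inline void xmac_enable_rx_tx(struct bnx2x *bp, u32 xmac_base)
+{
+	u32 val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+
+	val &= ~XMAC_CTRL_REG_SOFT_RESET;
+	val |= XMAC_CTRL_REG_RX_EN | XMAC_CTRL_REG_TX_EN;
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+}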
+/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_HI 0x2c
+/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_PAUSE_CTRL 0x68
+#define XMAC_REG_PFC_CTRL 0x70
+#define XMAC_REG_PFC_CTRL_HI 0x74
+#define XMAC_REG_RX_LSS_STATUS 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE 0x40
+#define XMAC_REG_TX_CTRL 0x20
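+/*
+ * Illustrative sketch (not part of the generated header): taking the XMAC
+ * out of soft reset and enabling both directions with pause could look
+ * like the following, assuming REG_RD()/REG_WR() are the driver's GRC
+ * access helpers and 'base' is GRCBASE_XMAC0 or GRCBASE_XMAC1:
+ *
+ *   u32 val = REG_RD(bp, base + XMAC_REG_CTRL);
+ *   val &= ~XMAC_CTRL_REG_SOFT_RESET;
+ *   val |= XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+ *   REG_WR(bp, base + XMAC_REG_CTRL, val);
+ *   REG_WR(bp, base + XMAC_REG_PAUSE_CTRL,
+ *          XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN | XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN);
+ */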
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
+ header pointer. */
+#define XCM_REG_XX_TABLE 0x20500
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define XSDM_REG_AGG_INT_EVENT_0 0x166038
+#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
+#define XSDM_REG_AGG_INT_EVENT_10 0x166060
+#define XSDM_REG_AGG_INT_EVENT_11 0x166064
+#define XSDM_REG_AGG_INT_EVENT_12 0x166068
+#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
+#define XSDM_REG_AGG_INT_EVENT_14 0x166070
+#define XSDM_REG_AGG_INT_EVENT_2 0x166040
+#define XSDM_REG_AGG_INT_EVENT_3 0x166044
+#define XSDM_REG_AGG_INT_EVENT_4 0x166048
+#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
+#define XSDM_REG_AGG_INT_EVENT_6 0x166050
+#define XSDM_REG_AGG_INT_EVENT_7 0x166054
+#define XSDM_REG_AGG_INT_EVENT_8 0x166058
+#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
+#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
+#define XSDM_REG_ENABLE_IN1 0x166238
+#define XSDM_REG_ENABLE_IN2 0x16623c
+#define XSDM_REG_ENABLE_OUT1 0x166240
+#define XSDM_REG_ENABLE_OUT2 0x166244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc
+/* [ST 32] The number of ACK after placement messages received */
+#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c
+/* [ST 32] The number of packet end messages received from the parser */
+#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274
+/* [ST 32] The number of requests received from the pxp async if */
+#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278
+/* [ST 32] The number of commands received in queue 0 */
+#define XSDM_REG_NUM_OF_Q0_CMD 0x166248
+/* [ST 32] The number of commands received in queue 10 */
+#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c
+/* [ST 32] The number of commands received in queue 11 */
+#define XSDM_REG_NUM_OF_Q11_CMD 0x166270
+/* [ST 32] The number of commands received in queue 1 */
+#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c
+/* [ST 32] The number of commands received in queue 3 */
+#define XSDM_REG_NUM_OF_Q3_CMD 0x166250
+/* [ST 32] The number of commands received in queue 4 */
+#define XSDM_REG_NUM_OF_Q4_CMD 0x166254
+/* [ST 32] The number of commands received in queue 5 */
+#define XSDM_REG_NUM_OF_Q5_CMD 0x166258
+/* [ST 32] The number of commands received in queue 6 */
+#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c
+/* [ST 32] The number of commands received in queue 7 */
+#define XSDM_REG_NUM_OF_Q7_CMD 0x166260
+/* [ST 32] The number of commands received in queue 8 */
+#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
+/* [ST 32] The number of commands received in queue 9 */
+#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN 0x1664c4
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~xsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define XSDM_REG_TIMER_TICK 0x166000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSDM_REG_XSDM_INT_MASK_0 0x16629c
+#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
+/* [R 32] Interrupt register #0 read */
+#define XSDM_REG_XSDM_INT_STS_0 0x166290
+#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
+/* [RW 11] Parity mask register #0 read/write */
+#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
+/* [R 11] Parity register #0 read */
+#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define XSEM_REG_ARB_ELEMENT0 0x280020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Must not be equal to register ~xsem_registers_arb_element0.arb_element0 */
+#define XSEM_REG_ARB_ELEMENT1 0x280024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+ and ~xsem_registers_arb_element1.arb_element1 */
+#define XSEM_REG_ARB_ELEMENT2 0x280028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2. Must
+ not be equal to register ~xsem_registers_arb_element0.arb_element0 and
+ ~xsem_registers_arb_element1.arb_element1 and
+ ~xsem_registers_arb_element2.arb_element2 */
+#define XSEM_REG_ARB_ELEMENT3 0x28002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+ and ~xsem_registers_arb_element1.arb_element1 and
+ ~xsem_registers_arb_element2.arb_element2 and
+ ~xsem_registers_arb_element3.arb_element3 */
+#define XSEM_REG_ARB_ELEMENT4 0x280030
+#define XSEM_REG_ENABLE_IN 0x2800a4
+#define XSEM_REG_ENABLE_OUT 0x2800a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define XSEM_REG_FAST_MEMORY 0x2a0000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define XSEM_REG_FIC0_DISABLE 0x280224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define XSEM_REG_FIC1_DISABLE 0x280234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define XSEM_REG_INT_TABLE 0x280400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define XSEM_REG_MSG_NUM_FIC0 0x280000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define XSEM_REG_MSG_NUM_FIC1 0x280004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define XSEM_REG_MSG_NUM_FOC0 0x280008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define XSEM_REG_MSG_NUM_FOC1 0x28000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define XSEM_REG_MSG_NUM_FOC2 0x280010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define XSEM_REG_MSG_NUM_FOC3 0x280014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define XSEM_REG_PAS_DISABLE 0x28024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define XSEM_REG_PASSIVE_BUFFER 0x282000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define XSEM_REG_PRAM 0x2c0000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define XSEM_REG_THREADS_LIST 0x2802e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define XSEM_REG_TS_0_AS 0x280038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define XSEM_REG_TS_10_AS 0x280060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define XSEM_REG_TS_11_AS 0x280064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define XSEM_REG_TS_12_AS 0x280068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define XSEM_REG_TS_13_AS 0x28006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define XSEM_REG_TS_14_AS 0x280070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define XSEM_REG_TS_15_AS 0x280074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define XSEM_REG_TS_16_AS 0x280078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define XSEM_REG_TS_17_AS 0x28007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define XSEM_REG_TS_18_AS 0x280080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define XSEM_REG_TS_1_AS 0x28003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define XSEM_REG_TS_2_AS 0x280040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define XSEM_REG_TS_3_AS 0x280044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define XSEM_REG_TS_4_AS 0x280048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define XSEM_REG_TS_5_AS 0x28004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define XSEM_REG_TS_6_AS 0x280050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define XSEM_REG_TS_7_AS 0x280054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define XSEM_REG_TS_8_AS 0x280058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define XSEM_REG_TS_9_AS 0x28005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for one of the 64 VFs; values 64-67 reset it for one of the 4 PFs;
+ * values 68-127 are not valid. */
+#define XSEM_REG_VFPF_ERR_NUM 0x280380
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSEM_REG_XSEM_INT_MASK_0 0x280110
+#define XSEM_REG_XSEM_INT_MASK_1 0x280120
+/* [R 32] Interrupt register #0 read */
+#define XSEM_REG_XSEM_INT_STS_0 0x280104
+#define XSEM_REG_XSEM_INT_STS_1 0x280114
+/* [RW 32] Parity mask register #0 read/write */
+#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
+#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
+/* [R 32] Parity register #0 read */
+#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
+#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
+#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT (1L<<4)
+#define MCPR_NVM_COMMAND_DONE (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST (1L<<7)
+#define MCPR_NVM_COMMAND_LAST (1L<<8)
+#define MCPR_NVM_COMMAND_WR (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
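+/*
+ * Illustrative NVRAM read sketch (assumed flow, not part of the generated
+ * header): software first requests the NVM software arbiter, then issues
+ * a DOIT command and polls for DONE. The MCP_REG_MCPR_* register addresses
+ * and the REG_RD()/REG_WR() helpers are assumed to come from the rest of
+ * the driver:
+ *
+ *   REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, MCPR_NVM_SW_ARB_ARB_REQ_SET1);
+ *   while (!(REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB) & MCPR_NVM_SW_ARB_ARB_ARB1))
+ *           udelay(5);
+ *   REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE);
+ *   REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DOIT |
+ *          MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+ *   while (!(REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND) & MCPR_NVM_COMMAND_DONE))
+ *           udelay(5);
+ */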
+#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
+#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
+#define EMAC_LED_OVERRIDE (1L<<0)
+#define EMAC_LED_TRAFFIC (1L<<6)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
+#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
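+/*
+ * Illustrative clause-22 MDIO read sketch: a command is written to the
+ * COMM register with START_BUSY set, then polled until the controller
+ * clears START_BUSY, leaving the PHY data in the DATA field. The phy/reg
+ * field positions, 'emac_base' and the REG_RD()/REG_WR() helpers are
+ * assumptions here:
+ *
+ *   u32 val = (phy_addr << 21) | (reg << 16) |
+ *             EMAC_MDIO_COMM_COMMAND_READ_22 | EMAC_MDIO_COMM_START_BUSY;
+ *   REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, val);
+ *   do {
+ *           udelay(10);
+ *           val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
+ *   } while (val & EMAC_MDIO_COMM_START_BUSY);
+ *   return val & EMAC_MDIO_COMM_DATA;
+ */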
+#define EMAC_MODE_25G_MODE (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define EMAC_MODE_PORT_GMII (2L<<2)
+#define EMAC_MODE_PORT_MII (1L<<2)
+#define EMAC_MODE_PORT_MII_10M (3L<<2)
+#define EMAC_MODE_RESET (1L<<0)
+#define EMAC_REG_EMAC_LED 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
+#define EMAC_REG_EMAC_MODE 0x0
+#define EMAC_REG_EMAC_RX_MODE 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
+#define EMAC_REG_EMAC_TX_MODE 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define EMAC_RX_MODE_RESET (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define EMAC_TX_MODE_RESET (1L<<0)
+#define MISC_REGISTERS_GPIO_0 0
+#define MISC_REGISTERS_GPIO_1 1
+#define MISC_REGISTERS_GPIO_2 2
+#define MISC_REGISTERS_GPIO_3 3
+#define MISC_REGISTERS_GPIO_CLR_POS 16
+#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS 24
+#define MISC_REGISTERS_GPIO_HIGH 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 16
+#define MISC_REGISTERS_GPIO_LOW 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
+#define MISC_REGISTERS_GPIO_SET_POS 8
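+/*
+ * Illustrative sketch: the GPIO control register packs 8-bit fields at
+ * SET_POS/CLR_POS/FLOAT_POS, one bit per pin, with port 1 pins shifted up
+ * by GPIO_PORT_SHIFT. Driving a pin high could look like the following
+ * (the MISC_REG_GPIO address and the REG_WR() helper are assumed here):
+ *
+ *   u32 pin = gpio_num + (port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ *   REG_WR(bp, MISC_REG_GPIO, 1 << (pin + MISC_REGISTERS_GPIO_SET_POS));
+ */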
+#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_SET 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
+#define MISC_REGISTERS_SPIO_4 4
+#define MISC_REGISTERS_SPIO_5 5
+#define MISC_REGISTERS_SPIO_7 7
+#define MISC_REGISTERS_SPIO_CLR_POS 16
+#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_SPIO_FLOAT_POS 24
+#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
+#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_SPIO_SET_POS 8
+#define HW_LOCK_DRV_FLAGS 10
+#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_MDIO 0
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
+#define HW_LOCK_RESOURCE_SPIO 2
+#define HW_LOCK_RESOURCE_RESET 5
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9)
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0 0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6 6
+#define RESERVED_GENERAL_ATTENTION_BIT_7 7
+#define RESERVED_GENERAL_ATTENTION_BIT_8 8
+#define RESERVED_GENERAL_ATTENTION_BIT_9 9
+#define RESERVED_GENERAL_ATTENTION_BIT_10 10
+#define RESERVED_GENERAL_ATTENTION_BIT_11 11
+#define RESERVED_GENERAL_ATTENTION_BIT_12 12
+#define RESERVED_GENERAL_ATTENTION_BIT_13 13
+#define RESERVED_GENERAL_ATTENTION_BIT_14 14
+#define RESERVED_GENERAL_ATTENTION_BIT_15 15
+#define RESERVED_GENERAL_ATTENTION_BIT_16 16
+#define RESERVED_GENERAL_ATTENTION_BIT_17 17
+#define RESERVED_GENERAL_ATTENTION_BIT_18 18
+#define RESERVED_GENERAL_ATTENTION_BIT_19 19
+#define RESERVED_GENERAL_ATTENTION_BIT_20 20
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
+/*E1H NIG status sync attention mapped to group 4-7*/
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
+
+
+#define LATCHED_ATTN_RBCR 23
+#define LATCHED_ATTN_RBCT 24
+#define LATCHED_ATTN_RBCN 25
+#define LATCHED_ATTN_RBCU 26
+#define LATCHED_ATTN_RBCP 27
+#define LATCHED_ATTN_TIMEOUT_GRC 28
+#define LATCHED_ATTN_RSVD_GRC 29
+#define LATCHED_ATTN_ROM_PARITY_MCP 30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
+
+#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name)\
+ (1UL << ((94 + atten_name) % 32))
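+/*
+ * Worked example: the general attention inputs start 94 bits into the AEU
+ * vector, so MCP_FATAL_ASSERT_ATTENTION_BIT (reserved bit 11) lands in
+ * 32-bit word (94 + 11) / 32 = 3, at mask 1UL << ((94 + 11) % 32) = 1UL << 9.
+ */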
+/*
+ * This file defines GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml on regBase attribute
+ * Use the base with the generated offsets of specific registers.
+ */
+
+#define GRCBASE_PXPCS 0x000000
+#define GRCBASE_PCICONFIG 0x002000
+#define GRCBASE_PCIREG 0x002400
+#define GRCBASE_EMAC0 0x008000
+#define GRCBASE_EMAC1 0x008400
+#define GRCBASE_DBU 0x008800
+#define GRCBASE_MISC 0x00A000
+#define GRCBASE_DBG 0x00C000
+#define GRCBASE_NIG 0x010000
+#define GRCBASE_XCM 0x020000
+#define GRCBASE_PRS 0x040000
+#define GRCBASE_SRCH 0x040400
+#define GRCBASE_TSDM 0x042000
+#define GRCBASE_TCM 0x050000
+#define GRCBASE_BRB1 0x060000
+#define GRCBASE_MCP 0x080000
+#define GRCBASE_UPB 0x0C1000
+#define GRCBASE_CSDM 0x0C2000
+#define GRCBASE_USDM 0x0C4000
+#define GRCBASE_CCM 0x0D0000
+#define GRCBASE_UCM 0x0E0000
+#define GRCBASE_CDU 0x101000
+#define GRCBASE_DMAE 0x102000
+#define GRCBASE_PXP 0x103000
+#define GRCBASE_CFC 0x104000
+#define GRCBASE_HC 0x108000
+#define GRCBASE_PXP2 0x120000
+#define GRCBASE_PBF 0x140000
+#define GRCBASE_UMAC0 0x160000
+#define GRCBASE_UMAC1 0x160400
+#define GRCBASE_XPB 0x161000
+#define GRCBASE_MSTAT0 0x162000
+#define GRCBASE_MSTAT1 0x162800
+#define GRCBASE_XMAC0 0x163000
+#define GRCBASE_XMAC1 0x163800
+#define GRCBASE_TIMERS 0x164000
+#define GRCBASE_XSDM 0x166000
+#define GRCBASE_QM 0x168000
+#define GRCBASE_DQ 0x170000
+#define GRCBASE_TSEM 0x180000
+#define GRCBASE_CSEM 0x200000
+#define GRCBASE_XSEM 0x280000
+#define GRCBASE_USEM 0x300000
+#define GRCBASE_MISC_AEU GRCBASE_MISC
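+/*
+ * Example: most register offsets in this file are absolute GRC addresses
+ * that already include their block base (XSEM_REG_ARB_CYCLE_SIZE 0x280034
+ * is GRCBASE_XSEM + 0x34), while the MAC blocks use small per-block
+ * offsets that are added to a base (e.g. GRCBASE_XMAC0 + XMAC_REG_CTRL).
+ */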
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET 0x2000
+#define PCICFG_VENDOR_ID_OFFSET 0x00
+#define PCICFG_DEVICE_ID_OFFSET 0x02
+#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
+#define PCICFG_STATUS_OFFSET 0x06
+#define PCICFG_REVESION_ID_OFFSET 0x08
+#define PCICFG_CACHE_LINE_SIZE 0x0c
+#define PCICFG_LATENCY_TIMER 0x0d
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_MSI_CAP_ID_OFFSET 0x58
+#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
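+/*
+ * Illustrative sketch: the address/data pair above forms an indirect
+ * window into GRC space through PCI config cycles, usable before the BARs
+ * are mapped; a read could look like (assumed flow):
+ *
+ *   pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ *   pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+ */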
+#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL 0xb4
+#define PCICFG_DEVICE_STATUS 0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
+#define PCICFG_LINK_CONTROL 0xbc
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
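+/*
+ * Note: a nonzero BAR1 size encoding n selects 64K << (n - 1), so the
+ * values above run 1 -> 64K up to 15 -> 1G, with 0 disabling the BAR;
+ * the EXP_ROM size field follows the same pattern starting at 2K.
+ */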
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+#define GRC_BAR2_CONFIG 0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
+
+#define PXPCS_TL_CONTROL_5 0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT 0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
+ (1 << 28) /* Unsupported Request Error Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
+ (1 << 27) /* ECRC Error TLP Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
+ (1 << 26) /* Malformed TLP Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
+ (1 << 25) /* Receiver Overflow Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
+ (1 << 24) /* Unexpected Completion Status in function 4, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
+ (1 << 23) /* Receive UR Status in function 4. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
+ (1 << 22) /* Completer Timeout Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
+ (1 << 21) /* Flow Control Protocol Error Status in \
+ function 4, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
+ (1 << 20) /* Poisoned Error Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
+ (1 << 18) /* Unsupported Request Error Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
+ (1 << 17) /* ECRC Error TLP Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
+ (1 << 16) /* Malformed TLP Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
+ (1 << 15) /* Receiver Overflow Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
+ (1 << 14) /* Unexpected Completion Status in function 3, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
+ (1 << 13) /* Receive UR Status in function 3. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
+ (1 << 12) /* Completer Timeout Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
+ (1 << 11) /* Flow Control Protocol Error Status in \
+ function 3, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
+ (1 << 10) /* Poisoned Error Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
+ (1 << 8) /* Unsupported Request Error Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
+ (1 << 7) /* ECRC Error TLP Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
+ (1 << 6) /* Malformed TLP Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
+ (1 << 5) /* Receiver Overflow Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
+ (1 << 4) /* Unexpected Completion Status for Function 2, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
+ (1 << 3) /* Receive UR Status for Function 2. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
+ (1 << 2) /* Completer Timeout Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
+ (1 << 1) /* Flow Control Protocol Error Status for \
+ Function 2, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
+ (1 << 0) /* Poisoned Error Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT 0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
+ (1 << 28) /* Unsupported Request Error Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
+ (1 << 27) /* ECRC Error TLP Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
+ (1 << 26) /* Malformed TLP Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
+ (1 << 25) /* Receiver Overflow Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
+ (1 << 24) /* Unexpected Completion Status in function 7, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
+ (1 << 23) /* Receive UR Status in function 7. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
+ (1 << 22) /* Completer Timeout Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
+ (1 << 21) /* Flow Control Protocol Error Status in \
+ function 7, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
+ (1 << 20) /* Poisoned Error Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
+ (1 << 18) /* Unsupported Request Error Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
+ (1 << 17) /* ECRC Error TLP Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
+ (1 << 16) /* Malformed TLP Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
+ (1 << 15) /* Receiver Overflow Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
+ (1 << 14) /* Unexpected Completion Status in function 6, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
+ (1 << 13) /* Receive UR Status in function 6. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
+ (1 << 12) /* Completer Timeout Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
+ (1 << 11) /* Flow Control Protocol Error Status in \
+ function 6, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
+ (1 << 10) /* Poisoned Error Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
+ (1 << 8) /* Unsupported Request Error Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
+ (1 << 7) /* ECRC Error TLP Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
+ (1 << 6) /* Malformed TLP Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
+ (1 << 5) /* Receiver Overflow Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
+ (1 << 4) /* Unexpected Completion Status for Function 5, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
+ (1 << 3) /* Receive UR Status for Function 5. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
+ (1 << 2) /* Completer Timeout Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
+ (1 << 1) /* Flow Control Protocol Error Status for \
+ Function 5, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
+ (1 << 0) /* Poisoned Error Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+
+
+#define ME_REG_PF_NUM_SHIFT 0
+#define ME_REG_PF_NUM\
+ (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID (1<<8)
+#define ME_REG_VF_NUM_SHIFT 9
+#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR (0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT 16
+#define ME_REG_ABS_PF_NUM\
+ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
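+/*
+ * Illustrative sketch (assumed flow): a function can discover its own
+ * identity by reading its ME register through BAR_ME_REGISTER (defined
+ * above); REG_RD() is assumed to be the driver's register read helper:
+ *
+ *   u32 me = REG_RD(bp, BAR_ME_REGISTER);
+ *   u8 abs_pf = (me & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT;
+ *   bool is_vf = !!(me & ME_REG_VF_VALID);
+ */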
+
+
+#define MDIO_REG_BANK_CL73_IEEEB0 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1 0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+
+#define MDIO_REG_BANK_RX0 0x80b0
+#define MDIO_RX0_RX_STATUS 0x10
+#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+#define MDIO_RX0_RX_EQ_BOOST 0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX1 0x80c0
+#define MDIO_RX1_RX_EQ_BOOST 0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX2 0x80d0
+#define MDIO_RX2_RX_EQ_BOOST 0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX3 0x80e0
+#define MDIO_RX3_RX_EQ_BOOST 0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX_ALL 0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_TX0 0x8060
+#define MDIO_TX0_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX1 0x8070
+#define MDIO_TX1_TX_DRIVER 0x17
+
+#define MDIO_REG_BANK_TX2 0x8080
+#define MDIO_TX2_TX_DRIVER 0x17
+
+#define MDIO_REG_BANK_TX3 0x8090
+#define MDIO_TX3_TX_DRIVER 0x17
+
+#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
+#define MDIO_BLOCK1_LANE_CTRL0 0x15
+#define MDIO_BLOCK1_LANE_CTRL1 0x16
+#define MDIO_BLOCK1_LANE_CTRL2 0x17
+#define MDIO_BLOCK1_LANE_PRBS 0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+#define MDIO_REG_BANK_GP_STATUS 0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
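+/*
+ * Illustrative sketch: the resolved link speed is decoded by masking the
+ * status word, e.g.
+ *
+ *   speed = gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK;
+ *   if (speed == MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4)
+ *           ... link is up at 10G ...
+ */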
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1 0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+#define MDIO_REG_BANK_OVER_1G 0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+#define MDIO_OVER_1G_UP1 0x19
+#define MDIO_OVER_1G_UP1_2_5G 0x0001
+#define MDIO_OVER_1G_UP1_5G 0x0002
+#define MDIO_OVER_1G_UP1_6G 0x0004
+#define MDIO_OVER_1G_UP1_10G 0x0010
+#define MDIO_OVER_1G_UP1_10GH 0x0008
+#define MDIO_OVER_1G_UP1_12G 0x0020
+#define MDIO_OVER_1G_UP1_12_5G 0x0040
+#define MDIO_OVER_1G_UP1_13G 0x0080
+#define MDIO_OVER_1G_UP1_15G 0x0100
+#define MDIO_OVER_1G_UP1_16G 0x0200
+#define MDIO_OVER_1G_UP2 0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+#define MDIO_OVER_1G_UP3 0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+#define MDIO_OVER_1G_LP_UP1 0x1C
+#define MDIO_OVER_1G_LP_UP2 0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+#define MDIO_OVER_1G_LP_UP3 0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY 0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0 0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+/* When the link partner is in SGMII mode (bit 0 = 1), then
+ * bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
+ * The other bits are reserved and should be zero.
+ */
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
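
As a sketch of the layout described above (bit 15 = link, bit 12 = duplex, bits 11:10 = speed), a hypothetical decode helper, not part of this patch, could look like this; the speed encoding (0 = 10M, 1 = 100M, 2 = 1G) is the usual SGMII convention and is assumed here:

static inline void sgmii_decode_lp_ability(u16 lp, int *link, int *duplex,
					   int *speed)
{
	/* only meaningful when bit 0 reports SGMII mode */
	*link   = (lp >> 15) & 0x1;
	*duplex = (lp >> 12) & 0x1;
	*speed  = (lp >> 10) & 0x3;	/* assumed: 0 = 10M, 1 = 100M, 2 = 1G */
}
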
+
+
+#define MDIO_PMA_DEVAD 0x1
+/*ieee*/
+#define MDIO_PMA_REG_CTRL 0x0
+#define MDIO_PMA_REG_STATUS 0x1
+#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
+#define MDIO_PMA_REG_RX_SD 0xa
+/*bcm*/
+#define MDIO_PMA_REG_BCM_CTRL 0x0096
+#define MDIO_PMA_REG_FEC_CTRL 0x00ab
+#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
+#define MDIO_PMA_REG_MISC_CTRL 0xca0a
+#define MDIO_PMA_REG_GEN_CTRL 0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
+#define MDIO_PMA_REG_ROM_VER1 0xca19
+#define MDIO_PMA_REG_ROM_VER2 0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
+#define MDIO_PMA_REG_PLL_CTRL 0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 0xca23
+#define MDIO_PMA_REG_LRM_MODE 0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
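
The two-wire status codes above describe a small state machine (idle, in progress, complete, failed). A hedged polling sketch follows, with rd45() standing in for whatever clause-45 read helper the driver uses; the mandatory timeout is elided:

	u16 status;

	do {	/* a real implementation must bound this loop with a timeout */
		status = rd45(phy, MDIO_PMA_DEVAD,
			      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL) &
			 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK;
	} while (status == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS);

	if (status != MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
		;	/* transfer failed or never started */
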
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
+
+#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1 0xc026
+#define MDIO_PMA_REG_7101_VER2 0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD 0x2
+/*bcm*/
+#define MDIO_WIS_REG_LASI_CNTL 0x9002
+#define MDIO_WIS_REG_LASI_STATUS 0x9005
+
+#define MDIO_PCS_DEVAD 0x3
+#define MDIO_PCS_REG_STATUS 0x0020
+#define MDIO_PCS_REG_LASI_STATUS 0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD 0x4
+#define MDIO_XS_PLL_SEQUENCER 0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
+#define MDIO_AN_DEVAD 0x7
+/*ieee*/
+#define MDIO_AN_REG_CTRL 0x0000
+#define MDIO_AN_REG_STATUS 0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+#define MDIO_AN_REG_ADV_PAUSE 0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+#define MDIO_AN_REG_ADV 0x0011
+#define MDIO_AN_REG_ADV2 0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_MASTER_STATUS 0x0021
+/*bcm*/
+#define MDIO_AN_REG_LINK_STATUS 0x8304
+#define MDIO_AN_REG_CL37_CL73 0x8370
+#define MDIO_AN_REG_CL37_AN 0xffe0
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+
+#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, the 10GBASE-T core is
+  * powered down; when it is active low, the 10GBASE-T core is powered up.
+  */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* These are the mailbox registers used by the 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
+#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+/* Mailbox status set used by 84833. */
+#define PHY84833_CMD_RECEIVED 0x0001
+#define PHY84833_CMD_IN_PROGRESS 0x0002
+#define PHY84833_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
+
+
+/* 84833 F/W Feature Commands */
+#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
+#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
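
To show how the tap fields above pack into a single register word, a hedged sketch (the coefficient values are made up):

	u16 pre = 0x2, main_tap = 0x25, post = 0x4;	/* arbitrary examples */
	u16 fir_tap =
		((pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) &
		 MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK) |
		((main_tap << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) &
		 MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK) |
		((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) &
		 MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK) |
		MDIO_WC_REG_TX_FIR_TAP_ENABLE;
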
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_ADV 0x3c
+#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
+#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
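
The shadow-register values above imply the usual Broadcom select-then-write protocol: bits 14:10 choose the shadow register and bit 15 enables the write. A hedged sketch of composing such a write (the low data bits are hypothetical):

	u16 shadow_wr = MDIO_REG_GPHY_SHADOW_WR_ENA |
			MDIO_REG_GPHY_SHADOW_LED_SEL2 |
			led_bits;  /* assumed: LED selector data in low bits */
	/* written to MDIO_REG_GPHY_SHADOW via the normal MII interface */
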
+
+#define IGU_FUNC_BASE 0x0400
+
+#define IGU_ADDR_MSIX 0x0000
+#define IGU_ADDR_INT_ACK 0x0200
+#define IGU_ADDR_PROD_UPD 0x0201
+#define IGU_ADDR_ATTN_BITS_UPD 0x0202
+#define IGU_ADDR_ATTN_BITS_SET 0x0203
+#define IGU_ADDR_ATTN_BITS_CLR 0x0204
+#define IGU_ADDR_COALESCE_NOW 0x0205
+#define IGU_ADDR_SIMD_MASK 0x0206
+#define IGU_ADDR_SIMD_NOMASK 0x0207
+#define IGU_ADDR_MSI_CTL 0x0210
+#define IGU_ADDR_MSI_ADDR_LO 0x0211
+#define IGU_ADDR_MSI_ADDR_HI 0x0212
+#define IGU_ADDR_MSI_DATA 0x0213
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
+
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
+
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER\
+ (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER\
+ (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
+
+#define IGU_REG_RESERVED_UPPER 0x05ff
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
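
For example, a PF running with MSI-X, attention handling, and the function enabled would write the OR of the relevant bits; this is only a sketch, as the actual enable sequence in the driver is more involved:

	u32 pf_conf = IGU_PF_CONF_FUNC_EN |
		      IGU_PF_CONF_MSI_MSIX_EN |
		      IGU_PF_CONF_ATTN_BIT_EN;
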
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS 5
+#define IGU_BC_NDSB_NUM_SEGS 2
+#define IGU_NORM_DSB_NUM_SEGS 2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD 128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+ /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1;
+  * [5:2] = 0; [1:0] = PF number)
+  */
+#define IGU_FID_ENCODE_IS_PF (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT 6
+#define IGU_FID_VF_NUM_MASK (0x3f)
+#define IGU_FID_PF_NUM_MASK (0x7)
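
The encoding in the comment above can be captured by two small hypothetical helpers (not part of this patch):

static inline u8 igu_fid_of_vf(u8 vf_num)
{
	return vf_num & IGU_FID_VF_NUM_MASK;		/* bit 6 clear */
}

static inline u8 igu_fid_of_pf(u8 pf_num)
{
	return IGU_FID_ENCODE_IS_PF | (pf_num & IGU_FID_PF_NUM_MASK);
}
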
+
+#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/**
+ * String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type)\
+ (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type)\
+ (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
+ (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
+	(0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
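
A worked example of the packing above, with a hypothetical CID: for _cid = 0x123456, _region = CDU_REGION_NUMBER_XCM_AG (2) and _type = 1, CDU_VALID_DATA evaluates to (0x123456 << 8) | (2 << 4) | 1 = 0x12345621, and CDU_RSRVD_VALUE_TYPE_A then sets bit 7 above the low seven bits of the CRC-8 of that word:

	u32 valid = CDU_VALID_DATA(0x123456, CDU_REGION_NUMBER_XCM_AG, 1);
			/* == 0x12345621 */
	u8 rsrvd = CDU_RSRVD_VALUE_TYPE_A(0x123456,
					  CDU_REGION_NUMBER_XCM_AG, 1);
			/* == 0x80 | (calc_crc8(0x12345621, 0xff) & 0x7f) */
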
+
+/******************************************************************************
+ * Description:
+ *	 Calculates a CRC-8 over a 32-bit word using the 0-1-2-8 polynomial
+ *	 (x^8 + x^2 + x + 1). Code was translated from Verilog.
+ * Return:
+ *	 The resulting CRC-8 value.
+ *****************************************************************************/
+static inline u8 calc_crc8(u32 data, u8 crc)
+{
+ u8 D[32];
+ u8 NewCRC[8];
+ u8 C[8];
+ u8 crc_res;
+ u8 i;
+
+ /* split the data into 32 bits */
+ for (i = 0; i < 32; i++) {
+ D[i] = (u8)(data & 1);
+ data = data >> 1;
+ }
+
+ /* split the crc into 8 bits */
+ for (i = 0; i < 8; i++) {
+ C[i] = crc & 1;
+ crc = crc >> 1;
+ }
+
+ NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+ D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+ C[6] ^ C[7];
+ NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+ D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
+ C[6];
+ NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+ D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+ C[0] ^ C[1] ^ C[4] ^ C[5];
+ NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+ D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+ C[1] ^ C[2] ^ C[5] ^ C[6];
+ NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+ D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+ C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+ NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+ D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+ C[3] ^ C[4] ^ C[7];
+ NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+ D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+ C[5];
+ NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+ D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+ C[6];
+
+ crc_res = 0;
+ for (i = 0; i < 8; i++)
+ crc_res |= (NewCRC[i] << i);
+
+ return crc_res;
+}
+
+
+#endif /* BNX2X_REG_H */
--- /dev/null
- BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+/* bnx2x_stats.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
+
+
+/* Statistics */
+
+/*
+ * General service functions
+ */
+
+static inline long bnx2x_hilo(u32 *hiref)
+{
+ u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+ u32 hi = *hiref;
+
+ return HILO_U64(hi, lo);
+#else
+ return lo;
+#endif
+}
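
Assuming HILO_U64(hi, lo) expands to (((u64)(hi) << 32) + (lo)), as it does elsewhere in this driver, the helper behaves as follows:

	/*
	 * Example: with hiref[0] == 0x00000001 (hi) and
	 * hiref[1] == 0x00000002 (lo), a 64-bit build returns
	 * 0x0000000100000002 while a 32-bit build returns 0x00000002
	 * (the high word is dropped, since long is only 32 bits wide).
	 */
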
+
+/*
+ * Init service functions
+ */
+
+/* Post the next statistics ramrod. Protect it with the spin lock in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod has a sequence number passed in a
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
+static void bnx2x_storm_stats_post(struct bnx2x *bp)
+{
+ if (!bp->stats_pending) {
+ int rc;
+
+ spin_lock_bh(&bp->stats_lock);
+
+ if (bp->stats_pending) {
+ spin_unlock_bh(&bp->stats_lock);
+ return;
+ }
+
+ bp->fw_stats_req->hdr.drv_stats_counter =
+ cpu_to_le16(bp->stats_counter++);
+
+ DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+ bp->fw_stats_req->hdr.drv_stats_counter);
+
+ /* send FW stats ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping),
+ NONE_CONNECTION_TYPE);
+ if (rc == 0)
+ bp->stats_pending = 1;
+
+ spin_unlock_bh(&bp->stats_lock);
+ }
+}
+
+static void bnx2x_hw_stats_post(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ *stats_comp = DMAE_COMP_VAL;
+ if (CHIP_REV_IS_SLOW(bp))
+ return;
+
+ /* loader */
+ if (bp->executer_idx) {
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
+ opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
+
+ memset(dmae, 0, sizeof(struct dmae_command));
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+ dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+ sizeof(struct dmae_command) *
+ (loader_idx + 1)) >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct dmae_command) >> 2;
+ if (CHIP_IS_E1(bp))
+ dmae->len--;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ *stats_comp = 0;
+ bnx2x_post_dmae(bp, dmae, loader_idx);
+
+ } else if (bp->func_stx) {
+ *stats_comp = 0;
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+ }
+}
+
+static int bnx2x_stats_comp(struct bnx2x *bp)
+{
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+ int cnt = 10;
+
+ might_sleep();
+ while (*stats_comp != DMAE_COMP_VAL) {
+ if (!cnt) {
+ BNX2X_ERR("timeout waiting for stats finished\n");
+ break;
+ }
+ cnt--;
+ usleep_range(1000, 1000);
+ }
+ return 1;
+}
+
+/*
+ * Statistics service functions
+ */
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
+ dmae->src_addr_lo = bp->port.port_stx >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->len = DMAE_LEN32_RD_MAX;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_port_stats_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ int port = BP_PORT(bp);
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 mac_addr;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->link_vars.link_up || !bp->port.pmf) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ /* MCP */
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
+
+ if (bp->port.port_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ if (bp->func_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* MAC */
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_GRC);
+
+ /* EMAC is special */
+ if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
+ mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+ /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_RX_STAT_AC_28 */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+ dmae->len = 1;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+ dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ u32 tx_src_addr_lo, rx_src_addr_lo;
+ u16 rx_len, tx_len;
+
+ /* configure the params according to MAC type */
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
+ mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM);
+
+ /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+ BIGMAC_REGISTER_TX_STAT_GTBYT */
+ if (CHIP_IS_E1x(bp)) {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ } else {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ }
+ break;
+
+ case MAC_TYPE_UMAC: /* handled by MSTAT */
+ case MAC_TYPE_XMAC: /* handled by MSTAT */
+ default:
+ mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+ tx_src_addr_lo = (mac_addr +
+ MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ MSTAT_REG_RX_STAT_GR64_LO) >> 2;
+ tx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_tx) >> 2;
+ rx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_rx) >> 2;
+ break;
+ }
+
+ /* TX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = tx_src_addr_lo;
+ dmae->src_addr_hi = 0;
+ dmae->len = tx_len;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* RX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_hi = 0;
+ dmae->src_addr_lo = rx_src_addr_lo;
+ dmae->dst_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->dst_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->len = rx_len;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* NIG */
+ if (!CHIP_IS_E3(bp)) {
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+ NIG_REG_STAT0_BRB_DISCARD) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
+ dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
+
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+static void bnx2x_func_stats_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->func_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (bp->port.pmf)
+ bnx2x_port_stats_init(bp);
+
+ else if (bp->func_stx)
+ bnx2x_func_stats_init(bp);
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_stats_pmf_start(struct bnx2x *bp)
+{
+ bnx2x_stats_comp(bp);
+ bnx2x_stats_pmf_update(bp);
+ bnx2x_stats_start(bp);
+}
+
+static void bnx2x_stats_restart(struct bnx2x *bp)
+{
+ bnx2x_stats_comp(bp);
+ bnx2x_stats_start(bp);
+}
+
+static void bnx2x_bmac_stats_update(struct bnx2x *bp)
+{
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct {
+ u32 lo;
+ u32 hi;
+ } diff;
+
+ if (CHIP_IS_E1x(bp)) {
+ struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
+
+ /* the macros below will use "bmac1_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ } else {
+ struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
+
+ /* the macros below will use "bmac2_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+ }
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_mstat_stats_update(struct bnx2x *bp)
+{
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
+
+ ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+ ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+ ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+ ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+ ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+ ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+
+ ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
+ ADD_STAT64(stats_tx.tx_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ ADD_STAT64(stats_tx.tx_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ ADD_STAT64(stats_tx.tx_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ ADD_STAT64(stats_tx.tx_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ ADD_STAT64(stats_tx.tx_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+ ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+ ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+ ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+ ADD_STAT64(stats_tx.tx_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
+ ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
+ new->stats_tx.tx_gt1518_hi,
+ estats->etherstatspkts1024octetsto1522octets_lo,
+ new->stats_tx.tx_gt1518_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt2047_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt2047_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt4095_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt4095_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt9216_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt9216_lo);
+
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt16383_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt16383_lo);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_emac_stats_update(struct bnx2x *bp)
+{
+ struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+ UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+ UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+ UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+ UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+ UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+ UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+ UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+ ADD_64(estats->pause_frames_received_hi,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+ estats->pause_frames_received_lo,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxonsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxonsent_lo;
+ ADD_64(estats->pause_frames_sent_hi,
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+ estats->pause_frames_sent_lo,
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+static int bnx2x_hw_stats_update(struct bnx2x *bp)
+{
+ struct nig_stats *new = bnx2x_sp(bp, nig_stats);
+ struct nig_stats *old = &(bp->port.old_nig_stats);
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct {
+ u32 lo;
+ u32 hi;
+ } diff;
+
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
+ bnx2x_bmac_stats_update(bp);
+ break;
+
+ case MAC_TYPE_EMAC:
+ bnx2x_emac_stats_update(bp);
+ break;
+
+ case MAC_TYPE_UMAC:
+ case MAC_TYPE_XMAC:
+ bnx2x_mstat_stats_update(bp);
+ break;
+
+ case MAC_TYPE_NONE: /* unreached */
+ DP(BNX2X_MSG_STATS,
+ "stats updated by DMAE but no MAC active\n");
+ return -1;
+
+ default: /* unreached */
+ BNX2X_ERR("Unknown MAC type\n");
+ }
+
+ ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+ new->brb_discard - old->brb_discard);
+ ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+ new->brb_truncate - old->brb_truncate);
+
+ if (!CHIP_IS_E3(bp)) {
+ UPDATE_STAT64_NIG(egress_mac_pkt0,
+ etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64_NIG(egress_mac_pkt1,
+ etherstatspktsover1522octets);
+ }
+
+ memcpy(old, new, sizeof(struct nig_stats));
+
+ memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ sizeof(struct mac_stx));
+ estats->brb_drop_hi = pstats->brb_drop_hi;
+ estats->brb_drop_lo = pstats->brb_drop_lo;
+
+ pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+
+ if (!BP_NOMCP(bp)) {
+ u32 nig_timer_max =
+ SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ BNX2X_ERR("NIG timer max (%u)\n",
+ estats->nig_timer_max);
+ }
+ }
+
+ return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+ struct tstorm_per_port_stats *tport =
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
+ int i;
+ u16 cur_stats_counter;
+
+ /* Make sure we use the value of the counter
+ * used for sending the last stats ramrod.
+ */
+ spin_lock_bh(&bp->stats_lock);
+ cur_stats_counter = bp->stats_counter - 1;
+ spin_unlock_bh(&bp->stats_lock);
+
+ /* are storm stats valid? */
+ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
+ " xstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->xstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
+ " ustorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->ustats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
+ " cstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->cstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
+ " tstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->tstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ memcpy(&(fstats->total_bytes_received_hi),
+ &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
+ sizeof(struct host_func_stats) - 2*sizeof(u32));
+ estats->error_bytes_received_hi = 0;
+ estats->error_bytes_received_lo = 0;
+ estats->etherstatsoverrsizepkts_hi = 0;
+ estats->etherstatsoverrsizepkts_lo = 0;
+ estats->no_buff_discard_hi = 0;
+ estats->no_buff_discard_lo = 0;
+ estats->total_tpa_aggregations_hi = 0;
+ estats->total_tpa_aggregations_lo = 0;
+ estats->total_tpa_aggregated_frames_hi = 0;
+ estats->total_tpa_aggregated_frames_lo = 0;
+ estats->total_tpa_bytes_hi = 0;
+ estats->total_tpa_bytes_lo = 0;
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct tstorm_per_queue_stats *tclient =
+ &bp->fw_stats_data->queue_stats[i].
+ tstorm_queue_statistics;
+ struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct ustorm_per_queue_stats *uclient =
+ &bp->fw_stats_data->queue_stats[i].
+ ustorm_queue_statistics;
+ struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct xstorm_per_queue_stats *xclient =
+ &bp->fw_stats_data->queue_stats[i].
+ xstorm_queue_statistics;
+ struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ u32 diff;
+
+ DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
+ "bcast_sent 0x%x mcast_sent 0x%x\n",
+ i, xclient->ucast_pkts_sent,
+ xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
+
+ DP(BNX2X_MSG_STATS, "---------------\n");
+
+ qstats->total_broadcast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_bcast_bytes.hi);
+ qstats->total_broadcast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_bcast_bytes.lo);
+
+ qstats->total_multicast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_mcast_bytes.hi);
+ qstats->total_multicast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_mcast_bytes.lo);
+
+ qstats->total_unicast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_ucast_bytes.hi);
+ qstats->total_unicast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_ucast_bytes.lo);
+
+ /*
+ * sum unicast/multicast/broadcast into total_bytes_received
+ */
+ qstats->total_bytes_received_hi =
+ qstats->total_broadcast_bytes_received_hi;
+ qstats->total_bytes_received_lo =
+ qstats->total_broadcast_bytes_received_lo;
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_multicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_multicast_bytes_received_lo);
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_unicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_unicast_bytes_received_lo);
+
+ qstats->valid_bytes_received_hi =
+ qstats->total_bytes_received_hi;
+ qstats->valid_bytes_received_lo =
+ qstats->total_bytes_received_lo;
+
+
+ UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
+ total_unicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
+ total_multicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
+ total_broadcast_packets_received);
+ UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
+ etherstatsoverrsizepkts);
+ UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+
+ SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+ total_unicast_packets_received);
+ SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+ total_multicast_packets_received);
+ SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+ total_broadcast_packets_received);
+ UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+ qstats->total_broadcast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->bcast_bytes_sent.hi);
+ qstats->total_broadcast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->bcast_bytes_sent.lo);
+
+ qstats->total_multicast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->mcast_bytes_sent.hi);
+ qstats->total_multicast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->mcast_bytes_sent.lo);
+
+ qstats->total_unicast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->ucast_bytes_sent.hi);
+ qstats->total_unicast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->ucast_bytes_sent.lo);
+ /*
+ * sum unicast/multicast/broadcast into total_bytes_transmitted
+ */
+ qstats->total_bytes_transmitted_hi =
+ qstats->total_unicast_bytes_transmitted_hi;
+ qstats->total_bytes_transmitted_lo =
+ qstats->total_unicast_bytes_transmitted_lo;
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_broadcast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_broadcast_bytes_transmitted_lo);
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_multicast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_multicast_bytes_transmitted_lo);
+
+ UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
+ total_unicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
+ total_multicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
+ total_broadcast_packets_transmitted);
+
+ UPDATE_EXTEND_TSTAT(checksum_discard,
+ total_packets_received_checksum_discarded);
+ UPDATE_EXTEND_TSTAT(ttl0_discard,
+ total_packets_received_ttl0_discarded);
+
+ UPDATE_EXTEND_XSTAT(error_drop_pkts,
+ total_transmitted_dropped_packets_error);
+
+ /* TPA aggregations completed */
+ UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
+ /* Number of network frames aggregated by TPA */
+ UPDATE_EXTEND_USTAT(coalesced_pkts,
+ total_tpa_aggregated_frames);
+ /* Total number of bytes in completed TPA aggregations */
+ qstats->total_tpa_bytes_lo =
+ le32_to_cpu(uclient->coalesced_bytes.lo);
+ qstats->total_tpa_bytes_hi =
+ le32_to_cpu(uclient->coalesced_bytes.hi);
+
+ /* TPA stats per-function */
+ ADD_64(estats->total_tpa_aggregations_hi,
+ qstats->total_tpa_aggregations_hi,
+ estats->total_tpa_aggregations_lo,
+ qstats->total_tpa_aggregations_lo);
+ ADD_64(estats->total_tpa_aggregated_frames_hi,
+ qstats->total_tpa_aggregated_frames_hi,
+ estats->total_tpa_aggregated_frames_lo,
+ qstats->total_tpa_aggregated_frames_lo);
+ ADD_64(estats->total_tpa_bytes_hi,
+ qstats->total_tpa_bytes_hi,
+ estats->total_tpa_bytes_lo,
+ qstats->total_tpa_bytes_lo);
+
+ ADD_64(fstats->total_bytes_received_hi,
+ qstats->total_bytes_received_hi,
+ fstats->total_bytes_received_lo,
+ qstats->total_bytes_received_lo);
+ ADD_64(fstats->total_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_hi,
+ fstats->total_bytes_transmitted_lo,
+ qstats->total_bytes_transmitted_lo);
+ ADD_64(fstats->total_unicast_packets_received_hi,
+ qstats->total_unicast_packets_received_hi,
+ fstats->total_unicast_packets_received_lo,
+ qstats->total_unicast_packets_received_lo);
+ ADD_64(fstats->total_multicast_packets_received_hi,
+ qstats->total_multicast_packets_received_hi,
+ fstats->total_multicast_packets_received_lo,
+ qstats->total_multicast_packets_received_lo);
+ ADD_64(fstats->total_broadcast_packets_received_hi,
+ qstats->total_broadcast_packets_received_hi,
+ fstats->total_broadcast_packets_received_lo,
+ qstats->total_broadcast_packets_received_lo);
+ ADD_64(fstats->total_unicast_packets_transmitted_hi,
+ qstats->total_unicast_packets_transmitted_hi,
+ fstats->total_unicast_packets_transmitted_lo,
+ qstats->total_unicast_packets_transmitted_lo);
+ ADD_64(fstats->total_multicast_packets_transmitted_hi,
+ qstats->total_multicast_packets_transmitted_hi,
+ fstats->total_multicast_packets_transmitted_lo,
+ qstats->total_multicast_packets_transmitted_lo);
+ ADD_64(fstats->total_broadcast_packets_transmitted_hi,
+ qstats->total_broadcast_packets_transmitted_hi,
+ fstats->total_broadcast_packets_transmitted_lo,
+ qstats->total_broadcast_packets_transmitted_lo);
+ ADD_64(fstats->valid_bytes_received_hi,
+ qstats->valid_bytes_received_hi,
+ fstats->valid_bytes_received_lo,
+ qstats->valid_bytes_received_lo);
+
+ ADD_64(estats->etherstatsoverrsizepkts_hi,
+ qstats->etherstatsoverrsizepkts_hi,
+ estats->etherstatsoverrsizepkts_lo,
+ qstats->etherstatsoverrsizepkts_lo);
+ ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
+ estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+ }
+
+ ADD_64(fstats->total_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ fstats->total_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ ADD_64(fstats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ fstats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ memcpy(estats, &(fstats->total_bytes_received_hi),
+ sizeof(struct host_func_stats) - 2*sizeof(u32));
+
+ ADD_64(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ ADD_64(estats->etherstatsoverrsizepkts_hi,
+ estats->rx_stat_dot3statsframestoolong_hi,
+ estats->etherstatsoverrsizepkts_lo,
+ estats->rx_stat_dot3statsframestoolong_lo);
+ ADD_64(estats->error_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ estats->error_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ if (bp->port.pmf) {
+ estats->mac_filter_discard =
+ le32_to_cpu(tport->mac_filter_discard);
+ estats->mf_tag_discard =
+ le32_to_cpu(tport->mf_tag_discard);
+ estats->brb_truncate_discard =
+ le32_to_cpu(tport->brb_truncate_discard);
+ estats->mac_discard = le32_to_cpu(tport->mac_discard);
+ }
+
+ fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+ bp->stats_pending = 0;
+
+ return 0;
+}
+
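+/*
+ * Fold the adapter's 64-bit hi/lo counters into the generic
+ * struct net_device_stats. bnx2x_hilo() merges a {hi, lo} pair into
+ * one unsigned long (only the low word survives on 32-bit kernels).
+ */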
+static void bnx2x_net_stats_update(struct bnx2x *bp)
+{
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct net_device_stats *nstats = &bp->dev->stats;
+ unsigned long tmp;
+ int i;
+
+ nstats->rx_packets =
+ bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+ bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+ bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+ nstats->tx_packets =
+ bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+ bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+ bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+ nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+ nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
+ tmp = estats->mac_discard;
+ for_each_rx_queue(bp, i)
+ tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ nstats->rx_dropped = tmp;
+
+ nstats->tx_dropped = 0;
+
+ nstats->multicast =
+ bnx2x_hilo(&estats->total_multicast_packets_received_hi);
+
+ nstats->collisions =
+ bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
+
+ nstats->rx_length_errors =
+ bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+ bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+ nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+ bnx2x_hilo(&estats->brb_truncate_hi);
+ nstats->rx_crc_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+ nstats->rx_frame_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+ nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
+ nstats->rx_missed_errors = 0;
+
+ nstats->rx_errors = nstats->rx_length_errors +
+ nstats->rx_over_errors +
+ nstats->rx_crc_errors +
+ nstats->rx_frame_errors +
+ nstats->rx_fifo_errors +
+ nstats->rx_missed_errors;
+
+ nstats->tx_aborted_errors =
+ bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+ bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+ nstats->tx_carrier_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
+ nstats->tx_fifo_errors = 0;
+ nstats->tx_heartbeat_errors = 0;
+ nstats->tx_window_errors = 0;
+
+ nstats->tx_errors = nstats->tx_aborted_errors +
+ nstats->tx_carrier_errors +
+ bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
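+/*
+ * Aggregate the counters the driver maintains itself (Xoff events,
+ * error discards, skb allocation failures, checksum errors) from
+ * every queue into the global eth_stats block.
+ */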
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ int i;
+
+ estats->driver_xoff = 0;
+ estats->rx_err_discard_pkt = 0;
+ estats->rx_skb_alloc_failed = 0;
+ estats->hw_csum_err = 0;
+ for_each_queue(bp, i) {
+ struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+
+ estats->driver_xoff += qstats->driver_xoff;
+ estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
+ estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
+ estats->hw_csum_err += qstats->hw_csum_err;
+ }
+}
+
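+/*
+ * Management firmware may ask the driver to freeze statistics
+ * collection via the edebug_driver_if word in shared memory;
+ * report whether the DISABLE_STAT opcode is currently set.
+ */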
+static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
+{
+ u32 val;
+
+ if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
+ val = SHMEM2_RD(bp, edebug_driver_if[1]);
+
+ if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
+ return true;
+ }
+
+ return false;
+}
+
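+/*
+ * Periodic statistics refresh: bail out if the previous DMAE pass
+ * has not completed yet, let the PMF refresh the MAC/NIG hardware
+ * counters, and panic if the storm firmware fails to deliver fresh
+ * statistics three times in a row.
+ */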
+static void bnx2x_stats_update(struct bnx2x *bp)
+{
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ if (bnx2x_edebug_stats_stopped(bp))
+ return;
+
+ if (*stats_comp != DMAE_COMP_VAL)
+ return;
+
+ if (bp->port.pmf)
+ bnx2x_hw_stats_update(bp);
+
+ if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
+ BNX2X_ERR("storm stats were not updated for 3 times\n");
+ bnx2x_panic();
+ return;
+ }
+
+ bnx2x_net_stats_update(bp);
+ bnx2x_drv_stats_update(bp);
+
+ if (netif_msg_timer(bp)) {
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ int i, cos;
+
+ netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
+ estats->brb_drop_lo, estats->brb_truncate_lo);
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+ pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
+ fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+ fp->rx_comp_cons),
+ le16_to_cpu(*fp->rx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_received_hi),
+ fp->rx_calls, fp->rx_pkt);
+ }
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_fp_txdata *txdata;
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct netdev_queue *txq;
+
+ pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
+ fp->name,
+ bnx2x_hilo(
+ &qstats->total_unicast_packets_transmitted_hi),
+ qstats->driver_xoff);
+
+ for_each_cos_in_tx_queue(fp, cos) {
+ txdata = &fp->txdata[cos];
+ txq = netdev_get_tx_queue(bp->dev,
+ FP_COS_TO_TXQ(fp, cos));
+
+ pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
+ cos,
+ bnx2x_tx_avail(bp, txdata),
+ le16_to_cpu(*txdata->tx_cons_sb),
+ txdata->tx_pkt,
+ (netif_tx_queue_stopped(txq) ?
+ "Xoff" : "Xon")
+ );
+ }
+ }
+ }
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+}
+
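+/*
+ * Take a last snapshot of the port (and, when present, function)
+ * statistics blocks out to shared memory. With both blocks queued,
+ * the first DMAE completes to GRC so the loader can kick off the
+ * second transfer automatically.
+ */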
+static void bnx2x_port_stats_stop(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ bp->executer_idx = 0;
+
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
+
+ if (bp->port.port_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ if (bp->func_stx)
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_GRC);
+ else
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_PCI);
+
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_port_stats) >> 2;
+ if (bp->func_stx) {
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ dmae->comp_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+ }
+
+ if (bp->func_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode =
+ bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+}
+
+static void bnx2x_stats_stop(struct bnx2x *bp)
+{
+ int update = 0;
+
+ bnx2x_stats_comp(bp);
+
+ if (bp->port.pmf)
+ update = (bnx2x_hw_stats_update(bp) == 0);
+
+ update |= (bnx2x_storm_stats_update(bp) == 0);
+
+ if (update) {
+ bnx2x_net_stats_update(bp);
+
+ if (bp->port.pmf)
+ bnx2x_port_stats_stop(bp);
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+}
+
+static void bnx2x_stats_do_nothing(struct bnx2x *bp)
+{
+}
+
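+/*
+ * Statistics state machine: indexed by the current state
+ * (DISABLED/ENABLED) and the incoming event (PMF, LINK_UP, UPDATE,
+ * STOP); each cell names the action to run and the next state.
+ */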
+static const struct {
+ void (*action)(struct bnx2x *bp);
+ enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+/* state event */
+{
+/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
+/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
+/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
+/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
+},
+{
+/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
+/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
+/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
+/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
+}
+};
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+{
+ enum bnx2x_stats_state state;
+ if (unlikely(bp->panic))
+ return;
+ bnx2x_stats_stm[bp->stats_state][event].action(bp);
+ spin_lock_bh(&bp->stats_lock);
+ state = bp->stats_state;
+ bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+ spin_unlock_bh(&bp->stats_lock);
+
+ if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+ DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+ state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->port.pmf || !bp->port.port_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
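+/*
+ * Run one function-statistics init cycle per virtual node: point
+ * bp->func_stx at each VN's fw_mb_param mailbox value in turn,
+ * post the DMAE, then restore our own func_stx.
+ */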
+static void bnx2x_func_stats_base_init(struct bnx2x *bp)
+{
+	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
+ u32 func_stx;
+
+ /* sanity */
+ if (!bp->port.pmf || !bp->func_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ /* save our func_stx */
+ func_stx = bp->func_stx;
+
+ for (vn = VN_0; vn < vn_max; vn++) {
+		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
+
+ bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+ bnx2x_func_stats_init(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+
+ /* restore our func_stx */
+ bp->func_stx = func_stx;
+}
+
+static void bnx2x_func_stats_base_update(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->func_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = bp->func_stx >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
+/**
+ * This function prepares the statistics ramrod data so that later we
+ * only have to increment the statistics counter and send the ramrod
+ * each time we need to.
+ *
+ * @param bp
+ */
+static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+ int i;
+ struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+
+ stats_hdr->cmd_num = bp->fw_stats_num;
+ stats_hdr->drv_stats_counter = 0;
+
+	/* storm_counters struct contains the counters of completed
+	 * statistics requests per storm which are incremented by FW
+	 * each time it completes handling a statistics ramrod. We will
+	 * check these counters in the timer handler and discard a
+	 * stale (statistics) ramrod completion.
+	 */
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+ stats_hdr->stats_counters_addrs.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ stats_hdr->stats_counters_addrs.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+	/* prepare for the first stats ramrod (it will be completed with
+	 * the counters equal to zero) - init the counters to something
+	 * different.
+	 */
+ memset(&bp->fw_stats_data->storm_counters, 0xff,
+ sizeof(struct stats_counter));
+
+ /**** Port FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, port);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PORT;
+	/* For a port query the index is a DON'T CARE */
+	cur_query_entry->index = BP_PORT(bp);
+	/* For a port query the funcID is a DON'T CARE */
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** PF FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, pf);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PF;
+	/* For a PF query the index is a DON'T CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** Clients' queries ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ for_each_eth_queue(bp, i) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+ cur_data_offset += sizeof(struct per_queue_stats);
+ }
+}
+
+void bnx2x_stats_init(struct bnx2x *bp)
+{
+ int /*abs*/port = BP_PORT(bp);
+ int mb_idx = BP_FW_MB_IDX(bp);
+ int i;
+
+ bp->stats_pending = 0;
+ bp->executer_idx = 0;
+ bp->stats_counter = 0;
+
+ /* port and func stats for management */
+ if (!BP_NOMCP(bp)) {
+ bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
+ bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+
+ } else {
+ bp->port.port_stx = 0;
+ bp->func_stx = 0;
+ }
+ DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
+ bp->port.port_stx, bp->func_stx);
+
+ port = BP_PORT(bp);
+ /* port stats */
+ memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
+ bp->port.old_nig_stats.brb_discard =
+ REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+ bp->port.old_nig_stats.brb_truncate =
+ REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+ if (!CHIP_IS_E3(bp)) {
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ }
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
+ memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
+ memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
+ }
+
+ /* Prepare statistics ramrod data */
+ bnx2x_prep_fw_stats_req(bp);
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf) {
+ if (bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+ if (bp->func_stx)
+ bnx2x_func_stats_base_init(bp);
+
+ } else if (bp->func_stx)
+ bnx2x_func_stats_base_update(bp);
+}
--- /dev/null
+/*
+ * tg3.c: Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
+ *
+ * Firmware is:
+ * Derived from proprietary unpublished source code,
+ * Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ * Permission is hereby granted for the distribution of this firmware
+ * data in hexadecimal or equivalent format, provided this copyright
+ * notice is accompanying it.
+ */
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/brcmphy.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <asm/system.h>
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
+#define BAR_0 0
+#define BAR_2 2
+
+#include "tg3.h"
+
+/* Functions & macros to verify TG3_FLAGS types */
+
+static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ return test_bit(flag, bits);
+}
+
+static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ set_bit(flag, bits);
+}
+
+static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ clear_bit(flag, bits);
+}
+
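+/* The token-pasting wrappers below make flag access read naturally:
+ * tg3_flag(tp, TAGGED_STATUS) expands to
+ * test_bit(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags).
+ */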
+#define tg3_flag(tp, flag) \
+ _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_set(tp, flag) \
+ _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_clear(tp, flag) \
+ _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
+
+#define DRV_MODULE_NAME "tg3"
+#define TG3_MAJ_NUM 3
+#define TG3_MIN_NUM 120
+#define DRV_MODULE_VERSION \
+ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
+#define DRV_MODULE_RELDATE "August 18, 2011"
+
+#define RESET_KIND_SHUTDOWN 0
+#define RESET_KIND_INIT 1
+#define RESET_KIND_SUSPEND 2
+
+#define TG3_DEF_RX_MODE 0
+#define TG3_DEF_TX_MODE 0
+#define TG3_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+
+#define TG3_TX_TIMEOUT (5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define TG3_MIN_MTU 60
+#define TG3_MAX_MTU(tp) \
+ (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
+
+/* These numbers seem to be hard coded in the NIC firmware somehow.
+ * You can't change the ring sizes, but you can change where you place
+ * them in the NIC onboard memory.
+ */
+#define TG3_RX_STD_RING_SIZE(tp) \
+ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+ TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
+#define TG3_DEF_RX_RING_PENDING 200
+#define TG3_RX_JMB_RING_SIZE(tp) \
+ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
+#define TG3_DEF_RX_JUMBO_RING_PENDING 100
+#define TG3_RSS_INDIR_TBL_SIZE 128
+
+/* Do not place this n-ring entries value into the tp struct itself,
+ * we really want to expose these constants to GCC so that modulo et
+ * al. operations are done with shifts and masks instead of with
+ * hw multiply/modulo instructions. Another solution would be to
+ * replace things like '% foo' with '& (foo - 1)'.
+ */
+
+#define TG3_TX_RING_SIZE 512
+#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
+
+#define TG3_RX_STD_RING_BYTES(tp) \
+ (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+ (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+ (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
+#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
+ TG3_TX_RING_SIZE)
+#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
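+/* With TG3_TX_RING_SIZE a power of two (512), the mask above wraps
+ * the producer index without a divide: NEXT_TX(511) == 0.
+ */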
+
+#define TG3_DMA_BYTE_ENAB 64
+
+#define TG3_RX_STD_DMA_SZ 1536
+#define TG3_RX_JMB_DMA_SZ 9046
+
+#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
+
+#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
+#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
+
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+ (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
+
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+ (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode. The driver
+ * works around this bug by double copying the packet. This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient. For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path. Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD 256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
+
+#if (NET_IP_ALIGN != 0)
+#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
+#else
+#define TG3_RX_OFFSET(tp) 0
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
+#define TG3_TX_BD_DMA_MAX 4096
+
+#define TG3_RAW_IP_ALIGN 2
+
+#define TG3_FW_UPDATE_TIMEOUT_SEC 5
+
+#define FIRMWARE_TG3 "tigon/tg3.bin"
+#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
+#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
+
+MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FIRMWARE_TG3);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
+
+static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
+module_param(tg3_debug, int, 0);
+MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
+ {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+ {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "rx_octets" },
+ { "rx_fragments" },
+ { "rx_ucast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_bcast_packets" },
+ { "rx_fcs_errors" },
+ { "rx_align_errors" },
+ { "rx_xon_pause_rcvd" },
+ { "rx_xoff_pause_rcvd" },
+ { "rx_mac_ctrl_rcvd" },
+ { "rx_xoff_entered" },
+ { "rx_frame_too_long_errors" },
+ { "rx_jabbers" },
+ { "rx_undersize_packets" },
+ { "rx_in_length_errors" },
+ { "rx_out_length_errors" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+ { "rx_1523_to_2047_octet_packets" },
+ { "rx_2048_to_4095_octet_packets" },
+ { "rx_4096_to_8191_octet_packets" },
+ { "rx_8192_to_9022_octet_packets" },
+
+ { "tx_octets" },
+ { "tx_collisions" },
+
+ { "tx_xon_sent" },
+ { "tx_xoff_sent" },
+ { "tx_flow_control" },
+ { "tx_mac_errors" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+ { "tx_deferred" },
+ { "tx_excessive_collisions" },
+ { "tx_late_collisions" },
+ { "tx_collide_2times" },
+ { "tx_collide_3times" },
+ { "tx_collide_4times" },
+ { "tx_collide_5times" },
+ { "tx_collide_6times" },
+ { "tx_collide_7times" },
+ { "tx_collide_8times" },
+ { "tx_collide_9times" },
+ { "tx_collide_10times" },
+ { "tx_collide_11times" },
+ { "tx_collide_12times" },
+ { "tx_collide_13times" },
+ { "tx_collide_14times" },
+ { "tx_collide_15times" },
+ { "tx_ucast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_bcast_packets" },
+ { "tx_carrier_sense_errors" },
+ { "tx_discards" },
+ { "tx_errors" },
+
+ { "dma_writeq_full" },
+ { "dma_write_prioq_full" },
+ { "rxbds_empty" },
+ { "rx_discards" },
+ { "rx_errors" },
+ { "rx_threshold_hit" },
+
+ { "dma_readq_full" },
+ { "dma_read_prioq_full" },
+ { "tx_comp_queue_full" },
+
+ { "ring_set_send_prod_index" },
+ { "ring_status_update" },
+ { "nic_irqs" },
+ { "nic_avoided_irqs" },
+ { "nic_tx_threshold_hit" },
+
+ { "mbuf_lwm_thresh_hit" },
+};
+
+#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
+
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[] = {
+ { "nvram test (online) " },
+ { "link test (online) " },
+ { "register test (offline)" },
+ { "memory test (offline)" },
+ { "mac loopback test (offline)" },
+ { "phy loopback test (offline)" },
+ { "ext loopback test (offline)" },
+ { "interrupt test (offline)" },
+};
+
+#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
+
+
+static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->regs + off);
+}
+
+static u32 tg3_read32(struct tg3 *tp, u32 off)
+{
+ return readl(tp->regs + off);
+}
+
+static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->aperegs + off);
+}
+
+static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
+{
+ return readl(tp->aperegs + off);
+}
+
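+/*
+ * Indirect register access: under indirect_lock, the target offset
+ * is programmed into TG3PCI_REG_BASE_ADDR in PCI config space and
+ * the value is then transferred through TG3PCI_REG_DATA.
+ */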
+static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->regs + off);
+ readl(tp->regs + off);
+}
+
+static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ return val;
+}
+
+static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+ unsigned long flags;
+
+ if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
+ TG3_64BIT_REG_LOW, val);
+ return;
+ }
+ if (off == TG3_RX_STD_PROD_IDX_REG) {
+ pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
+ TG3_64BIT_REG_LOW, val);
+ return;
+ }
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+
+ /* In indirect mode when disabling interrupts, we also need
+ * to clear the interrupt bit in the GRC local ctrl register.
+ */
+ if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
+ (val == 0x1)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
+ tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
+ }
+}
+
+static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ return val;
+}
+
+/* usec_wait specifies the wait time in usec when writing to certain registers
+ * where it is unsafe to read back the register without some delay.
+ * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
+ * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
+ */
+static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
+{
+ if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
+ /* Non-posted methods */
+ tp->write32(tp, off, val);
+ else {
+ /* Posted method */
+ tg3_write32(tp, off, val);
+ if (usec_wait)
+ udelay(usec_wait);
+ tp->read32(tp, off);
+ }
+ /* Wait again after the read for the posted method to guarantee that
+ * the wait time is met.
+ */
+ if (usec_wait)
+ udelay(usec_wait);
+}
+
+static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
+{
+ tp->write32_mbox(tp, off, val);
+ if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+ tp->read32_mbox(tp, off);
+}
+
+static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+ void __iomem *mbox = tp->regs + off;
+ writel(val, mbox);
+ if (tg3_flag(tp, TXD_MBOX_HWBUG))
+ writel(val, mbox);
+ if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ readl(mbox);
+}
+
+static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
+{
+ return readl(tp->regs + off + GRCMBOX_BASE);
+}
+
+static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->regs + off + GRCMBOX_BASE);
+}
+
+#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
+#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
+
+#define tw32(reg, val) tp->write32(tp, reg, val)
+#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg) tp->read32(tp, reg)
+
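+/*
+ * NIC SRAM is reached through a movable window: program the target
+ * offset into TG3PCI_MEM_WIN_BASE_ADDR, transfer the value through
+ * TG3PCI_MEM_WIN_DATA (via PCI config space when SRAM_USE_CONFIG is
+ * set, otherwise via MMIO), then park the window back at zero.
+ */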
+static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
+{
+ unsigned long flags;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
+ return;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ } else {
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+ tw32_f(TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ }
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
+{
+ unsigned long flags;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
+ *val = 0;
+ return;
+ }
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ } else {
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+ *val = tr32(TG3PCI_MEM_WIN_DATA);
+
+ /* Always leave this as zero. */
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ }
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_ape_lock_init(struct tg3 *tp)
+{
+ int i;
+ u32 regbase, bit;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ regbase = TG3_APE_LOCK_GRANT;
+ else
+ regbase = TG3_APE_PER_LOCK_GRANT;
+
+ /* Make sure the driver hasn't any stale locks. */
+ for (i = 0; i < 8; i++) {
+ if (i == TG3_APE_LOCK_GPIO)
+ continue;
+ tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
+ }
+
+ /* Clear the correct bit of the GPIO lock too. */
+ if (!tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
+}
+
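+/*
+ * APE lock protocol: write our request bit, poll the matching grant
+ * register for up to 1 millisecond (100 x 10 usec), and revoke the
+ * request by writing the bit back to the grant register if the lock
+ * never arrives.
+ */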
+static int tg3_ape_lock(struct tg3 *tp, int locknum)
+{
+ int i, off;
+ int ret = 0;
+ u32 status, req, gnt, bit;
+
+ if (!tg3_flag(tp, ENABLE_APE))
+ return 0;
+
+ switch (locknum) {
+ case TG3_APE_LOCK_GPIO:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ return 0;
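+		/* fallthru */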
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ req = TG3_APE_LOCK_REQ;
+ gnt = TG3_APE_LOCK_GRANT;
+ } else {
+ req = TG3_APE_PER_LOCK_REQ;
+ gnt = TG3_APE_PER_LOCK_GRANT;
+ }
+
+ off = 4 * locknum;
+
+ if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+ bit = APE_LOCK_REQ_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, req + off, bit);
+
+ /* Wait for up to 1 millisecond to acquire lock. */
+ for (i = 0; i < 100; i++) {
+ status = tg3_ape_read32(tp, gnt + off);
+ if (status == bit)
+ break;
+ udelay(10);
+ }
+
+ if (status != bit) {
+ /* Revoke the lock request. */
+ tg3_ape_write32(tp, gnt + off, bit);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+static void tg3_ape_unlock(struct tg3 *tp, int locknum)
+{
+ u32 gnt, bit;
+
+ if (!tg3_flag(tp, ENABLE_APE))
+ return;
+
+ switch (locknum) {
+ case TG3_APE_LOCK_GPIO:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ return;
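+		/* fallthru */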
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ gnt = TG3_APE_LOCK_GRANT;
+ else
+ gnt = TG3_APE_PER_LOCK_GRANT;
+
+ if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, gnt + 4 * locknum, bit);
+}
+
+static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int i;
+ u32 apedata;
+
+ /* NCSI does not support APE events */
+ if (tg3_flag(tp, APE_HAS_NCSI))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ for (i = 0; i < 10; i++) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(100);
+ }
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+}
+
+static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
+{
+ u32 event;
+ u32 apedata;
+
+ if (!tg3_flag(tp, ENABLE_APE))
+ return;
+
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+ TG3_APE_HOST_DRVR_STATE_START);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
+ if (device_may_wakeup(&tp->pdev->dev) &&
+ tg3_flag(tp, WOL_ENABLE)) {
+ tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+ TG3_APE_HOST_WOL_SPEED_AUTO);
+ apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+ } else
+ apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
+ }
+
+ event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
+
+ tg3_ape_send_event(tp, event);
+}
+
+static void tg3_disable_ints(struct tg3 *tp)
+{
+ int i;
+
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
+ for (i = 0; i < tp->irq_max; i++)
+ tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
+}
+
+static void tg3_enable_ints(struct tg3 *tp)
+{
+ int i;
+
+ tp->irq_sync = 0;
+ wmb();
+
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+
+ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+ if (tg3_flag(tp, 1SHOT_MSI))
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+ tp->coal_now |= tnapi->coal_now;
+ }
+
+ /* Force an initial interrupt */
+ if (!tg3_flag(tp, TAGGED_STATUS) &&
+ (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
+ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+ else
+ tw32(HOSTCC_MODE, tp->coal_now);
+
+ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
+}
+
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
+{
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+ unsigned int work_exists = 0;
+
+ /* check for phy events */
+ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+ if (sblk->status & SD_STATUS_LINK_CHG)
+ work_exists = 1;
+ }
+ /* check for RX/TX work to do */
+ if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+ *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ work_exists = 1;
+
+ return work_exists;
+}
+
+/* tg3_int_reenable
+ * similar to tg3_enable_ints, but it accurately determines whether there
+ * is new work pending and can return without flushing the PIO write
+ * which reenables interrupts
+ */
+static void tg3_int_reenable(struct tg3_napi *tnapi)
+{
+ struct tg3 *tp = tnapi->tp;
+
+ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+ mmiowb();
+
+ /* When doing tagged status, this work check is unnecessary.
+ * The last_tag we write above tells the chip which piece of
+ * work we've completed.
+ */
+ if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
+ tw32(HOSTCC_MODE, tp->coalesce_mode |
+ HOSTCC_MODE_ENABLE | tnapi->coal_now);
+}
+
+static void tg3_switch_clocks(struct tg3 *tp)
+{
+ u32 clock_ctrl;
+ u32 orig_clock_ctrl;
+
+ if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
+ return;
+
+ clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
+
+ orig_clock_ctrl = clock_ctrl;
+ clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
+ CLOCK_CTRL_CLKRUN_OENABLE |
+ 0x1f);
+ tp->pci_clock_ctrl = clock_ctrl;
+
+ if (tg3_flag(tp, 5705_PLUS)) {
+ if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl | CLOCK_CTRL_625_CORE, 40);
+ }
+ } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl |
+ (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
+ 40);
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl | (CLOCK_CTRL_ALTCLK),
+ 40);
+ }
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
+}
+
+#define PHY_BUSY_LOOPS 5000
+
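+/*
+ * MII management access: auto-polling is paused, a read/write frame
+ * (PHY address, register, command) is composed in MAC_MI_COM, and
+ * MI_COM_BUSY is polled for up to PHY_BUSY_LOOPS iterations at
+ * 10 usec each before giving up with -EBUSY.
+ */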
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+ u32 frame_val;
+ unsigned int loops;
+ int ret;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ *val = 0x0;
+
+ frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ MI_COM_PHY_ADDR_MASK);
+ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+ MI_COM_REG_ADDR_MASK);
+ frame_val |= (MI_COM_CMD_READ | MI_COM_START);
+
+ tw32_f(MAC_MI_COM, frame_val);
+
+ loops = PHY_BUSY_LOOPS;
+ while (loops != 0) {
+ udelay(10);
+ frame_val = tr32(MAC_MI_COM);
+
+ if ((frame_val & MI_COM_BUSY) == 0) {
+ udelay(5);
+ frame_val = tr32(MAC_MI_COM);
+ break;
+ }
+ loops -= 1;
+ }
+
+ ret = -EBUSY;
+ if (loops != 0) {
+ *val = frame_val & MI_COM_DATA_MASK;
+ ret = 0;
+ }
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ return ret;
+}
+
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+ u32 frame_val;
+ unsigned int loops;
+ int ret;
+
+ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+ (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
+ return 0;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ MI_COM_PHY_ADDR_MASK);
+ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+ MI_COM_REG_ADDR_MASK);
+ frame_val |= (val & MI_COM_DATA_MASK);
+ frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
+
+ tw32_f(MAC_MI_COM, frame_val);
+
+ loops = PHY_BUSY_LOOPS;
+ while (loops != 0) {
+ udelay(10);
+ frame_val = tr32(MAC_MI_COM);
+ if ((frame_val & MI_COM_BUSY) == 0) {
+ udelay(5);
+ frame_val = tr32(MAC_MI_COM);
+ break;
+ }
+ loops -= 1;
+ }
+
+ ret = -EBUSY;
+ if (loops != 0)
+ ret = 0;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ return ret;
+}
+
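+/*
+ * Clause-45 MMD access through clause-22 registers: select the
+ * device in MII_TG3_MMD_CTRL, latch the register address through
+ * MII_TG3_MMD_ADDRESS, switch to no-increment data mode, then move
+ * the data word through MII_TG3_MMD_ADDRESS.
+ */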
+static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+ return err;
+}
+
+static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+ if (err)
+ goto done;
+
+ err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+ return err;
+}
+
+static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ if (!err)
+ err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
+
+ return err;
+}
+
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ if (!err)
+ err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+ return err;
+}
+
+static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
+ (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
+ MII_TG3_AUXCTL_SHDWSEL_MISC);
+ if (!err)
+ err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
+
+ return err;
+}
+
+static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+{
+ if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
+ set |= MII_TG3_AUXCTL_MISC_WREN;
+
+ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+}
+
+#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+ MII_TG3_AUXCTL_ACTL_TX_6DB)
+
+#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+			     MII_TG3_AUXCTL_ACTL_TX_6DB)
+
+static int tg3_bmcr_reset(struct tg3 *tp)
+{
+ u32 phy_control;
+ int limit, err;
+
+ /* OK, reset it, and poll the BMCR_RESET bit until it
+ * clears or we time out.
+ */
+ phy_control = BMCR_RESET;
+ err = tg3_writephy(tp, MII_BMCR, phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ limit = 5000;
+ while (limit--) {
+ err = tg3_readphy(tp, MII_BMCR, &phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ if ((phy_control & BMCR_RESET) == 0) {
+ udelay(40);
+ break;
+ }
+ udelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
+{
+ struct tg3 *tp = bp->priv;
+ u32 val;
+
+ spin_lock_bh(&tp->lock);
+
+ if (tg3_readphy(tp, reg, &val))
+ val = -EIO;
+
+ spin_unlock_bh(&tp->lock);
+
+ return val;
+}
+
+static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
+{
+ struct tg3 *tp = bp->priv;
+ u32 ret = 0;
+
+ spin_lock_bh(&tp->lock);
+
+ if (tg3_writephy(tp, reg, val))
+ ret = -EIO;
+
+ spin_unlock_bh(&tp->lock);
+
+ return ret;
+}
+
+static int tg3_mdio_reset(struct mii_bus *bp)
+{
+ return 0;
+}
+
+static void tg3_mdio_config_5785(struct tg3 *tp)
+{
+ u32 val;
+ struct phy_device *phydev;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
+ val = MAC_PHYCFG2_50610_LED_MODES;
+ break;
+ case PHY_ID_BCMAC131:
+ val = MAC_PHYCFG2_AC131_LED_MODES;
+ break;
+ case PHY_ID_RTL8211C:
+ val = MAC_PHYCFG2_RTL8211C_LED_MODES;
+ break;
+ case PHY_ID_RTL8201E:
+ val = MAC_PHYCFG2_RTL8201E_LED_MODES;
+ break;
+ default:
+ return;
+ }
+
+ if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
+ tw32(MAC_PHYCFG2, val);
+
+ val = tr32(MAC_PHYCFG1);
+ val &= ~(MAC_PHYCFG1_RGMII_INT |
+ MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
+ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
+ tw32(MAC_PHYCFG1, val);
+
+ return;
+ }
+
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
+ val |= MAC_PHYCFG2_EMODE_MASK_MASK |
+ MAC_PHYCFG2_FMODE_MASK_MASK |
+ MAC_PHYCFG2_GMODE_MASK_MASK |
+ MAC_PHYCFG2_ACT_MASK_MASK |
+ MAC_PHYCFG2_QUAL_MASK_MASK |
+ MAC_PHYCFG2_INBAND_ENABLE;
+
+ tw32(MAC_PHYCFG2, val);
+
+ val = tr32(MAC_PHYCFG1);
+ val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
+ MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+ val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+ val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
+ }
+ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
+ MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
+ tw32(MAC_PHYCFG1, val);
+
+ val = tr32(MAC_EXT_RGMII_MODE);
+ val &= ~(MAC_RGMII_MODE_RX_INT_B |
+ MAC_RGMII_MODE_RX_QUALITY |
+ MAC_RGMII_MODE_RX_ACTIVITY |
+ MAC_RGMII_MODE_RX_ENG_DET |
+ MAC_RGMII_MODE_TX_ENABLE |
+ MAC_RGMII_MODE_TX_LOWPWR |
+ MAC_RGMII_MODE_TX_RESET);
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+ val |= MAC_RGMII_MODE_RX_INT_B |
+ MAC_RGMII_MODE_RX_QUALITY |
+ MAC_RGMII_MODE_RX_ACTIVITY |
+ MAC_RGMII_MODE_RX_ENG_DET;
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+ val |= MAC_RGMII_MODE_TX_ENABLE |
+ MAC_RGMII_MODE_TX_LOWPWR |
+ MAC_RGMII_MODE_TX_RESET;
+ }
+ tw32(MAC_EXT_RGMII_MODE, val);
+}
+
+static void tg3_mdio_start(struct tg3 *tp)
+{
+ tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+
+ if (tg3_flag(tp, MDIOBUS_INITED) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_mdio_config_5785(tp);
+}
+
+static int tg3_mdio_init(struct tg3 *tp)
+{
+ int i;
+ u32 reg;
+ struct phy_device *phydev;
+
+ if (tg3_flag(tp, 5717_PLUS)) {
+ u32 is_serdes;
+
+ tp->phy_addr = tp->pci_fn + 1;
+
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ else
+ is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+ TG3_CPMU_PHY_STRAP_IS_SERDES;
+ if (is_serdes)
+ tp->phy_addr += 7;
+ } else
+ tp->phy_addr = TG3_PHY_MII_ADDR;
+
+ tg3_mdio_start(tp);
+
+ if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
+ return 0;
+
+ tp->mdio_bus = mdiobus_alloc();
+ if (tp->mdio_bus == NULL)
+ return -ENOMEM;
+
+ tp->mdio_bus->name = "tg3 mdio bus";
+ snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
+ (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+ tp->mdio_bus->priv = tp;
+ tp->mdio_bus->parent = &tp->pdev->dev;
+ tp->mdio_bus->read = &tg3_mdio_read;
+ tp->mdio_bus->write = &tg3_mdio_write;
+ tp->mdio_bus->reset = &tg3_mdio_reset;
+ tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+ tp->mdio_bus->irq = &tp->mdio_irq[0];
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ tp->mdio_bus->irq[i] = PHY_POLL;
+
+ /* The bus registration will look for all the PHYs on the mdio bus.
+ * Unfortunately, it does not ensure the PHY is powered up before
+ * accessing the PHY ID registers. A chip reset is the
+ * quickest way to bring the device back to an operational state.
+ */
+ if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
+ tg3_bmcr_reset(tp);
+
+ i = mdiobus_register(tp->mdio_bus);
+ if (i) {
+ dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
+ mdiobus_free(tp->mdio_bus);
+ return i;
+ }
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ if (!phydev || !phydev->drv) {
+ dev_warn(&tp->pdev->dev, "No PHY devices\n");
+ mdiobus_unregister(tp->mdio_bus);
+ mdiobus_free(tp->mdio_bus);
+ return -ENODEV;
+ }
+
+ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_BCM57780:
+ phydev->interface = PHY_INTERFACE_MODE_GMII;
+ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+ break;
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
+ phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+ PHY_BRCM_RX_REFCLK_UNUSED |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_AUTO_PWRDWN_ENABLE;
+ if (tg3_flag(tp, RGMII_INBAND_DISABLE))
+ phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+ phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+ phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
+ /* fallthru */
+ case PHY_ID_RTL8211C:
+ phydev->interface = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_ID_RTL8201E:
+ case PHY_ID_BCMAC131:
+ phydev->interface = PHY_INTERFACE_MODE_MII;
+ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+ tp->phy_flags |= TG3_PHYFLG_IS_FET;
+ break;
+ }
+
+ tg3_flag_set(tp, MDIOBUS_INITED);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_mdio_config_5785(tp);
+
+ return 0;
+}
+
+static void tg3_mdio_fini(struct tg3 *tp)
+{
+ if (tg3_flag(tp, MDIOBUS_INITED)) {
+ tg3_flag_clear(tp, MDIOBUS_INITED);
+ mdiobus_unregister(tp->mdio_bus);
+ mdiobus_free(tp->mdio_bus);
+ }
+}
+
+/* tp->lock is held. */
+static inline void tg3_generate_fw_event(struct tg3 *tp)
+{
+ u32 val;
+
+ val = tr32(GRC_RX_CPU_EVENT);
+ val |= GRC_RX_CPU_DRIVER_EVENT;
+ tw32_f(GRC_RX_CPU_EVENT, val);
+
+ tp->last_event_jiffies = jiffies;
+}
+
+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
+
+/* tp->lock is held. */
+static void tg3_wait_for_event_ack(struct tg3 *tp)
+{
+ int i;
+ unsigned int delay_cnt;
+ long time_remain;
+
+ /* If enough time has passed, no wait is necessary. */
+ time_remain = (long)(tp->last_event_jiffies + 1 +
+ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
+ (long)jiffies;
+ if (time_remain < 0)
+ return;
+
+ /* Check if we can shorten the wait time. */
+ delay_cnt = jiffies_to_usecs(time_remain);
+ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
+ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
+ delay_cnt = (delay_cnt >> 3) + 1;
+
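+ /* Poll in 8 usec steps; delay_cnt above is the remaining wait
+ * divided by that step size (plus one), so the loop never waits
+ * longer than the firmware event timeout.
+ */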
+ for (i = 0; i < delay_cnt; i++) {
+ if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
+ break;
+ udelay(8);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_ump_link_report(struct tg3 *tp)
+{
+ u32 reg;
+ u32 val;
+
+ if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
+ return;
+
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
+
+ val = 0;
+ if (!tg3_readphy(tp, MII_BMCR, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_BMSR, &reg))
+ val |= (reg & 0xffff);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
+
+ val = 0;
+ if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_LPA, &reg))
+ val |= (reg & 0xffff);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
+
+ val = 0;
+ if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
+ if (!tg3_readphy(tp, MII_CTRL1000, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_STAT1000, &reg))
+ val |= (reg & 0xffff);
+ }
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
+
+ if (!tg3_readphy(tp, MII_PHYADDR, &reg))
+ val = reg << 16;
+ else
+ val = 0;
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
+
+ tg3_generate_fw_event(tp);
+}
+
+/* tp->lock is held. */
+static void tg3_stop_fw(struct tg3 *tp)
+{
+ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+ /* Wait for RX cpu to ACK the previous event. */
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+
+ tg3_generate_fw_event(tp);
+
+ /* Wait for RX cpu to ACK this event. */
+ tg3_wait_for_event_ack(tp);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+{
+ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (kind == RESET_KIND_INIT ||
+ kind == RESET_KIND_SUSPEND)
+ tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
+{
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START_DONE);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD_DONE);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (kind == RESET_KIND_SHUTDOWN)
+ tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
+{
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static int tg3_poll_fw(struct tg3 *tp)
+{
+ int i;
+ u32 val;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* Wait up to 20ms for init done. */
+ for (i = 0; i < 200; i++) {
+ if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
+ return 0;
+ udelay(100);
+ }
+ return -ENODEV;
+ }
+
+ /* Wait for firmware initialization to complete. */
+ for (i = 0; i < 100000; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ udelay(10);
+ }
+
+ /* Chip might not be fitted with firmware. Some Sun onboard
+ * parts are configured like that. So don't signal the timeout
+ * of the above loop as an error, but do report the lack of
+ * running firmware once.
+ */
+ if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
+ tg3_flag_set(tp, NO_FWARE_REPORTED);
+
+ netdev_info(tp->dev, "No firmware running\n");
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ /* The 57765 A0 needs a little more
+ * time to do some important work.
+ */
+ mdelay(10);
+ }
+
+ return 0;
+}
+
+static void tg3_link_report(struct tg3 *tp)
+{
+ if (!netif_carrier_ok(tp->dev)) {
+ netif_info(tp, link, tp->dev, "Link is down\n");
+ tg3_ump_link_report(tp);
+ } else if (netif_msg_link(tp)) {
+ netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (tp->link_config.active_speed == SPEED_1000 ?
+ 1000 :
+ (tp->link_config.active_speed == SPEED_100 ?
+ 100 : 10)),
+ (tp->link_config.active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
+
+ netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
+ "on" : "off",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
+ "on" : "off");
+
+ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+ netdev_info(tp->dev, "EEE is %s\n",
+ tp->setlpicnt ? "enabled" : "disabled");
+
+ tg3_ump_link_report(tp);
+ }
+}
+
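+/* Map the driver's FLOW_CTRL_TX/RX request onto the MII pause
+ * advertisement bits for 1000BASE-T links. Per IEEE 802.3 Annex 28B,
+ * RX-only pause is advertised as symmetric plus asymmetric pause,
+ * and TX-only pause as asymmetric pause alone.
+ */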
+static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
+{
+ u16 miireg;
+
+ if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
+ miireg = ADVERTISE_PAUSE_CAP;
+ else if (flow_ctrl & FLOW_CTRL_TX)
+ miireg = ADVERTISE_PAUSE_ASYM;
+ else if (flow_ctrl & FLOW_CTRL_RX)
+ miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ else
+ miireg = 0;
+
+ return miireg;
+}
+
+static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
+{
+ u16 miireg;
+
+ if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
+ miireg = ADVERTISE_1000XPAUSE;
+ else if (flow_ctrl & FLOW_CTRL_TX)
+ miireg = ADVERTISE_1000XPSE_ASYM;
+ else if (flow_ctrl & FLOW_CTRL_RX)
+ miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+ else
+ miireg = 0;
+
+ return miireg;
+}
+
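+/* Resolve the negotiated pause configuration from the local and
+ * link-partner 1000BASE-X advertisements, following the standard
+ * 802.3 pause resolution rules.
+ */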
+static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
+{
+ u8 cap = 0;
+
+ if (lcladv & ADVERTISE_1000XPAUSE) {
+ if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+ if (rmtadv & LPA_1000XPAUSE)
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ else if (rmtadv & LPA_1000XPAUSE_ASYM)
+ cap = FLOW_CTRL_RX;
+ } else {
+ if (rmtadv & LPA_1000XPAUSE)
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ }
+ } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+ if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ cap = FLOW_CTRL_TX;
+ }
+
+ return cap;
+}
+
+static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
+{
+ u8 autoneg;
+ u8 flowctrl = 0;
+ u32 old_rx_mode = tp->rx_mode;
+ u32 old_tx_mode = tp->tx_mode;
+
+ if (tg3_flag(tp, USE_PHYLIB))
+ autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+ else
+ autoneg = tp->link_config.autoneg;
+
+ if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
+ else
+ flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ } else
+ flowctrl = tp->link_config.flowctrl;
+
+ tp->link_config.active_flowctrl = flowctrl;
+
+ if (flowctrl & FLOW_CTRL_RX)
+ tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_rx_mode != tp->rx_mode)
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_tx_mode != tp->tx_mode)
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+}
+
+static void tg3_adjust_link(struct net_device *dev)
+{
+ u8 oldflowctrl, linkmesg = 0;
+ u32 mac_mode, lcl_adv, rmt_adv;
+ struct tg3 *tp = netdev_priv(dev);
+ struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ spin_lock_bh(&tp->lock);
+
+ mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
+ MAC_MODE_HALF_DUPLEX);
+
+ oldflowctrl = tp->link_config.active_flowctrl;
+
+ if (phydev->link) {
+ lcl_adv = 0;
+ rmt_adv = 0;
+
+ if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else if (phydev->speed == SPEED_1000 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ mac_mode |= MAC_MODE_HALF_DUPLEX;
+ else {
+ lcl_adv = tg3_advert_flowctrl_1000T(
+ tp->link_config.flowctrl);
+
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+ }
+
+ tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ } else
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+ if (mac_mode != tp->mac_mode) {
+ tp->mac_mode = mac_mode;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ if (phydev->speed == SPEED_10)
+ tw32(MAC_MI_STAT,
+ MAC_MI_STAT_10MBPS_MODE |
+ MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ else
+ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ }
+
+ if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
+ tw32(MAC_TX_LENGTHS,
+ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+ else
+ tw32(MAC_TX_LENGTHS,
+ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+
+ if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
+ (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
+ phydev->speed != tp->link_config.active_speed ||
+ phydev->duplex != tp->link_config.active_duplex ||
+ oldflowctrl != tp->link_config.active_flowctrl)
+ linkmesg = 1;
+
+ tp->link_config.active_speed = phydev->speed;
+ tp->link_config.active_duplex = phydev->duplex;
+
+ spin_unlock_bh(&tp->lock);
+
+ if (linkmesg)
+ tg3_link_report(tp);
+}
+
+static int tg3_phy_init(struct tg3 *tp)
+{
+ struct phy_device *phydev;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
+ return 0;
+
+ /* Bring the PHY back to a known state. */
+ tg3_bmcr_reset(tp);
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ /* Attach the MAC to the PHY. */
+ phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
+ phydev->dev_flags, phydev->interface);
+ if (IS_ERR(phydev)) {
+ dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* Mask with MAC supported features. */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ phydev->supported &= (PHY_GBIT_FEATURES |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ }
+ /* fallthru */
+ case PHY_INTERFACE_MODE_MII:
+ phydev->supported &= (PHY_BASIC_FEATURES |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ default:
+ phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ return -EINVAL;
+ }
+
+ tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
+
+ phydev->advertising = phydev->supported;
+
+ return 0;
+}
+
+static void tg3_phy_start(struct tg3 *tp)
+{
+ struct phy_device *phydev;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+ phydev->speed = tp->link_config.orig_speed;
+ phydev->duplex = tp->link_config.orig_duplex;
+ phydev->autoneg = tp->link_config.orig_autoneg;
+ phydev->advertising = tp->link_config.orig_advertising;
+ }
+
+ phy_start(phydev);
+
+ phy_start_aneg(phydev);
+}
+
+static void tg3_phy_stop(struct tg3 *tp)
+{
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return;
+
+ phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+}
+
+static void tg3_phy_fini(struct tg3 *tp)
+{
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+ phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
+ }
+}
+
+static int tg3_phy_set_extloopbk(struct tg3 *tp)
+{
+ int err;
+ u32 val;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ return 0;
+
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ /* Cannot do read-modify-write on 5401 */
+ err = tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
+ 0x4c20);
+ goto done;
+ }
+
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+ if (err)
+ return err;
+
+ val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
+ err = tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
+
+done:
+ return err;
+}
+
+static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
+{
+ u32 phytest;
+
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+ u32 phy;
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ phytest | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
+ if (enable)
+ phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
+ else
+ phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
+ tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
+ }
+ tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+ }
+}
+
+static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
+{
+ u32 reg;
+
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ (tg3_flag(tp, 5717_PLUS) &&
+ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+ return;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ tg3_phy_fet_toggle_apd(tp, enable);
+ return;
+ }
+
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_SCR5_SEL |
+ MII_TG3_MISC_SHDW_SCR5_LPED |
+ MII_TG3_MISC_SHDW_SCR5_DLPTLM |
+ MII_TG3_MISC_SHDW_SCR5_SDTL |
+ MII_TG3_MISC_SHDW_SCR5_C125OE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
+
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_APD_SEL |
+ MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+ if (enable)
+ reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
+
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+}
+
+static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+{
+ u32 phy;
+
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ return;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ u32 ephy;
+
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
+ u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ ephy | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, reg, &phy)) {
+ if (enable)
+ phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+ else
+ phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+ tg3_writephy(tp, reg, phy);
+ }
+ tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
+ }
+ } else {
+ int ret;
+
+ ret = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
+ if (!ret) {
+ if (enable)
+ phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ else
+ phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
+ }
+ }
+}
+
+static void tg3_phy_set_wirespeed(struct tg3 *tp)
+{
+ int ret;
+ u32 val;
+
+ if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
+ return;
+
+ ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
+ if (!ret)
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
+ val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
+}
+
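+/* Transfer per-chip analog tuning values from the one-time
+ * programmable (OTP) memory into the corresponding PHY DSP
+ * coefficient registers.
+ */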
+static void tg3_phy_apply_otp(struct tg3 *tp)
+{
+ u32 otp, phy;
+
+ if (!tp->phy_otp)
+ return;
+
+ otp = tp->phy_otp;
+
+ if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ return;
+
+ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+ phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
+
+ phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
+ ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
+
+ phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
+ phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
+ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
+
+ phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
+
+ phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
+
+ phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
+ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+}
+
+static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+{
+ u32 val;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+ return;
+
+ tp->setlpicnt = 0;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+ current_link_up == 1 &&
+ tp->link_config.active_duplex == DUPLEX_FULL &&
+ (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_1000)) {
+ u32 eeectl;
+
+ if (tp->link_config.active_speed == SPEED_1000)
+ eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
+ else
+ eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
+
+ tw32(TG3_CPMU_EEE_CTRL, eeectl);
+
+ tg3_phy_cl45_read(tp, MDIO_MMD_AN,
+ TG3_CL45_D7_EEERES_STAT, &val);
+
+ if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
+ val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+ tp->setlpicnt = 2;
+ }
+
+ if (!tp->setlpicnt) {
+ if (current_link_up == 1 &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+ tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+ }
+}
+
+static void tg3_phy_eee_enable(struct tg3 *tp)
+{
+ u32 val;
+
+ if (tp->link_config.active_speed == SPEED_1000 &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+ tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
+}
+
+static int tg3_wait_macro_done(struct tg3 *tp)
+{
+ int limit = 100;
+
+ while (limit--) {
+ u32 tmp32;
+
+ if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
+ if ((tmp32 & 0x1000) == 0)
+ break;
+ }
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
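+/* Write a known test pattern into the DSP memory of each of the four
+ * TX channels, then read it back and verify it. If the DSP macro
+ * hangs, *resetp is set so the caller resets the PHY and retries;
+ * a data mismatch returns -EBUSY.
+ */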
+static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
+{
+ static const u32 test_pat[4][6] = {
+ { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
+ { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
+ { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
+ { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
+ };
+ int chan;
+
+ for (chan = 0; chan < 4; chan++) {
+ int i;
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+
+ for (i = 0; i < 6; i++)
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
+ test_pat[chan][i]);
+
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 6; i += 2) {
+ u32 low, high;
+
+ if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
+ tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+ low &= 0x7fff;
+ high &= 0x000f;
+ if (low != test_pat[chan][i] ||
+ high != test_pat[chan][i+1]) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
+
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tg3_phy_reset_chanpat(struct tg3 *tp)
+{
+ int chan;
+
+ for (chan = 0; chan < 4; chan++) {
+ int i;
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+ for (i = 0; i < 6; i++)
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+ if (tg3_wait_macro_done(tp))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+{
+ u32 reg32, phy9_orig;
+ int retries, do_phy_reset, err;
+
+ retries = 10;
+ do_phy_reset = 1;
+ do {
+ if (do_phy_reset) {
+ err = tg3_bmcr_reset(tp);
+ if (err)
+ return err;
+ do_phy_reset = 0;
+ }
+
+ /* Disable transmitter and interrupt. */
+ if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
+ continue;
+
+ reg32 |= 0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ /* Set full-duplex, 1000 Mbps. */
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+
+ /* Set to master mode. */
+ if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
+ continue;
+
+ tg3_writephy(tp, MII_CTRL1000,
+ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+
+ err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ if (err)
+ return err;
+
+ /* Block the PHY control access. */
+ tg3_phydsp_write(tp, 0x8005, 0x0800);
+
+ err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
+ if (!err)
+ break;
+ } while (--retries);
+
+ err = tg3_phy_reset_chanpat(tp);
+ if (err)
+ return err;
+
+ tg3_phydsp_write(tp, 0x8005, 0x0000);
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+
+ tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+ } else if (!err)
+ err = -EBUSY;
+
+ return err;
+}
+
+/* Reset the tigon3 PHY and reapply the PHY workarounds and
+ * configuration settings.
+ */
+static int tg3_phy_reset(struct tg3 *tp)
+{
+ u32 val, cpmuctrl;
+ int err;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ val = tr32(GRC_MISC_CFG);
+ tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
+ udelay(40);
+ }
+ err = tg3_readphy(tp, MII_BMSR, &val);
+ err |= tg3_readphy(tp, MII_BMSR, &val);
+ if (err != 0)
+ return -EBUSY;
+
+ if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ err = tg3_phy_reset_5703_4_5(tp);
+ if (err)
+ return err;
+ goto out;
+ }
+
+ cpmuctrl = 0;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ cpmuctrl = tr32(TG3_CPMU_CTRL);
+ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
+ tw32(TG3_CPMU_CTRL,
+ cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
+ }
+
+ err = tg3_bmcr_reset(tp);
+ if (err)
+ return err;
+
+ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
+ val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
+
+ tw32(TG3_CPMU_CTRL, cpmuctrl);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+ if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
+ CPMU_LSPD_1000MB_MACCLK_12_5) {
+ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+ udelay(40);
+ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+ }
+ }
+
+ if (tg3_flag(tp, 5717_PLUS) &&
+ (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
+ return 0;
+
+ tg3_phy_apply_otp(tp);
+
+ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, true);
+ else
+ tg3_phy_toggle_apd(tp, false);
+
+out:
+ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+ tg3_phydsp_write(tp, 0x000a, 0x0323);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+ if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, 0x000a, 0x310b);
+ tg3_phydsp_write(tp, 0x201f, 0x9506);
+ tg3_phydsp_write(tp, 0x401f, 0x14e2);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+ } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+ if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+ tg3_writephy(tp, MII_TG3_TEST1,
+ MII_TG3_TEST1_TRIM_EN | 0x4);
+ } else
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+ }
+
+ /* Set Extended packet length bit (bit 14) on all chips that
+ * support jumbo frames.
+ */
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ /* Cannot do read-modify-write on 5401 */
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+ } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
+ /* Set bit 14 with read-modify-write to preserve other bits */
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+ if (!err)
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
+ }
+
+ /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
+ * transmission of jumbo frames.
+ */
+ if (tg3_flag(tp, JUMBO_CAPABLE)) {
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* adjust output voltage */
+ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
+ }
+
+ tg3_phy_toggle_automdix(tp, 1);
+ tg3_phy_set_wirespeed(tp);
+ return 0;
+}
+
+#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
+#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
+#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
+ TG3_GPIO_MSG_NEED_VAUX)
+#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
+ ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 4) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 8) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 12))
+
+#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
+ ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 4) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 8) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 12))
+
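+/* Each PCI function owns a 4-bit field in a shared status word, at
+ * shift TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn. The word lives in the
+ * APE GPIO_MSG register on 5717/5719 and in TG3_CPMU_DRV_STATUS
+ * otherwise. Returns the updated status for all functions.
+ */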
+static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
+{
+ u32 status, shift;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
+ else
+ status = tr32(TG3_CPMU_DRV_STATUS);
+
+ shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
+ status &= ~(TG3_GPIO_MSG_MASK << shift);
+ status |= (newstat << shift);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
+ else
+ tw32(TG3_CPMU_DRV_STATUS, status);
+
+ return status >> TG3_APE_GPIO_MSG_SHIFT;
+}
+
+static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, IS_NIC))
+ return 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+ return -EIO;
+
+ tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
+
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+ } else {
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+
+ return 0;
+}
+
+static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
+{
+ u32 grc_local_ctrl;
+
+ if (!tg3_flag(tp, IS_NIC) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+ return;
+
+ grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+}
+
+static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, IS_NIC))
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ (GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1),
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+ /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
+ u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ tp->grc_local_ctrl;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ } else {
+ u32 no_gpio2;
+ u32 grc_local_ctrl = 0;
+
+ /* Workaround to prevent overdrawing Amps. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+
+ /* On 5753 and variants, GPIO2 cannot be used. */
+ no_gpio2 = tp->nic_sram_data_cfg &
+ NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ GRC_LCLCTRL_GPIO_OUTPUT2;
+ if (no_gpio2) {
+ grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT2);
+ }
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ if (!no_gpio2) {
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+ }
+}
+
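+/* On 5717-class chips the aux power decision is shared between PCI
+ * functions: record this function's VAUX requirement in the shared
+ * status word, and if another function's driver is still present let
+ * it manage the power source; otherwise switch to VAUX or VMAIN as
+ * required.
+ */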
+static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
+{
+ u32 msg = 0;
+
+ /* Serialize power state transitions */
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+ return;
+
+ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
+ msg = TG3_GPIO_MSG_NEED_VAUX;
+
+ msg = tg3_set_function_status(tp, msg);
+
+ if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
+ goto done;
+
+ if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
+ tg3_pwrsrc_switch_to_vaux(tp);
+ else
+ tg3_pwrsrc_die_with_vmain(tp);
+
+done:
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+}
+
+static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
+{
+ bool need_vaux = false;
+
+ /* The GPIOs do something completely different on 57765. */
+ if (!tg3_flag(tp, IS_NIC) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_frob_aux_power_5717(tp, include_wol ?
+ tg3_flag(tp, WOL_ENABLE) != 0 : 0);
+ return;
+ }
+
+ if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
+ struct net_device *dev_peer;
+
+ dev_peer = pci_get_drvdata(tp->pdev_peer);
+
+ /* remove_one() may have been run on the peer. */
+ if (dev_peer) {
+ struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+ if (tg3_flag(tp_peer, INIT_COMPLETE))
+ return;
+
+ if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
+ tg3_flag(tp_peer, ENABLE_ASF))
+ need_vaux = true;
+ }
+ }
+
+ if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
+ tg3_flag(tp, ENABLE_ASF))
+ need_vaux = true;
+
+ if (need_vaux)
+ tg3_pwrsrc_switch_to_vaux(tp);
+ else
+ tg3_pwrsrc_die_with_vmain(tp);
+}
+
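+/* Decide whether the 5700's link LED polarity must be inverted for
+ * the given speed: always in LED mode PHY_2, at everything except
+ * 10 Mbps on a BCM5411, and only at 10 Mbps otherwise.
+ */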
+static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+{
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
+ return 1;
+ else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
+ if (speed != SPEED_10)
+ return 1;
+ } else if (speed == SPEED_10)
+ return 1;
+
+ return 0;
+}
+
+static int tg3_setup_phy(struct tg3 *, int);
+static int tg3_halt_cpu(struct tg3 *, u32);
+
+static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+{
+ u32 val;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
+ u32 serdes_cfg = tr32(MAC_SERDES_CFG);
+
+ sg_dig_ctrl |=
+ SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
+ tw32(SG_DIG_CTRL, sg_dig_ctrl);
+ tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
+ }
+ return;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_bmcr_reset(tp);
+ val = tr32(GRC_MISC_CFG);
+ tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
+ udelay(40);
+ return;
+ } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ u32 phytest;
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+ u32 phy;
+
+ tg3_writephy(tp, MII_ADVERTISE, 0);
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ phytest | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+ phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+ tg3_writephy(tp,
+ MII_TG3_FET_SHDW_AUXMODE4,
+ phy);
+ }
+ tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+ }
+ return;
+ } else if (do_low_power) {
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+
+ val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+ MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
+ MII_TG3_AUXCTL_PCTL_VREG_11V;
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
+ }
+
+ /* The PHY should not be powered down on some chips because
+ * of bugs.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+ return;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+ val |= CPMU_LSPD_1000MB_MACCLK_12_5;
+ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+ }
+
+ tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
+/* tp->lock is held. */
+static int tg3_nvram_lock(struct tg3 *tp)
+{
+ if (tg3_flag(tp, NVRAM)) {
+ int i;
+
+ if (tp->nvram_lock_cnt == 0) {
+ tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+ for (i = 0; i < 8000; i++) {
+ if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+ break;
+ udelay(20);
+ }
+ if (i == 8000) {
+ tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+ return -ENODEV;
+ }
+ }
+ tp->nvram_lock_cnt++;
+ }
+ return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_nvram_unlock(struct tg3 *tp)
+{
+ if (tg3_flag(tp, NVRAM)) {
+ if (tp->nvram_lock_cnt > 0)
+ tp->nvram_lock_cnt--;
+ if (tp->nvram_lock_cnt == 0)
+ tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_enable_nvram_access(struct tg3 *tp)
+{
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_disable_nvram_access(struct tg3 *tp)
+{
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+ }
+}
+
+static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
+ u32 offset, u32 *val)
+{
+ u32 tmp;
+ int i;
+
+ if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
+ return -EINVAL;
+
+ tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
+ EEPROM_ADDR_DEVID_MASK |
+ EEPROM_ADDR_READ);
+ tw32(GRC_EEPROM_ADDR,
+ tmp |
+ (0 << EEPROM_ADDR_DEVID_SHIFT) |
+ ((offset << EEPROM_ADDR_ADDR_SHIFT) &
+ EEPROM_ADDR_ADDR_MASK) |
+ EEPROM_ADDR_READ | EEPROM_ADDR_START);
+
+ for (i = 0; i < 1000; i++) {
+ tmp = tr32(GRC_EEPROM_ADDR);
+
+ if (tmp & EEPROM_ADDR_COMPLETE)
+ break;
+ msleep(1);
+ }
+ if (!(tmp & EEPROM_ADDR_COMPLETE))
+ return -EBUSY;
+
+ tmp = tr32(GRC_EEPROM_DATA);
+
+ /*
+ * The data will always be opposite the native endian
+ * format. Perform a blind byteswap to compensate.
+ */
+ *val = swab32(tmp);
+
+ return 0;
+}
+
+#define NVRAM_CMD_TIMEOUT 10000
+
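+/* Issue an NVRAM command and poll for completion in 10 usec steps,
+ * for a worst-case wait of NVRAM_CMD_TIMEOUT * 10 usec (100 ms).
+ */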
+static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
+{
+ int i;
+
+ tw32(NVRAM_CMD, nvram_cmd);
+ for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
+ udelay(10);
+ if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
+ udelay(10);
+ break;
+ }
+ }
+
+ if (i == NVRAM_CMD_TIMEOUT)
+ return -EBUSY;
+
+ return 0;
+}
+
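+/* Atmel AT45DB0x1B-style flashes use pages that are not a power of
+ * two in size, so a linear offset must be split into a page index,
+ * placed at bit ATMEL_AT45DB0X1B_PAGE_POS, and an offset within the
+ * page. For example, with 264-byte pages a linear offset of 1000
+ * maps to page 3, in-page offset 208.
+ */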
+static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
+{
+ if (tg3_flag(tp, NVRAM) &&
+ tg3_flag(tp, NVRAM_BUFFERED) &&
+ tg3_flag(tp, FLASH) &&
+ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+ addr = ((addr / tp->nvram_pagesize) <<
+ ATMEL_AT45DB0X1B_PAGE_POS) +
+ (addr % tp->nvram_pagesize);
+
+ return addr;
+}
+
+static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
+{
+ if (tg3_flag(tp, NVRAM) &&
+ tg3_flag(tp, NVRAM_BUFFERED) &&
+ tg3_flag(tp, FLASH) &&
+ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+ addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
+ tp->nvram_pagesize) +
+ (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
+
+ return addr;
+}
+
+/* NOTE: Data read in from NVRAM is byteswapped according to
+ * the byteswapping settings for all other register accesses.
+ * tg3 devices are BE devices, so on a BE machine, the data
+ * returned will be exactly as it is seen in NVRAM. On a LE
+ * machine, the 32-bit value will be byteswapped.
+ */
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+ int ret;
+
+ if (!tg3_flag(tp, NVRAM))
+ return tg3_nvram_read_using_eeprom(tp, offset, val);
+
+ offset = tg3_nvram_phys_addr(tp, offset);
+
+ if (offset > NVRAM_ADDR_MSK)
+ return -EINVAL;
+
+ ret = tg3_nvram_lock(tp);
+ if (ret)
+ return ret;
+
+ tg3_enable_nvram_access(tp);
+
+ tw32(NVRAM_ADDR, offset);
+ ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
+ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+
+ if (ret == 0)
+ *val = tr32(NVRAM_RDDATA);
+
+ tg3_disable_nvram_access(tp);
+
+ tg3_nvram_unlock(tp);
+
+ return ret;
+}
+
+/* Ensures NVRAM data is in bytestream format. */
+static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
+{
+ u32 v;
+ int res = tg3_nvram_read(tp, offset, &v);
+ if (!res)
+ *val = cpu_to_be32(v);
+ return res;
+}
+
+#define RX_CPU_SCRATCH_BASE 0x30000
+#define RX_CPU_SCRATCH_SIZE 0x04000
+#define TX_CPU_SCRATCH_BASE 0x34000
+#define TX_CPU_SCRATCH_SIZE 0x04000
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+{
+ int i;
+
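+ /* 5705 and newer chips have no separate TX cpu, so halting
+ * one would be a driver bug.
+ */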
+ BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 val = tr32(GRC_VCPU_EXT_CTRL);
+
+ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
+ return 0;
+ }
+ if (offset == RX_CPU_BASE) {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+ } else {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+ }
+
+ if (i >= 10000) {
+ netdev_err(tp->dev, "%s timed out, %s CPU\n",
+ __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ return -ENODEV;
+ }
+
+ /* Clear firmware's nvram arbitration. */
+ if (tg3_flag(tp, NVRAM))
+ tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
+ return 0;
+}
+
+struct fw_info {
+ unsigned int fw_base;
+ unsigned int fw_len;
+ const __be32 *fw_data;
+};
+
+/* tp->lock is held. */
+static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
+ u32 cpu_scratch_base, int cpu_scratch_size,
+ struct fw_info *info)
+{
+ int err, lock_err, i;
+ void (*write_op)(struct tg3 *, u32, u32);
+
+ if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
+ netdev_err(tp->dev,
+ "%s: Trying to load TX cpu firmware which is 5705\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (tg3_flag(tp, 5705_PLUS))
+ write_op = tg3_write_mem;
+ else
+ write_op = tg3_write_indirect_reg32;
+
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
+
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
+ for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
+ write_op(tp, (cpu_scratch_base +
+ (info->fw_base & 0xffff) +
+ (i * sizeof(u32))),
+ be32_to_cpu(info->fw_data[i]));
+
+ err = 0;
+
+out:
+ return err;
+}
+
+/* tp->lock is held. */
+static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
+{
+ struct fw_info info;
+ const __be32 *fw_data;
+ int err, i;
+
+ fw_data = (void *)tp->fw->data;
+
+ /* Firmware blob starts with version numbers, followed by
+ start address and length. We are setting complete length.
+ length = end_address_of_bss - start_address_of_text.
+ Remainder is the blob to be loaded contiguously
+ from start address. */
+
+ info.fw_base = be32_to_cpu(fw_data[1]);
+ info.fw_len = tp->fw->size - 12;
+ info.fw_data = &fw_data[3];
+
+ err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
+ RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
+ TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ /* Now startup only the RX cpu. */
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
+ break;
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+ "should be %08x\n", __func__,
+ tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ return -ENODEV;
+ }
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_load_tso_firmware(struct tg3 *tp)
+{
+ struct fw_info info;
+ const __be32 *fw_data;
+ unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
+ int err, i;
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
+ return 0;
+
+ fw_data = (void *)tp->fw->data;
+
+ /* Firmware blob starts with version numbers, followed by
+ start address and length. We are setting complete length.
+ length = end_address_of_bss - start_address_of_text.
+ Remainder is the blob to be loaded contiguously
+ from start address. */
+
+ info.fw_base = be32_to_cpu(fw_data[1]);
+ cpu_scratch_size = tp->fw_len;
+ info.fw_len = tp->fw->size - 12;
+ info.fw_data = &fw_data[3];
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ cpu_base = RX_CPU_BASE;
+ cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
+ } else {
+ cpu_base = TX_CPU_BASE;
+ cpu_scratch_base = TX_CPU_SCRATCH_BASE;
+ cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
+ }
+
+ err = tg3_load_firmware_cpu(tp, cpu_base,
+ cpu_scratch_base, cpu_scratch_size,
+ &info);
+ if (err)
+ return err;
+
+ /* Now startup the cpu. */
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, info.fw_base);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(cpu_base + CPU_PC) == info.fw_base)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, info.fw_base);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ netdev_err(tp->dev,
+ "%s fails to set CPU PC, is %08x should be %08x\n",
+ __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ return -ENODEV;
+ }
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+ return 0;
+}
+
+
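+/* Program the station address into all four perfect-match MAC address
+ * slots (optionally leaving slot 1 untouched), plus the twelve
+ * extended slots on 5703/5704, and seed the transmit backoff
+ * generator from the byte sum of the address.
+ */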
+/* tp->lock is held. */
+static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
+{
+ u32 addr_high, addr_low;
+ int i;
+
+ addr_high = ((tp->dev->dev_addr[0] << 8) |
+ tp->dev->dev_addr[1]);
+ addr_low = ((tp->dev->dev_addr[2] << 24) |
+ (tp->dev->dev_addr[3] << 16) |
+ (tp->dev->dev_addr[4] << 8) |
+ (tp->dev->dev_addr[5] << 0));
+ for (i = 0; i < 4; i++) {
+ if (i == 1 && skip_mac_1)
+ continue;
+ tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ for (i = 0; i < 12; i++) {
+ tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
+ }
+ }
+
+ addr_high = (tp->dev->dev_addr[0] +
+ tp->dev->dev_addr[1] +
+ tp->dev->dev_addr[2] +
+ tp->dev->dev_addr[3] +
+ tp->dev->dev_addr[4] +
+ tp->dev->dev_addr[5]) &
+ TX_BACKOFF_SEED_MASK;
+ tw32(MAC_TX_BACKOFF_SEED, addr_high);
+}
+
+static void tg3_enable_register_access(struct tg3 *tp)
+{
+ /*
+ * Make sure register accesses (indirect or otherwise) will function
+ * correctly.
+ */
+ pci_write_config_dword(tp->pdev,
+ TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
+}
+
+static int tg3_power_up(struct tg3 *tp)
+{
+ int err;
+
+ tg3_enable_register_access(tp);
+
+ err = pci_set_power_state(tp->pdev, PCI_D0);
+ if (!err) {
+ /* Switch out of Vaux if it is a NIC */
+ tg3_pwrsrc_switch_to_vmain(tp);
+ } else {
+ netdev_err(tp->dev, "Transition to D0 failed\n");
+ }
+
+ return err;
+}
+
+static int tg3_power_down_prepare(struct tg3 *tp)
+{
+ u32 misc_host_ctrl;
+ bool device_should_wake, do_low_power;
+
+ tg3_enable_register_access(tp);
+
+ /* Restore the CLKREQ setting. */
+ if (tg3_flag(tp, CLKREQ_BUG)) {
+ u16 lnkctl;
+
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ &lnkctl);
+ lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ lnkctl);
+ }
+
+ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
+
+ device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
+ tg3_flag(tp, WOL_ENABLE);
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ do_low_power = false;
+ if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ struct phy_device *phydev;
+ u32 phyid, advertising;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+
+ tp->link_config.orig_speed = phydev->speed;
+ tp->link_config.orig_duplex = phydev->duplex;
+ tp->link_config.orig_autoneg = phydev->autoneg;
+ tp->link_config.orig_advertising = phydev->advertising;
+
+ advertising = ADVERTISED_TP |
+ ADVERTISED_Pause |
+ ADVERTISED_Autoneg |
+ ADVERTISED_10baseT_Half;
+
+ if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
+ if (tg3_flag(tp, WOL_SPEED_100MB))
+ advertising |=
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full;
+ else
+ advertising |= ADVERTISED_10baseT_Full;
+ }
+
+ phydev->advertising = advertising;
+
+ phy_start_aneg(phydev);
+
+ phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
+ if (phyid != PHY_ID_BCMAC131) {
+ phyid &= PHY_BCM_OUI_MASK;
+ if (phyid == PHY_BCM_OUI_1 ||
+ phyid == PHY_BCM_OUI_2 ||
+ phyid == PHY_BCM_OUI_3)
+ do_low_power = true;
+ }
+ }
+ } else {
+ do_low_power = true;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+ tp->link_config.orig_speed = tp->link_config.speed;
+ tp->link_config.orig_duplex = tp->link_config.duplex;
+ tp->link_config.orig_autoneg = tp->link_config.autoneg;
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ tp->link_config.speed = SPEED_10;
+ tp->link_config.duplex = DUPLEX_HALF;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tg3_setup_phy(tp, 0);
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 val;
+
+ val = tr32(GRC_VCPU_EXT_CTRL);
+ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
+ } else if (!tg3_flag(tp, ENABLE_ASF)) {
+ int i;
+ u32 val;
+
+ for (i = 0; i < 200; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ msleep(1);
+ }
+ }
+ if (tg3_flag(tp, WOL_CAP))
+ tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
+ WOL_DRV_STATE_SHUTDOWN |
+ WOL_DRV_WOL |
+ WOL_SET_MAGIC_PKT);
+
+ if (device_should_wake) {
+ u32 mac_mode;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+ if (do_low_power &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
+ MII_TG3_AUXCTL_PCTL_WOL_EN |
+ MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+ MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
+ udelay(40);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ mac_mode = MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode = MAC_MODE_PORT_MODE_MII;
+
+ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5700) {
+ u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
+ SPEED_100 : SPEED_10;
+ if (tg3_5700_link_polarity(tp, speed))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
+ } else {
+ mac_mode = MAC_MODE_PORT_MODE_TBI;
+ }
+
+ if (!tg3_flag(tp, 5750_PLUS))
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+ mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+ if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
+ (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
+ mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
+
+ if (tg3_flag(tp, ENABLE_APE))
+ mac_mode |= MAC_MODE_APE_TX_EN |
+ MAC_MODE_APE_RX_EN |
+ MAC_MODE_TDE_ENABLE;
+
+ tw32_f(MAC_MODE, mac_mode);
+ udelay(100);
+
+ tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
+ udelay(10);
+ }
+
+ if (!tg3_flag(tp, WOL_SPEED_100MB) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ u32 base_val;
+
+ base_val = tp->pci_clock_ctrl;
+ base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE);
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
+ CLOCK_CTRL_PWRDOWN_PLL133, 40);
+ } else if (tg3_flag(tp, 5780_CLASS) ||
+ tg3_flag(tp, CPMU_PRESENT) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* do nothing */
+ } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
+ u32 newbits1, newbits2;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE |
+ CLOCK_CTRL_ALTCLK);
+ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+ } else if (tg3_flag(tp, 5705_PLUS)) {
+ newbits1 = CLOCK_CTRL_625_CORE;
+ newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
+ } else {
+ newbits1 = CLOCK_CTRL_ALTCLK;
+ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+ }
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
+ 40);
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
+ 40);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 newbits3;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE |
+ CLOCK_CTRL_44MHZ_CORE);
+ } else {
+ newbits3 = CLOCK_CTRL_44MHZ_CORE;
+ }
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ tp->pci_clock_ctrl | newbits3, 40);
+ }
+ }
+
+ if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
+ tg3_power_down_phy(tp, do_low_power);
+
+ tg3_frob_aux_power(tp, true);
+
+ /* Workaround for unstable PLL clock */
+ if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
+ (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+ u32 val = tr32(0x7d00);
+
+ val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
+ tw32(0x7d00, val);
+ if (!tg3_flag(tp, ENABLE_ASF)) {
+ int err;
+
+ err = tg3_nvram_lock(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ if (!err)
+ tg3_nvram_unlock(tp);
+ }
+ }
+
+ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+
+ return 0;
+}
+
+static void tg3_power_down(struct tg3 *tp)
+{
+ tg3_power_down_prepare(tp);
+
+ pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
+ pci_set_power_state(tp->pdev, PCI_D3hot);
+}
+
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+{
+ switch (val & MII_TG3_AUX_STAT_SPDMASK) {
+ case MII_TG3_AUX_STAT_10HALF:
+ *speed = SPEED_10;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_10FULL:
+ *speed = SPEED_10;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ case MII_TG3_AUX_STAT_100HALF:
+ *speed = SPEED_100;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_100FULL:
+ *speed = SPEED_100;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ case MII_TG3_AUX_STAT_1000HALF:
+ *speed = SPEED_1000;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_1000FULL:
+ *speed = SPEED_1000;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ default:
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
+ SPEED_10;
+ *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+ break;
+ }
+ *speed = SPEED_INVALID;
+ *duplex = DUPLEX_INVALID;
+ break;
+ }
+}
+
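+/* Program the PHY advertisement registers for autonegotiation:
+ * 10/100 abilities plus flow control go into MII_ADVERTISE, gigabit
+ * abilities into MII_CTRL1000, and, on EEE-capable PHYs, the EEE
+ * abilities into the clause-45 MDIO_AN_EEE_ADV register.
+ */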
+static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+{
+ int err = 0;
+ u32 val, new_adv;
+
+ new_adv = ADVERTISE_CSMA;
+ if (advertise & ADVERTISED_10baseT_Half)
+ new_adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ new_adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ new_adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ new_adv |= ADVERTISE_100FULL;
+
+ new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+
+ err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ if (err)
+ goto done;
+
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ goto done;
+
+ new_adv = 0;
+ if (advertise & ADVERTISED_1000baseT_Half)
+ new_adv |= ADVERTISE_1000HALF;
+ if (advertise & ADVERTISED_1000baseT_Full)
+ new_adv |= ADVERTISE_1000FULL;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+
+ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+ if (err)
+ goto done;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+ goto done;
+
+ tw32(TG3_CPMU_EEE_MODE,
+ tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+ err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ if (!err) {
+ u32 err2;
+
+ val = 0;
+ /* Advertise 100-BaseTX EEE ability */
+ if (advertise & ADVERTISED_100baseT_Full)
+ val |= MDIO_AN_EEE_ADV_100TX;
+ /* Advertise 1000-BaseT EEE ability */
+ if (advertise & ADVERTISED_1000baseT_Full)
+ val |= MDIO_AN_EEE_ADV_1000T;
+ err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+ if (err)
+ val = 0;
+
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_57765:
+ case ASIC_REV_5719:
+			/* If we advertised any EEE abilities above... */
+ if (val)
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO |
+ MII_TG3_DSP_TAP26_OPCSINPT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ /* Fall through */
+ case ASIC_REV_5720:
+ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ }
+
+ err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ if (!err)
+ err = err2;
+ }
+
+done:
+ return err;
+}
+
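+/* Begin copper link bring-up. Three cases: in low-power mode only the
+ * WoL speeds are advertised; with autoneg enabled the configured mask
+ * is advertised; otherwise the requested speed/duplex is forced via
+ * BMCR after a brief loopback period to drop the old link.
+ */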
+static void tg3_phy_copper_begin(struct tg3 *tp)
+{
+ u32 new_adv;
+ int i;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ new_adv = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full;
+ if (tg3_flag(tp, WOL_SPEED_100MB))
+ new_adv |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
+
+ tg3_phy_autoneg_cfg(tp, new_adv,
+ FLOW_CTRL_TX | FLOW_CTRL_RX);
+ } else if (tp->link_config.speed == SPEED_INVALID) {
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ tp->link_config.advertising &=
+ ~(ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
+ tp->link_config.flowctrl);
+ } else {
+ /* Asking for a specific link mode. */
+ if (tp->link_config.speed == SPEED_1000) {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv = ADVERTISED_1000baseT_Full;
+ else
+ new_adv = ADVERTISED_1000baseT_Half;
+ } else if (tp->link_config.speed == SPEED_100) {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv = ADVERTISED_100baseT_Full;
+ else
+ new_adv = ADVERTISED_100baseT_Half;
+ } else {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv = ADVERTISED_10baseT_Full;
+ else
+ new_adv = ADVERTISED_10baseT_Half;
+ }
+
+ tg3_phy_autoneg_cfg(tp, new_adv,
+ tp->link_config.flowctrl);
+ }
+
+ if (tp->link_config.autoneg == AUTONEG_DISABLE &&
+ tp->link_config.speed != SPEED_INVALID) {
+ u32 bmcr, orig_bmcr;
+
+ tp->link_config.active_speed = tp->link_config.speed;
+ tp->link_config.active_duplex = tp->link_config.duplex;
+
+ bmcr = 0;
+ switch (tp->link_config.speed) {
+ default:
+ case SPEED_10:
+ break;
+
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
+ break;
+ }
+
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
+ (bmcr != orig_bmcr)) {
+ tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
+ for (i = 0; i < 1500; i++) {
+ u32 tmp;
+
+ udelay(10);
+ if (tg3_readphy(tp, MII_BMSR, &tmp) ||
+ tg3_readphy(tp, MII_BMSR, &tmp))
+ continue;
+ if (!(tmp & BMSR_LSTATUS)) {
+ udelay(40);
+ break;
+ }
+ }
+ tg3_writephy(tp, MII_BMCR, bmcr);
+ udelay(40);
+ }
+ } else {
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ }
+}
+
+static int tg3_init_5401phy_dsp(struct tg3 *tp)
+{
+ int err;
+
+	/* Turn off tap power management and set the
+	 * extended packet length bit.
+	 */
+ err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+
+ err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
+ err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
+ err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
+ err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
+ err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
+
+ udelay(40);
+
+ return err;
+}
+
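+/* Return 1 only if the PHY advertisement registers exactly match the
+ * requested mask; the copper setup path uses this to decide whether an
+ * existing autoneg result can be trusted without restarting it.
+ */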
+static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+{
+ u32 adv_reg, all_mask = 0;
+
+ if (mask & ADVERTISED_10baseT_Half)
+ all_mask |= ADVERTISE_10HALF;
+ if (mask & ADVERTISED_10baseT_Full)
+ all_mask |= ADVERTISE_10FULL;
+ if (mask & ADVERTISED_100baseT_Half)
+ all_mask |= ADVERTISE_100HALF;
+ if (mask & ADVERTISED_100baseT_Full)
+ all_mask |= ADVERTISE_100FULL;
+
+ if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
+ return 0;
+
+ if ((adv_reg & ADVERTISE_ALL) != all_mask)
+ return 0;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 tg3_ctrl;
+
+ all_mask = 0;
+ if (mask & ADVERTISED_1000baseT_Half)
+ all_mask |= ADVERTISE_1000HALF;
+ if (mask & ADVERTISED_1000baseT_Full)
+ all_mask |= ADVERTISE_1000FULL;
+
+ if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
+ return 0;
+
+ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (tg3_ctrl != all_mask)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+{
+ u32 curadv, reqadv;
+
+ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+ return 1;
+
+ curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+
+ if (tp->link_config.active_duplex == DUPLEX_FULL) {
+ if (curadv != reqadv)
+ return 0;
+
+ if (tg3_flag(tp, PAUSE_AUTONEG))
+ tg3_readphy(tp, MII_LPA, rmtadv);
+ } else {
+ /* Reprogram the advertisement register, even if it
+ * does not affect the current link. If the link
+ * gets renegotiated in the future, we can save an
+ * additional renegotiation cycle by advertising
+ * it correctly in the first place.
+ */
+ if (curadv != reqadv) {
+ *lcladv &= ~(ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+ tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
+ }
+ }
+
+ return 1;
+}
+
+static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
+{
+ int current_link_up;
+ u32 bmsr, val;
+ u32 lcl_adv, rmt_adv;
+ u16 current_speed;
+ u8 current_duplex;
+ int i, err;
+
+ tw32(MAC_EVENT, 0);
+
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED));
+ udelay(40);
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
+
+ /* Some third-party PHYs need to be reset on link going
+ * down.
+ */
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ netif_carrier_ok(tp->dev)) {
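+		/* BMSR latches link failures; read it twice so the
+		 * second read reflects the current link state.
+		 */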
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ !(bmsr & BMSR_LSTATUS))
+ force_reset = 1;
+ }
+ if (force_reset)
+ tg3_phy_reset(tp);
+
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
+ !tg3_flag(tp, INIT_COMPLETE))
+ bmsr = 0;
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ for (i = 0; i < 1000; i++) {
+ udelay(10);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS)) {
+ udelay(40);
+ break;
+ }
+ }
+
+ if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
+ TG3_PHY_REV_BCM5401_B0 &&
+ !(bmsr & BMSR_LSTATUS) &&
+ tp->link_config.active_speed == SPEED_1000) {
+ err = tg3_phy_reset(tp);
+ if (!err)
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+ }
+ }
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+ /* 5701 {A0,B0} CRC bug workaround */
+ tg3_writephy(tp, 0x15, 0x0a75);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+ }
+
+ /* Clear pending interrupts... */
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
+
+ if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
+ tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
+ else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
+ tg3_writephy(tp, MII_TG3_IMASK, ~0);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+ else
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
+ }
+
+ current_link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+
+ if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+ &val);
+ if (!err && !(val & (1 << 10))) {
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+ val | (1 << 10));
+ goto relink;
+ }
+ }
+
+ bmsr = 0;
+ for (i = 0; i < 100; i++) {
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
+ break;
+ udelay(40);
+ }
+
+ if (bmsr & BMSR_LSTATUS) {
+ u32 aux_stat, bmcr;
+
+ tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
+ for (i = 0; i < 2000; i++) {
+ udelay(10);
+ if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
+ aux_stat)
+ break;
+ }
+
+ tg3_aux_stat_to_speed_duplex(tp, aux_stat,
+					     &current_speed,
+					     &current_duplex);
+
+ bmcr = 0;
+ for (i = 0; i < 200; i++) {
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (tg3_readphy(tp, MII_BMCR, &bmcr))
+ continue;
+ if (bmcr && bmcr != 0x7fff)
+ break;
+ udelay(10);
+ }
+
+ lcl_adv = 0;
+ rmt_adv = 0;
+
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ if ((bmcr & BMCR_ANENABLE) &&
+ tg3_copper_is_advertising_all(tp,
+ tp->link_config.advertising)) {
+ if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
+ &rmt_adv))
+ current_link_up = 1;
+ }
+ } else {
+ if (!(bmcr & BMCR_ANENABLE) &&
+ tp->link_config.speed == current_speed &&
+ tp->link_config.duplex == current_duplex &&
+ tp->link_config.flowctrl ==
+ tp->link_config.active_flowctrl) {
+ current_link_up = 1;
+ }
+ }
+
+ if (current_link_up == 1 &&
+ tp->link_config.active_duplex == DUPLEX_FULL)
+ tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ }
+
+relink:
+ if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ tg3_phy_copper_begin(tp);
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+ (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+ current_link_up = 1;
+ }
+
+ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+ if (current_link_up == 1) {
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+ if (tp->link_config.active_duplex == DUPLEX_HALF)
+ tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (current_link_up == 1 &&
+ tg3_5700_link_polarity(tp, tp->link_config.active_speed))
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
+
+ /* ??? Without this setting Netgear GA302T PHY does not
+ * ??? send/receive packets...
+ */
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+ tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tg3_phy_eee_adjust(tp, current_link_up);
+
+ if (tg3_flag(tp, USE_LINKCHG_REG)) {
+ /* Polled via timer. */
+ tw32_f(MAC_EVENT, 0);
+ } else {
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ }
+ udelay(40);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+ current_link_up == 1 &&
+ tp->link_config.active_speed == SPEED_1000 &&
+ (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
+ udelay(120);
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(40);
+ tg3_write_mem(tp,
+ NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
+ }
+
+ /* Prevent send BD corruption. */
+ if (tg3_flag(tp, CLKREQ_BUG)) {
+ u16 oldlnkctl, newlnkctl;
+
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ &oldlnkctl);
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ else
+ newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
+ if (newlnkctl != oldlnkctl)
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) +
+ PCI_EXP_LNKCTL, newlnkctl);
+ }
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ }
+
+ return 0;
+}
+
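+/* Software implementation of the 802.3z (1000BASE-X) autonegotiation
+ * state machine, for fiber boards that cannot use the hardware autoneg
+ * block. The MR_* flags mirror the management interface bits defined
+ * by the standard.
+ */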
+struct tg3_fiber_aneginfo {
+ int state;
+#define ANEG_STATE_UNKNOWN 0
+#define ANEG_STATE_AN_ENABLE 1
+#define ANEG_STATE_RESTART_INIT 2
+#define ANEG_STATE_RESTART 3
+#define ANEG_STATE_DISABLE_LINK_OK 4
+#define ANEG_STATE_ABILITY_DETECT_INIT 5
+#define ANEG_STATE_ABILITY_DETECT 6
+#define ANEG_STATE_ACK_DETECT_INIT 7
+#define ANEG_STATE_ACK_DETECT 8
+#define ANEG_STATE_COMPLETE_ACK_INIT 9
+#define ANEG_STATE_COMPLETE_ACK 10
+#define ANEG_STATE_IDLE_DETECT_INIT 11
+#define ANEG_STATE_IDLE_DETECT 12
+#define ANEG_STATE_LINK_OK 13
+#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
+#define ANEG_STATE_NEXT_PAGE_WAIT 15
+
+ u32 flags;
+#define MR_AN_ENABLE 0x00000001
+#define MR_RESTART_AN 0x00000002
+#define MR_AN_COMPLETE 0x00000004
+#define MR_PAGE_RX 0x00000008
+#define MR_NP_LOADED 0x00000010
+#define MR_TOGGLE_TX 0x00000020
+#define MR_LP_ADV_FULL_DUPLEX 0x00000040
+#define MR_LP_ADV_HALF_DUPLEX 0x00000080
+#define MR_LP_ADV_SYM_PAUSE 0x00000100
+#define MR_LP_ADV_ASYM_PAUSE 0x00000200
+#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
+#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
+#define MR_LP_ADV_NEXT_PAGE 0x00001000
+#define MR_TOGGLE_RX 0x00002000
+#define MR_NP_RX 0x00004000
+
+#define MR_LINK_OK 0x80000000
+
+ unsigned long link_time, cur_time;
+
+ u32 ability_match_cfg;
+ int ability_match_count;
+
+ char ability_match, idle_match, ack_match;
+
+ u32 txconfig, rxconfig;
+#define ANEG_CFG_NP 0x00000080
+#define ANEG_CFG_ACK 0x00000040
+#define ANEG_CFG_RF2 0x00000020
+#define ANEG_CFG_RF1 0x00000010
+#define ANEG_CFG_PS2 0x00000001
+#define ANEG_CFG_PS1 0x00008000
+#define ANEG_CFG_HD 0x00004000
+#define ANEG_CFG_FD 0x00002000
+#define ANEG_CFG_INVAL 0x00001f06
+
+};
+#define ANEG_OK 0
+#define ANEG_DONE 1
+#define ANEG_TIMER_ENAB 2
+#define ANEG_FAILED -1
+
+#define ANEG_STATE_SETTLE_TIME 10000
+
+static int tg3_fiber_aneg_smachine(struct tg3 *tp,
+ struct tg3_fiber_aneginfo *ap)
+{
+ u16 flowctrl;
+ unsigned long delta;
+ u32 rx_cfg_reg;
+ int ret;
+
+ if (ap->state == ANEG_STATE_UNKNOWN) {
+ ap->rxconfig = 0;
+ ap->link_time = 0;
+ ap->cur_time = 0;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->idle_match = 0;
+ ap->ack_match = 0;
+ }
+ ap->cur_time++;
+
+ if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
+ rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
+
+ if (rx_cfg_reg != ap->ability_match_cfg) {
+ ap->ability_match_cfg = rx_cfg_reg;
+ ap->ability_match = 0;
+ ap->ability_match_count = 0;
+ } else {
+ if (++ap->ability_match_count > 1) {
+ ap->ability_match = 1;
+ ap->ability_match_cfg = rx_cfg_reg;
+ }
+ }
+ if (rx_cfg_reg & ANEG_CFG_ACK)
+ ap->ack_match = 1;
+ else
+ ap->ack_match = 0;
+
+ ap->idle_match = 0;
+ } else {
+ ap->idle_match = 1;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->ack_match = 0;
+
+ rx_cfg_reg = 0;
+ }
+
+ ap->rxconfig = rx_cfg_reg;
+ ret = ANEG_OK;
+
+ switch (ap->state) {
+ case ANEG_STATE_UNKNOWN:
+ if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
+ ap->state = ANEG_STATE_AN_ENABLE;
+
+ /* fallthru */
+ case ANEG_STATE_AN_ENABLE:
+ ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
+ if (ap->flags & MR_AN_ENABLE) {
+ ap->link_time = 0;
+ ap->cur_time = 0;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->idle_match = 0;
+ ap->ack_match = 0;
+
+ ap->state = ANEG_STATE_RESTART_INIT;
+ } else {
+ ap->state = ANEG_STATE_DISABLE_LINK_OK;
+ }
+ break;
+
+ case ANEG_STATE_RESTART_INIT:
+ ap->link_time = ap->cur_time;
+ ap->flags &= ~(MR_NP_LOADED);
+ ap->txconfig = 0;
+ tw32(MAC_TX_AUTO_NEG, 0);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ret = ANEG_TIMER_ENAB;
+ ap->state = ANEG_STATE_RESTART;
+
+ /* fallthru */
+ case ANEG_STATE_RESTART:
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME)
+ ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
+ else
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_DISABLE_LINK_OK:
+ ret = ANEG_DONE;
+ break;
+
+ case ANEG_STATE_ABILITY_DETECT_INIT:
+ ap->flags &= ~(MR_TOGGLE_TX);
+ ap->txconfig = ANEG_CFG_FD;
+ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ if (flowctrl & ADVERTISE_1000XPAUSE)
+ ap->txconfig |= ANEG_CFG_PS1;
+ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+ ap->txconfig |= ANEG_CFG_PS2;
+ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_ABILITY_DETECT;
+ break;
+
+ case ANEG_STATE_ABILITY_DETECT:
+ if (ap->ability_match != 0 && ap->rxconfig != 0)
+ ap->state = ANEG_STATE_ACK_DETECT_INIT;
+ break;
+
+ case ANEG_STATE_ACK_DETECT_INIT:
+ ap->txconfig |= ANEG_CFG_ACK;
+ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_ACK_DETECT;
+
+ /* fallthru */
+ case ANEG_STATE_ACK_DETECT:
+ if (ap->ack_match != 0) {
+ if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
+ (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
+ ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
+ } else {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ }
+ } else if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ }
+ break;
+
+ case ANEG_STATE_COMPLETE_ACK_INIT:
+ if (ap->rxconfig & ANEG_CFG_INVAL) {
+ ret = ANEG_FAILED;
+ break;
+ }
+ ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
+ MR_LP_ADV_HALF_DUPLEX |
+ MR_LP_ADV_SYM_PAUSE |
+ MR_LP_ADV_ASYM_PAUSE |
+ MR_LP_ADV_REMOTE_FAULT1 |
+ MR_LP_ADV_REMOTE_FAULT2 |
+ MR_LP_ADV_NEXT_PAGE |
+ MR_TOGGLE_RX |
+ MR_NP_RX);
+ if (ap->rxconfig & ANEG_CFG_FD)
+ ap->flags |= MR_LP_ADV_FULL_DUPLEX;
+ if (ap->rxconfig & ANEG_CFG_HD)
+ ap->flags |= MR_LP_ADV_HALF_DUPLEX;
+ if (ap->rxconfig & ANEG_CFG_PS1)
+ ap->flags |= MR_LP_ADV_SYM_PAUSE;
+ if (ap->rxconfig & ANEG_CFG_PS2)
+ ap->flags |= MR_LP_ADV_ASYM_PAUSE;
+ if (ap->rxconfig & ANEG_CFG_RF1)
+ ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
+ if (ap->rxconfig & ANEG_CFG_RF2)
+ ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
+ if (ap->rxconfig & ANEG_CFG_NP)
+ ap->flags |= MR_LP_ADV_NEXT_PAGE;
+
+ ap->link_time = ap->cur_time;
+
+ ap->flags ^= (MR_TOGGLE_TX);
+ if (ap->rxconfig & 0x0008)
+ ap->flags |= MR_TOGGLE_RX;
+ if (ap->rxconfig & ANEG_CFG_NP)
+ ap->flags |= MR_NP_RX;
+ ap->flags |= MR_PAGE_RX;
+
+ ap->state = ANEG_STATE_COMPLETE_ACK;
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_COMPLETE_ACK:
+ if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ break;
+ }
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME) {
+ if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
+ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+ } else {
+ if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
+ !(ap->flags & MR_NP_RX)) {
+ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+ } else {
+ ret = ANEG_FAILED;
+ }
+ }
+ }
+ break;
+
+ case ANEG_STATE_IDLE_DETECT_INIT:
+ ap->link_time = ap->cur_time;
+ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_IDLE_DETECT;
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_IDLE_DETECT:
+ if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ break;
+ }
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME) {
+ /* XXX another gem from the Broadcom driver :( */
+ ap->state = ANEG_STATE_LINK_OK;
+ }
+ break;
+
+ case ANEG_STATE_LINK_OK:
+ ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
+ ret = ANEG_DONE;
+ break;
+
+ case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
+ /* ??? unimplemented */
+ break;
+
+ case ANEG_STATE_NEXT_PAGE_WAIT:
+ /* ??? unimplemented */
+ break;
+
+ default:
+ ret = ANEG_FAILED;
+ break;
+ }
+
+ return ret;
+}
+
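+/* Run the state machine above to completion. Each loop iteration is
+ * one roughly 1 usec tick, so the 195000-tick bound gives autoneg on
+ * the order of 195 msec to finish before we give up.
+ */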
+static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
+{
+ int res = 0;
+ struct tg3_fiber_aneginfo aninfo;
+ int status = ANEG_FAILED;
+ unsigned int tick;
+ u32 tmp;
+
+ tw32_f(MAC_TX_AUTO_NEG, 0);
+
+ tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+ tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
+ udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
+ udelay(40);
+
+ memset(&aninfo, 0, sizeof(aninfo));
+ aninfo.flags |= MR_AN_ENABLE;
+ aninfo.state = ANEG_STATE_UNKNOWN;
+ aninfo.cur_time = 0;
+ tick = 0;
+ while (++tick < 195000) {
+ status = tg3_fiber_aneg_smachine(tp, &aninfo);
+ if (status == ANEG_DONE || status == ANEG_FAILED)
+ break;
+
+ udelay(1);
+ }
+
+ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ *txflags = aninfo.txconfig;
+ *rxflags = aninfo.flags;
+
+ if (status == ANEG_DONE &&
+ (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
+ MR_LP_ADV_FULL_DUPLEX)))
+ res = 1;
+
+ return res;
+}
+
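+/* Reset and reinitialize the external BCM8002 SerDes PHY. The write
+ * sequence below (annotated inline) resets the PHY, programs the PLL
+ * and comdet settings, pulses POR, and finally deselects the channel
+ * registers so that the PHY ID can be read later.
+ */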
+static void tg3_init_bcm8002(struct tg3 *tp)
+{
+ u32 mac_status = tr32(MAC_STATUS);
+ int i;
+
+	/* Reset on first-time init, or when we already have a link. */
+ if (tg3_flag(tp, INIT_COMPLETE) &&
+ !(mac_status & MAC_STATUS_PCS_SYNCED))
+ return;
+
+ /* Set PLL lock range. */
+ tg3_writephy(tp, 0x16, 0x8007);
+
+ /* SW reset */
+ tg3_writephy(tp, MII_BMCR, BMCR_RESET);
+
+ /* Wait for reset to complete. */
+ /* XXX schedule_timeout() ... */
+ for (i = 0; i < 500; i++)
+ udelay(10);
+
+ /* Config mode; select PMA/Ch 1 regs. */
+ tg3_writephy(tp, 0x10, 0x8411);
+
+ /* Enable auto-lock and comdet, select txclk for tx. */
+ tg3_writephy(tp, 0x11, 0x0a10);
+
+ tg3_writephy(tp, 0x18, 0x00a0);
+ tg3_writephy(tp, 0x16, 0x41ff);
+
+ /* Assert and deassert POR. */
+ tg3_writephy(tp, 0x13, 0x0400);
+ udelay(40);
+ tg3_writephy(tp, 0x13, 0x0000);
+
+ tg3_writephy(tp, 0x11, 0x0a50);
+ udelay(40);
+ tg3_writephy(tp, 0x11, 0x0a10);
+
+ /* Wait for signal to stabilize */
+ /* XXX schedule_timeout() ... */
+ for (i = 0; i < 15000; i++)
+ udelay(10);
+
+ /* Deselect the channel register so we can read the PHYID
+ * later.
+ */
+ tg3_writephy(tp, 0x10, 0x8011);
+}
+
+static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+{
+ u16 flowctrl;
+ u32 sg_dig_ctrl, sg_dig_status;
+ u32 serdes_cfg, expected_sg_dig_ctrl;
+ int workaround, port_a;
+ int current_link_up;
+
+ serdes_cfg = 0;
+ expected_sg_dig_ctrl = 0;
+ workaround = 0;
+ port_a = 1;
+ current_link_up = 0;
+
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+ workaround = 1;
+ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+ port_a = 0;
+
+ /* preserve bits 0-11,13,14 for signal pre-emphasis */
+ /* preserve bits 20-23 for voltage regulator */
+ serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
+ }
+
+ sg_dig_ctrl = tr32(SG_DIG_CTRL);
+
+ if (tp->link_config.autoneg != AUTONEG_ENABLE) {
+ if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
+ if (workaround) {
+ u32 val = serdes_cfg;
+
+ if (port_a)
+ val |= 0xc010000;
+ else
+ val |= 0x4010000;
+ tw32_f(MAC_SERDES_CFG, val);
+ }
+
+ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+ }
+ if (mac_status & MAC_STATUS_PCS_SYNCED) {
+ tg3_setup_flow_control(tp, 0, 0);
+ current_link_up = 1;
+ }
+ goto out;
+ }
+
+ /* Want auto-negotiation. */
+ expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
+
+ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ if (flowctrl & ADVERTISE_1000XPAUSE)
+ expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
+ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+ expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
+
+ if (sg_dig_ctrl != expected_sg_dig_ctrl) {
+ if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
+ tp->serdes_counter &&
+ ((mac_status & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_RCVD_CFG)) ==
+ MAC_STATUS_PCS_SYNCED)) {
+ tp->serdes_counter--;
+ current_link_up = 1;
+ goto out;
+ }
+restart_autoneg:
+ if (workaround)
+ tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
+ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
+ udelay(5);
+ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
+
+ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET)) {
+ sg_dig_status = tr32(SG_DIG_STATUS);
+ mac_status = tr32(MAC_STATUS);
+
+ if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
+ (mac_status & MAC_STATUS_PCS_SYNCED)) {
+ u32 local_adv = 0, remote_adv = 0;
+
+ if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
+ local_adv |= ADVERTISE_1000XPAUSE;
+ if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
+ local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+ if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
+ remote_adv |= LPA_1000XPAUSE;
+ if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE_ASYM;
+
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+ current_link_up = 1;
+ tp->serdes_counter = 0;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
+ if (tp->serdes_counter)
+ tp->serdes_counter--;
+ else {
+ if (workaround) {
+ u32 val = serdes_cfg;
+
+ if (port_a)
+ val |= 0xc010000;
+ else
+ val |= 0x4010000;
+
+ tw32_f(MAC_SERDES_CFG, val);
+ }
+
+ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+ udelay(40);
+
+				/* Parallel detection: the link is up
+				 * only if we have PCS_SYNC and are not
+				 * receiving config code words.
+				 */
+ mac_status = tr32(MAC_STATUS);
+ if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
+ !(mac_status & MAC_STATUS_RCVD_CFG)) {
+ tg3_setup_flow_control(tp, 0, 0);
+ current_link_up = 1;
+ tp->phy_flags |=
+ TG3_PHYFLG_PARALLEL_DETECT;
+ tp->serdes_counter =
+ SERDES_PARALLEL_DET_TIMEOUT;
+ } else
+ goto restart_autoneg;
+ }
+ }
+ } else {
+ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+
+out:
+ return current_link_up;
+}
+
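+/* Fiber link setup without the SG_DIG hardware autoneg block: run the
+ * software 802.3z state machine via fiber_autoneg(), or, if autoneg is
+ * disabled, simply force a 1000FD link.
+ */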
+static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+{
+ int current_link_up = 0;
+
+ if (!(mac_status & MAC_STATUS_PCS_SYNCED))
+ goto out;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ u32 txflags, rxflags;
+ int i;
+
+ if (fiber_autoneg(tp, &txflags, &rxflags)) {
+ u32 local_adv = 0, remote_adv = 0;
+
+ if (txflags & ANEG_CFG_PS1)
+ local_adv |= ADVERTISE_1000XPAUSE;
+ if (txflags & ANEG_CFG_PS2)
+ local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+ if (rxflags & MR_LP_ADV_SYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE;
+ if (rxflags & MR_LP_ADV_ASYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE_ASYM;
+
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+ current_link_up = 1;
+ }
+ for (i = 0; i < 30; i++) {
+ udelay(20);
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(40);
+ if ((tr32(MAC_STATUS) &
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED)) == 0)
+ break;
+ }
+
+ mac_status = tr32(MAC_STATUS);
+ if (current_link_up == 0 &&
+ (mac_status & MAC_STATUS_PCS_SYNCED) &&
+ !(mac_status & MAC_STATUS_RCVD_CFG))
+ current_link_up = 1;
+ } else {
+ tg3_setup_flow_control(tp, 0, 0);
+
+ /* Forcing 1000FD link up. */
+ current_link_up = 1;
+
+ tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
+ udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+ }
+
+out:
+ return current_link_up;
+}
+
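+/* Full fiber link setup: put the MAC into TBI mode, run hardware or
+ * software autonegotiation as appropriate, then report the result and
+ * drive the link LED overrides.
+ */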
+static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
+{
+ u32 orig_pause_cfg;
+ u16 orig_active_speed;
+ u8 orig_active_duplex;
+ u32 mac_status;
+ int current_link_up;
+ int i;
+
+ orig_pause_cfg = tp->link_config.active_flowctrl;
+ orig_active_speed = tp->link_config.active_speed;
+ orig_active_duplex = tp->link_config.active_duplex;
+
+ if (!tg3_flag(tp, HW_AUTONEG) &&
+ netif_carrier_ok(tp->dev) &&
+ tg3_flag(tp, INIT_COMPLETE)) {
+ mac_status = tr32(MAC_STATUS);
+ mac_status &= (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_RCVD_CFG);
+ if (mac_status == (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET)) {
+ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ return 0;
+ }
+ }
+
+ tw32_f(MAC_TX_AUTO_NEG, 0);
+
+ tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ if (tp->phy_id == TG3_PHY_ID_BCM8002)
+ tg3_init_bcm8002(tp);
+
+ /* Enable link change event even when serdes polling. */
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ udelay(40);
+
+ current_link_up = 0;
+ mac_status = tr32(MAC_STATUS);
+
+ if (tg3_flag(tp, HW_AUTONEG))
+ current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
+ else
+ current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
+
+ tp->napi[0].hw_status->status =
+ (SD_STATUS_UPDATED |
+ (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
+
+ for (i = 0; i < 100; i++) {
+ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(5);
+ if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_LNKSTATE_CHANGED)) == 0)
+ break;
+ }
+
+ mac_status = tr32(MAC_STATUS);
+ if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
+ current_link_up = 0;
+ if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+ tp->serdes_counter == 0) {
+ tw32_f(MAC_MODE, (tp->mac_mode |
+ MAC_MODE_SEND_CONFIGS));
+ udelay(1);
+ tw32_f(MAC_MODE, tp->mac_mode);
+ }
+ }
+
+ if (current_link_up == 1) {
+ tp->link_config.active_speed = SPEED_1000;
+ tp->link_config.active_duplex = DUPLEX_FULL;
+ tw32(MAC_LED_CTRL, (tp->led_ctrl |
+ LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON));
+ } else {
+ tp->link_config.active_speed = SPEED_INVALID;
+ tp->link_config.active_duplex = DUPLEX_INVALID;
+ tw32(MAC_LED_CTRL, (tp->led_ctrl |
+ LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_TRAFFIC_OVERRIDE));
+ }
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ } else {
+ u32 now_pause_cfg = tp->link_config.active_flowctrl;
+ if (orig_pause_cfg != now_pause_cfg ||
+ orig_active_speed != tp->link_config.active_speed ||
+ orig_active_duplex != tp->link_config.active_duplex)
+ tg3_link_report(tp);
+ }
+
+ return 0;
+}
+
+static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
+{
+ int current_link_up, err = 0;
+ u32 bmsr, bmcr;
+ u16 current_speed;
+ u8 current_duplex;
+ u32 local_adv, remote_adv;
+
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tw32(MAC_EVENT, 0);
+
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED));
+ udelay(40);
+
+ if (force_reset)
+ tg3_phy_reset(tp);
+
+ current_link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ bmsr |= BMSR_LSTATUS;
+ else
+ bmsr &= ~BMSR_LSTATUS;
+ }
+
+ err |= tg3_readphy(tp, MII_BMCR, &bmcr);
+
+ if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
+ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+ /* do nothing, just check for link up at the end */
+ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ u32 adv, new_adv;
+
+ err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+ new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM |
+ ADVERTISE_SLCT);
+
+ new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+
+ if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
+ new_adv |= ADVERTISE_1000XHALF;
+ if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
+ new_adv |= ADVERTISE_1000XFULL;
+
+ if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
+ tg3_writephy(tp, MII_BMCR, bmcr);
+
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+
+ return err;
+ }
+ } else {
+ u32 new_bmcr;
+
+ bmcr &= ~BMCR_SPEED1000;
+ new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
+
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_bmcr |= BMCR_FULLDPLX;
+
+ if (new_bmcr != bmcr) {
+ /* BMCR_SPEED1000 is a reserved bit that needs
+ * to be set on write.
+ */
+ new_bmcr |= BMCR_SPEED1000;
+
+ /* Force a linkdown */
+ if (netif_carrier_ok(tp->dev)) {
+ u32 adv;
+
+ err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+ adv &= ~(ADVERTISE_1000XFULL |
+ ADVERTISE_1000XHALF |
+ ADVERTISE_SLCT);
+ tg3_writephy(tp, MII_ADVERTISE, adv);
+ tg3_writephy(tp, MII_BMCR, bmcr |
+ BMCR_ANRESTART |
+ BMCR_ANENABLE);
+ udelay(10);
+ netif_carrier_off(tp->dev);
+ }
+ tg3_writephy(tp, MII_BMCR, new_bmcr);
+ bmcr = new_bmcr;
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5714) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ bmsr |= BMSR_LSTATUS;
+ else
+ bmsr &= ~BMSR_LSTATUS;
+ }
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+ }
+
+ if (bmsr & BMSR_LSTATUS) {
+ current_speed = SPEED_1000;
+ current_link_up = 1;
+ if (bmcr & BMCR_FULLDPLX)
+ current_duplex = DUPLEX_FULL;
+ else
+ current_duplex = DUPLEX_HALF;
+
+ local_adv = 0;
+ remote_adv = 0;
+
+ if (bmcr & BMCR_ANENABLE) {
+ u32 common;
+
+ err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
+ err |= tg3_readphy(tp, MII_LPA, &remote_adv);
+ common = local_adv & remote_adv;
+ if (common & (ADVERTISE_1000XHALF |
+ ADVERTISE_1000XFULL)) {
+ if (common & ADVERTISE_1000XFULL)
+ current_duplex = DUPLEX_FULL;
+ else
+ current_duplex = DUPLEX_HALF;
+ } else if (!tg3_flag(tp, 5780_CLASS)) {
+ /* Link is up via parallel detect */
+ } else {
+ current_link_up = 0;
+ }
+ }
+ }
+
+ if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+ if (tp->link_config.active_duplex == DUPLEX_HALF)
+ tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else {
+ netif_carrier_off(tp->dev);
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+ tg3_link_report(tp);
+ }
+ return err;
+}
+
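+/* Poll for parallel detection on serdes boards. If autoneg has timed
+ * out but we see signal detect without config code words, force the
+ * link up at 1000FD; conversely, once config code words reappear,
+ * switch autonegotiation back on.
+ */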
+static void tg3_serdes_parallel_detect(struct tg3 *tp)
+{
+ if (tp->serdes_counter) {
+ /* Give autoneg time to complete. */
+ tp->serdes_counter--;
+ return;
+ }
+
+ if (!netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE)) {
+ u32 bmcr;
+
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (bmcr & BMCR_ANENABLE) {
+ u32 phy1, phy2;
+
+ /* Select shadow register 0x1f */
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
+ tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
+
+ /* Select expansion interrupt status register */
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ MII_TG3_DSP_EXP1_INT_STAT);
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+
+ if ((phy1 & 0x10) && !(phy2 & 0x20)) {
+ /* We have signal detect and not receiving
+ * config code words, link is up by parallel
+ * detection.
+ */
+
+ bmcr &= ~BMCR_ANENABLE;
+ bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+ tg3_writephy(tp, MII_BMCR, bmcr);
+ tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
+ }
+ }
+ } else if (netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+ u32 phy2;
+
+ /* Select expansion interrupt status register */
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ MII_TG3_DSP_EXP1_INT_STAT);
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+ if (phy2 & 0x20) {
+ u32 bmcr;
+
+ /* Config code words received, turn on autoneg. */
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
+
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+
+ }
+ }
+}
+
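+/* Top-level link setup dispatcher. Select the fiber, fiber-MII or
+ * copper flavor, then update the MAC slot time, statistics coalescing
+ * and ASPM threshold settings that depend on the negotiated link.
+ */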
+static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+{
+ u32 val;
+ int err;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ err = tg3_setup_fiber_phy(tp, force_reset);
+ else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ err = tg3_setup_fiber_mii_phy(tp, force_reset);
+ else
+ err = tg3_setup_copper_phy(tp, force_reset);
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ u32 scale;
+
+ val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
+ if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
+ scale = 65;
+ else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
+ scale = 6;
+ else
+ scale = 12;
+
+ val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
+ val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+ }
+
+ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val |= tr32(MAC_TX_LENGTHS) &
+ (TX_LENGTHS_JMB_FRM_LEN_MSK |
+ TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+ if (tp->link_config.active_speed == SPEED_1000 &&
+ tp->link_config.active_duplex == DUPLEX_HALF)
+ tw32(MAC_TX_LENGTHS, val |
+ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
+ else
+ tw32(MAC_TX_LENGTHS, val |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ if (netif_carrier_ok(tp->dev)) {
+ tw32(HOSTCC_STAT_COAL_TICKS,
+ tp->coal.stats_block_coalesce_usecs);
+ } else {
+ tw32(HOSTCC_STAT_COAL_TICKS, 0);
+ }
+ }
+
+ if (tg3_flag(tp, ASPM_WORKAROUND)) {
+ val = tr32(PCIE_PWR_MGMT_THRESH);
+ if (!netif_carrier_ok(tp->dev))
+ val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
+ tp->pwrmgmt_thresh;
+ else
+ val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
+ tw32(PCIE_PWR_MGMT_THRESH, val);
+ }
+
+ return err;
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+ return tp->irq_sync;
+}
+
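+/* Bulk-read a block of registers into the dump buffer. The offset is
+ * added to both the destination pointer and the register address, so
+ * register (off + i) lands at dst[(off + i) / 4].
+ */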
+static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
+{
+ int i;
+
+ dst = (u32 *)((u8 *)dst + off);
+ for (i = 0; i < len; i += sizeof(u32))
+ *dst++ = tr32(off + i);
+}
+
+static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
+{
+ tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
+ tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
+ tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
+ tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
+ tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
+ tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
+ tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
+ tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
+ tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
+ tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
+ tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
+
+ if (tg3_flag(tp, SUPPORT_MSIX))
+ tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
+
+ tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
+ tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
+ tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
+ tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
+ }
+
+ tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
+ tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
+ tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
+ tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
+
+ if (tg3_flag(tp, NVRAM))
+ tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
+}
+
+static void tg3_dump_state(struct tg3 *tp)
+{
+ int i;
+ u32 *regs;
+
+ regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+ if (!regs) {
+ netdev_err(tp->dev, "Failed allocating register dump buffer\n");
+ return;
+ }
+
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ /* Read up to but not including private PCI registers */
+ for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
+ regs[i / sizeof(u32)] = tr32(i);
+ } else
+ tg3_dump_legacy_regs(tp, regs);
+
+ for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
+ if (!regs[i + 0] && !regs[i + 1] &&
+ !regs[i + 2] && !regs[i + 3])
+ continue;
+
+ netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ i * 4,
+ regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
+ }
+
+ kfree(regs);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ /* SW status block */
+ netdev_err(tp->dev,
+ "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+ i,
+ tnapi->hw_status->status,
+ tnapi->hw_status->status_tag,
+ tnapi->hw_status->rx_jumbo_consumer,
+ tnapi->hw_status->rx_consumer,
+ tnapi->hw_status->rx_mini_consumer,
+ tnapi->hw_status->idx[0].rx_producer,
+ tnapi->hw_status->idx[0].tx_consumer);
+
+ netdev_err(tp->dev,
+ "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
+ i,
+ tnapi->last_tag, tnapi->last_irq_tag,
+ tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
+ tnapi->rx_rcb_ptr,
+ tnapi->prodring.rx_std_prod_idx,
+ tnapi->prodring.rx_std_cons_idx,
+ tnapi->prodring.rx_jmb_prod_idx,
+ tnapi->prodring.rx_jmb_cons_idx);
+ }
+}
+
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+ BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
+ tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+ netdev_warn(tp->dev,
+ "The system may be re-ordering memory-mapped I/O "
+ "cycles to the network device, attempting to recover. "
+ "Please report the problem to the driver maintainer "
+ "and include system chipset information.\n");
+
+ spin_lock(&tp->lock);
+ tg3_flag_set(tp, TX_RECOVERY_PENDING);
+ spin_unlock(&tp->lock);
+}
+
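+/* Free TX descriptors = our budget (tx_pending) minus descriptors in
+ * flight. The (prod - cons) difference is taken modulo the ring size,
+ * so the math stays correct across index wrap: e.g. with a 512-entry
+ * ring (TG3_TX_RING_SIZE), prod = 3 and cons = 509 give
+ * (3 - 509) & 511 = 6 descriptors in flight.
+ */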
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+ return tnapi->tx_pending -
+ ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
+}
+
+/* Tigon3 never reports partial packet sends. So we do not
+ * need special logic to handle SKBs that have not had all
+ * of their frags sent yet, like SunGEM does.
+ */
+static void tg3_tx(struct tg3_napi *tnapi)
+{
+ struct tg3 *tp = tnapi->tp;
+ u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+ u32 sw_idx = tnapi->tx_cons;
+ struct netdev_queue *txq;
+ int index = tnapi - tp->napi;
+
+ if (tg3_flag(tp, ENABLE_TSS))
+ index--;
+
+ txq = netdev_get_tx_queue(tp->dev, index);
+
+ while (sw_idx != hw_idx) {
+ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ struct sk_buff *skb = ri->skb;
+ int i, tx_bug = 0;
+
+ if (unlikely(skb == NULL)) {
+ tg3_tx_recover(tp);
+ return;
+ }
+
+ pci_unmap_single(tp->pdev,
+ dma_unmap_addr(ri, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ ri->skb = NULL;
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
+ sw_idx = NEXT_TX(sw_idx);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ ri = &tnapi->tx_buffers[sw_idx];
+ if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+ tx_bug = 1;
+
+ pci_unmap_page(tp->pdev,
+ dma_unmap_addr(ri, mapping),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
+ sw_idx = NEXT_TX(sw_idx);
+ }
+
+ dev_kfree_skb(skb);
+
+ if (unlikely(tx_bug)) {
+ tg3_tx_recover(tp);
+ return;
+ }
+ }
+
+ tnapi->tx_cons = sw_idx;
+
+ /* Need to make the tx_cons update visible to tg3_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that tg3_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+ if (!ri->skb)
+ return;
+
+ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
+ map_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(ri->skb);
+ ri->skb = NULL;
+}
+
+/* Returns size of skb allocated or < 0 on error.
+ *
+ * We only need to fill in the address because the other members
+ * of the RX descriptor are invariant, see tg3_init_rings.
+ *
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
+ * posting buffers we only dirty the first cache line of the RX
+ * descriptor (containing the address). Whereas for the RX status
+ * buffers the cpu only reads the last cacheline of the RX descriptor
+ * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
+ */
+static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+ u32 opaque_key, u32 dest_idx_unmasked)
+{
+ struct tg3_rx_buffer_desc *desc;
+ struct ring_info *map;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int skb_size, dest_idx;
+
+ switch (opaque_key) {
+ case RXD_OPAQUE_RING_STD:
+ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+ desc = &tpr->rx_std[dest_idx];
+ map = &tpr->rx_std_buffers[dest_idx];
+ skb_size = tp->rx_pkt_map_sz;
+ break;
+
+ case RXD_OPAQUE_RING_JUMBO:
+ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+ desc = &tpr->rx_jmb[dest_idx].std;
+ map = &tpr->rx_jmb_buffers[dest_idx];
+ skb_size = TG3_RX_JMB_MAP_SZ;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Do not overwrite any of the map or rp information
+ * until we are sure we can commit to a new buffer.
+ *
+ * Callers depend upon this behavior and assume that
+ * we leave everything unchanged if we fail.
+ */
+ skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
+
+ mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(tp->pdev, mapping)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ map->skb = skb;
+ dma_unmap_addr_set(map, mapping, mapping);
+
+ desc->addr_hi = ((u64)mapping >> 32);
+ desc->addr_lo = ((u64)mapping & 0xffffffff);
+
+ return skb_size;
+}
+
+/* We only need to move over in the address because the other
+ * members of the RX descriptor are invariant. See notes above
+ * tg3_alloc_rx_skb for full details.
+ */
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+ struct tg3_rx_prodring_set *dpr,
+ u32 opaque_key, int src_idx,
+ u32 dest_idx_unmasked)
+{
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_rx_buffer_desc *src_desc, *dest_desc;
+ struct ring_info *src_map, *dest_map;
+ struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
+ int dest_idx;
+
+ switch (opaque_key) {
+ case RXD_OPAQUE_RING_STD:
+ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+ dest_desc = &dpr->rx_std[dest_idx];
+ dest_map = &dpr->rx_std_buffers[dest_idx];
+ src_desc = &spr->rx_std[src_idx];
+ src_map = &spr->rx_std_buffers[src_idx];
+ break;
+
+ case RXD_OPAQUE_RING_JUMBO:
+ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+ dest_desc = &dpr->rx_jmb[dest_idx].std;
+ dest_map = &dpr->rx_jmb_buffers[dest_idx];
+ src_desc = &spr->rx_jmb[src_idx].std;
+ src_map = &spr->rx_jmb_buffers[src_idx];
+ break;
+
+ default:
+ return;
+ }
+
+ dest_map->skb = src_map->skb;
+ dma_unmap_addr_set(dest_map, mapping,
+ dma_unmap_addr(src_map, mapping));
+ dest_desc->addr_hi = src_desc->addr_hi;
+ dest_desc->addr_lo = src_desc->addr_lo;
+
+ /* Ensure that the update to the skb happens after the physical
+ * addresses have been transferred to the new BD location.
+ */
+ smp_wmb();
+
+ src_map->skb = NULL;
+}
+
+/* The RX ring scheme is composed of multiple rings which post fresh
+ * buffers to the chip, and one special ring the chip uses to report
+ * status back to the host.
+ *
+ * The special ring reports the status of received packets to the
+ * host. The chip does not write into the original descriptor the
+ * RX buffer was obtained from. The chip simply takes the original
+ * descriptor as provided by the host, updates the status and length
+ * field, then writes this into the next status ring entry.
+ *
+ * Each ring the host uses to post buffers to the chip is described
+ * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
+ * it is first placed into the on-chip ram. When the packet's length
+ * is known, it walks down the TG3_BDINFO entries to select the ring.
+ * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
+ * whose MAXLEN covers the new packet's length is chosen.
+ *
+ * The "separate ring for rx status" scheme may sound odd, but it makes
+ * sense from a cache coherency perspective. If only the host writes
+ * to the buffer post rings, and only the chip writes to the rx status
+ * rings, then cache lines never move beyond shared-modified state.
+ * If both the host and chip were to write into the same ring, cache line
+ * eviction could occur since both entities want it in an exclusive state.
+ */
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
+{
+ struct tg3 *tp = tnapi->tp;
+ u32 work_mask, rx_std_posted = 0;
+ u32 std_prod_idx, jmb_prod_idx;
+ u32 sw_idx = tnapi->rx_rcb_ptr;
+ u16 hw_idx;
+ int received;
+ struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
+
+ hw_idx = *(tnapi->rx_rcb_prod_idx);
+ /*
+ * We need to order the read of hw_idx and the read of
+ * the opaque cookie.
+ */
+ rmb();
+ work_mask = 0;
+ received = 0;
+ std_prod_idx = tpr->rx_std_prod_idx;
+ jmb_prod_idx = tpr->rx_jmb_prod_idx;
+ while (sw_idx != hw_idx && budget > 0) {
+ struct ring_info *ri;
+ struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
+ unsigned int len;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 opaque_key, desc_idx, *post_ptr;
+
+ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+ if (opaque_key == RXD_OPAQUE_RING_STD) {
+ ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
+ dma_addr = dma_unmap_addr(ri, mapping);
+ skb = ri->skb;
+ post_ptr = &std_prod_idx;
+ rx_std_posted++;
+ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+ ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
+ dma_addr = dma_unmap_addr(ri, mapping);
+ skb = ri->skb;
+ post_ptr = &jmb_prod_idx;
+ } else
+ goto next_pkt_nopost;
+
+ work_mask |= opaque_key;
+
+ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ drop_it:
+ tg3_recycle_rx(tnapi, tpr, opaque_key,
+ desc_idx, *post_ptr);
+ drop_it_no_recycle:
+			/* The hardware tracks the other drop statistics. */
+ tp->rx_dropped++;
+ goto next_pkt;
+ }
+
+ len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
+ ETH_FCS_LEN;
+
+ if (len > TG3_RX_COPY_THRESH(tp)) {
+ int skb_size;
+
+ skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+ *post_ptr);
+ if (skb_size < 0)
+ goto drop_it;
+
+ pci_unmap_single(tp->pdev, dma_addr, skb_size,
+ PCI_DMA_FROMDEVICE);
+
+ /* Ensure that the update to the skb happens
+ * after the usage of the old DMA mapping.
+ */
+ smp_wmb();
+
+ ri->skb = NULL;
+
+ skb_put(skb, len);
+ } else {
+ struct sk_buff *copy_skb;
+
+ tg3_recycle_rx(tnapi, tpr, opaque_key,
+ desc_idx, *post_ptr);
+
+ copy_skb = netdev_alloc_skb(tp->dev, len +
+ TG3_RAW_IP_ALIGN);
+ if (copy_skb == NULL)
+ goto drop_it_no_recycle;
+
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+ skb_put(copy_skb, len);
+ pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(skb, copy_skb->data, len);
+ pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ if ((tp->dev->features & NETIF_F_RXCSUM) &&
+ (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+ (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+ >> RXD_TCPCSUM_SHIFT) == 0xffff))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb->protocol = eth_type_trans(skb, tp->dev);
+
+ if (len > (tp->dev->mtu + ETH_HLEN) &&
+ skb->protocol != htons(ETH_P_8021Q)) {
+ dev_kfree_skb(skb);
+ goto drop_it_no_recycle;
+ }
+
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb,
+ desc->err_vlan & RXD_VLAN_MASK);
+
+ napi_gro_receive(&tnapi->napi, skb);
+
+ received++;
+ budget--;
+
+next_pkt:
+ (*post_ptr)++;
+
+ if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
+ tpr->rx_std_prod_idx = std_prod_idx &
+ tp->rx_std_ring_mask;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
+ work_mask &= ~RXD_OPAQUE_RING_STD;
+ rx_std_posted = 0;
+ }
+next_pkt_nopost:
+ sw_idx++;
+ sw_idx &= tp->rx_ret_ring_mask;
+
+ /* Refresh hw_idx to see if there is new work */
+ if (sw_idx == hw_idx) {
+ hw_idx = *(tnapi->rx_rcb_prod_idx);
+ rmb();
+ }
+ }
+
+ /* ACK the status ring. */
+ tnapi->rx_rcb_ptr = sw_idx;
+ tw32_rx_mbox(tnapi->consmbox, sw_idx);
+
+ /* Refill RX ring(s). */
+ if (!tg3_flag(tp, ENABLE_RSS)) {
+ if (work_mask & RXD_OPAQUE_RING_STD) {
+ tpr->rx_std_prod_idx = std_prod_idx &
+ tp->rx_std_ring_mask;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
+ }
+ if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+ tpr->rx_jmb_prod_idx = jmb_prod_idx &
+ tp->rx_jmb_ring_mask;
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ tpr->rx_jmb_prod_idx);
+ }
+ mmiowb();
+ } else if (work_mask) {
+ /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+ * updated before the producer indices can be updated.
+ */
+ smp_wmb();
+
+ tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
+ tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
+
+ if (tnapi != &tp->napi[1])
+ napi_schedule(&tp->napi[1].napi);
+ }
+
+ return received;
+}
+
+static void tg3_poll_link(struct tg3 *tp)
+{
+ /* handle link change and other phy events */
+ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+ struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
+ if (sblk->status & SD_STATUS_LINK_CHG) {
+ sblk->status = SD_STATUS_UPDATED |
+ (sblk->status & ~SD_STATUS_LINK_CHG);
+ spin_lock(&tp->lock);
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED));
+ udelay(40);
+ } else
+ tg3_setup_phy(tp, 0);
+ spin_unlock(&tp->lock);
+ }
+ }
+}
+
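+/* Move rx buffers posted in a source producer ring set over to a
+ * destination set, stopping with -ENOSPC if a destination slot is
+ * still occupied. Used in RSS mode to funnel buffers recycled by
+ * the per-vector rings back to the vector 0 rings that feed the
+ * hardware.
+ */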
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+ struct tg3_rx_prodring_set *dpr,
+ struct tg3_rx_prodring_set *spr)
+{
+ u32 si, di, cpycnt, src_prod_idx;
+ int i, err = 0;
+
+ while (1) {
+ src_prod_idx = spr->rx_std_prod_idx;
+
+ /* Make sure updates to the rx_std_buffers[] entries and the
+ * standard producer index are seen in the correct order.
+ */
+ smp_rmb();
+
+ if (spr->rx_std_cons_idx == src_prod_idx)
+ break;
+
+ if (spr->rx_std_cons_idx < src_prod_idx)
+ cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+ else
+ cpycnt = tp->rx_std_ring_mask + 1 -
+ spr->rx_std_cons_idx;
+
+ cpycnt = min(cpycnt,
+ tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
+
+ si = spr->rx_std_cons_idx;
+ di = dpr->rx_std_prod_idx;
+
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_std_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_std_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
+ memcpy(&dpr->rx_std_buffers[di],
+ &spr->rx_std_buffers[si],
+ cpycnt * sizeof(struct ring_info));
+
+ for (i = 0; i < cpycnt; i++, di++, si++) {
+ struct tg3_rx_buffer_desc *sbd, *dbd;
+ sbd = &spr->rx_std[si];
+ dbd = &dpr->rx_std[di];
+ dbd->addr_hi = sbd->addr_hi;
+ dbd->addr_lo = sbd->addr_lo;
+ }
+
+ spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
+ tp->rx_std_ring_mask;
+ dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
+ tp->rx_std_ring_mask;
+ }
+
+ while (1) {
+ src_prod_idx = spr->rx_jmb_prod_idx;
+
+ /* Make sure updates to the rx_jmb_buffers[] entries and
+ * the jumbo producer index are seen in the correct order.
+ */
+ smp_rmb();
+
+ if (spr->rx_jmb_cons_idx == src_prod_idx)
+ break;
+
+ if (spr->rx_jmb_cons_idx < src_prod_idx)
+ cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+ else
+ cpycnt = tp->rx_jmb_ring_mask + 1 -
+ spr->rx_jmb_cons_idx;
+
+ cpycnt = min(cpycnt,
+ tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
+
+ si = spr->rx_jmb_cons_idx;
+ di = dpr->rx_jmb_prod_idx;
+
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_jmb_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_jmb_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
+ memcpy(&dpr->rx_jmb_buffers[di],
+ &spr->rx_jmb_buffers[si],
+ cpycnt * sizeof(struct ring_info));
+
+ for (i = 0; i < cpycnt; i++, di++, si++) {
+ struct tg3_rx_buffer_desc *sbd, *dbd;
+ sbd = &spr->rx_jmb[si].std;
+ dbd = &dpr->rx_jmb[di].std;
+ dbd->addr_hi = sbd->addr_hi;
+ dbd->addr_lo = sbd->addr_lo;
+ }
+
+ spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
+ tp->rx_jmb_ring_mask;
+ dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
+ tp->rx_jmb_ring_mask;
+ }
+
+ return err;
+}
+
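+/* Common NAPI work for tg3_poll() and tg3_poll_msix(): reap tx
+ * completions, run rx processing within the remaining budget, and,
+ * on the first RSS vector, gather buffers recycled by the other
+ * vectors and update the hardware rx producer mailboxes.
+ */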
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+ struct tg3 *tp = tnapi->tp;
+
+ /* run TX completion thread */
+ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
+ tg3_tx(tnapi);
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+ return work_done;
+ }
+
+ /* run RX thread, within the bounds set by NAPI.
+ * All RX "locking" is done by ensuring outside
+ * code synchronizes with tg3->napi.poll()
+ */
+ if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ work_done += tg3_rx(tnapi, budget - work_done);
+
+ if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
+ struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
+ int i, err = 0;
+ u32 std_prod_idx = dpr->rx_std_prod_idx;
+ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
+
+ for (i = 1; i < tp->irq_cnt; i++)
+ err |= tg3_rx_prodring_xfer(tp, dpr,
+ &tp->napi[i].prodring);
+
+ wmb();
+
+ if (std_prod_idx != dpr->rx_std_prod_idx)
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ dpr->rx_std_prod_idx);
+
+ if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ dpr->rx_jmb_prod_idx);
+
+ mmiowb();
+
+ if (err)
+ tw32_f(HOSTCC_MODE, tp->coal_now);
+ }
+
+ return work_done;
+}
+
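+/* NAPI poll handler for the extra MSI-X vectors; these always use
+ * tagged status blocks.
+ */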
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+ struct tg3 *tp = tnapi->tp;
+ int work_done = 0;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+
+ while (1) {
+ work_done = tg3_poll_work(tnapi, work_done, budget);
+
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+ goto tx_recovery;
+
+ if (unlikely(work_done >= budget))
+ break;
+
+ /* tnapi->last_tag is used in the interrupt mailbox write below
+ * to tell the hw how much work has been processed,
+ * so we must read it before checking for more work.
+ */
+ tnapi->last_tag = sblk->status_tag;
+ tnapi->last_irq_tag = tnapi->last_tag;
+ rmb();
+
+ /* check for RX/TX work to do */
+ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
+ napi_complete(napi);
+ /* Reenable interrupts. */
+ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+ mmiowb();
+ break;
+ }
+ }
+
+ return work_done;
+
+tx_recovery:
+ /* work_done is guaranteed to be less than budget. */
+ napi_complete(napi);
+ schedule_work(&tp->reset_task);
+ return work_done;
+}
+
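+/* Inspect the flow attention, MSI and DMA status registers; on a
+ * real error, dump the chip state and schedule a reset.
+ */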
+static void tg3_process_error(struct tg3 *tp)
+{
+ u32 val;
+ bool real_error = false;
+
+ if (tg3_flag(tp, ERROR_PROCESSED))
+ return;
+
+ /* Check Flow Attention register */
+ val = tr32(HOSTCC_FLOW_ATTN);
+ if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
+ netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
+ netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
+ netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (!real_error)
+ return;
+
+ tg3_dump_state(tp);
+
+ tg3_flag_set(tp, ERROR_PROCESSED);
+ schedule_work(&tp->reset_task);
+}
+
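+/* NAPI poll handler for the first interrupt vector; also handles
+ * error processing and link change events.
+ */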
+static int tg3_poll(struct napi_struct *napi, int budget)
+{
+ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+ struct tg3 *tp = tnapi->tp;
+ int work_done = 0;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+
+ while (1) {
+ if (sblk->status & SD_STATUS_ERROR)
+ tg3_process_error(tp);
+
+ tg3_poll_link(tp);
+
+ work_done = tg3_poll_work(tnapi, work_done, budget);
+
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+ goto tx_recovery;
+
+ if (unlikely(work_done >= budget))
+ break;
+
+ if (tg3_flag(tp, TAGGED_STATUS)) {
+ /* tnapi->last_tag is used in tg3_int_reenable() below
+ * to tell the hw how much work has been processed,
+ * so we must read it before checking for more work.
+ */
+ tnapi->last_tag = sblk->status_tag;
+ tnapi->last_irq_tag = tnapi->last_tag;
+ rmb();
+ } else
+ sblk->status &= ~SD_STATUS_UPDATED;
+
+ if (likely(!tg3_has_work(tnapi))) {
+ napi_complete(napi);
+ tg3_int_reenable(tnapi);
+ break;
+ }
+ }
+
+ return work_done;
+
+tx_recovery:
+ /* work_done is guaranteed to be less than budget. */
+ napi_complete(napi);
+ schedule_work(&tp->reset_task);
+ return work_done;
+}
+
+static void tg3_napi_disable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--)
+ napi_disable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_enable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ napi_enable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_init(struct tg3 *tp)
+{
+ int i;
+
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ for (i = 1; i < tp->irq_cnt; i++)
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+}
+
+static void tg3_napi_fini(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ netif_napi_del(&tp->napi[i].napi);
+}
+
+static inline void tg3_netif_stop(struct tg3 *tp)
+{
+ tp->dev->trans_start = jiffies; /* prevent tx timeout */
+ tg3_napi_disable(tp);
+ netif_tx_disable(tp->dev);
+}
+
+static inline void tg3_netif_start(struct tg3 *tp)
+{
+ /* NOTE: unconditional netif_tx_wake_all_queues is only
+ * appropriate so long as all callers are assured to
+ * have free tx slots (such as after tg3_init_hw)
+ */
+ netif_tx_wake_all_queues(tp->dev);
+
+ tg3_napi_enable(tp);
+ tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
+ tg3_enable_ints(tp);
+}
+
+static void tg3_irq_quiesce(struct tg3 *tp)
+{
+ int i;
+
+ BUG_ON(tp->irq_sync);
+
+ tp->irq_sync = 1;
+ smp_mb();
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ synchronize_irq(tp->napi[i].irq_vec);
+}
+
+/* Fully shut down all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, then the IRQ handlers are synchronized
+ * as well. Most of the time, this is not necessary except when
+ * shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+ spin_lock_bh(&tp->lock);
+ if (irq_sync)
+ tg3_irq_quiesce(tp);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+ spin_unlock_bh(&tp->lock);
+}
+
+/* One-shot MSI handler - Chip automatically disables interrupt
+ * after sending MSI so driver doesn't have to do it.
+ */
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+
+ prefetch(tnapi->hw_status);
+ if (tnapi->rx_rcb)
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+ if (likely(!tg3_irq_sync(tp)))
+ napi_schedule(&tnapi->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* MSI ISR - No need to check for interrupt sharing and no need to
+ * flush status block and interrupt mailbox. PCI ordering rules
+ * guarantee that MSI will arrive after the status block.
+ */
+static irqreturn_t tg3_msi(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+
+ prefetch(tnapi->hw_status);
+ if (tnapi->rx_rcb)
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+ /*
+ * Writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * Writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ */
+ tw32_mailbox(tnapi->int_mbox, 0x00000001);
+ if (likely(!tg3_irq_sync(tp)))
+ napi_schedule(&tnapi->napi);
+
+ return IRQ_RETVAL(1);
+}
+
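+/* Interrupt handler for chips that do not use tagged status blocks. */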
+static irqreturn_t tg3_interrupt(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+ unsigned int handled = 1;
+
+ /* In INTx mode, it is possible for the interrupt to arrive at
+ * the CPU before the status block posted prior to the interrupt
+ * is visible.
+ * Reading the PCI State register will confirm whether the
+ * interrupt is ours and will flush the status block.
+ */
+ if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
+ if (tg3_flag(tp, CHIP_RESETTING) ||
+ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ handled = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * Writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * Writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ *
+ * Flush the mailbox to de-assert the IRQ immediately to prevent
+ * spurious interrupts. The flush impacts performance but
+ * excessive spurious interrupts can be worse in some cases.
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+ if (tg3_irq_sync(tp))
+ goto out;
+ sblk->status &= ~SD_STATUS_UPDATED;
+ if (likely(tg3_has_work(tnapi))) {
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+ napi_schedule(&tnapi->napi);
+ } else {
+ /* No work, shared interrupt perhaps? re-enable
+ * interrupts, and flush that PCI write
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+ 0x00000000);
+ }
+out:
+ return IRQ_RETVAL(handled);
+}
+
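+/* Interrupt handler for chips that use tagged status blocks. */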
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+ unsigned int handled = 1;
+
+ /* In INTx mode, it is possible for the interrupt to arrive at
+ * the CPU before the status block posted prior to the interrupt
+ * is visible.
+ * Reading the PCI State register will confirm whether the
+ * interrupt is ours and will flush the status block.
+ */
+ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
+ if (tg3_flag(tp, CHIP_RESETTING) ||
+ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ handled = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * Writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * Writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ *
+ * Flush the mailbox to de-assert the IRQ immediately to prevent
+ * spurious interrupts. The flush impacts performance but
+ * excessive spurious interrupts can be worse in some cases.
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+
+ /*
+ * In a shared interrupt configuration, sometimes other devices'
+ * interrupts will scream. We record the current status tag here
+ * so that the above check can report that the screaming interrupts
+ * are unhandled. Eventually they will be silenced.
+ */
+ tnapi->last_irq_tag = sblk->status_tag;
+
+ if (tg3_irq_sync(tp))
+ goto out;
+
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+ napi_schedule(&tnapi->napi);
+
+out:
+ return IRQ_RETVAL(handled);
+}
+
+/* ISR for interrupt test */
+static irqreturn_t tg3_test_isr(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+
+ if ((sblk->status & SD_STATUS_UPDATED) ||
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ tg3_disable_ints(tp);
+ return IRQ_RETVAL(1);
+ }
+ return IRQ_RETVAL(0);
+}
+
+static int tg3_init_hw(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
+
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+ __releases(tp->lock)
+ __acquires(tp->lock)
+{
+ int err;
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err) {
+ netdev_err(tp->dev,
+ "Failed to re-initialize device, aborting\n");
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_full_unlock(tp);
+ del_timer_sync(&tp->timer);
+ tp->irq_sync = 0;
+ tg3_napi_enable(tp);
+ dev_close(tp->dev);
+ tg3_full_lock(tp, 0);
+ }
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tg3_poll_controller(struct net_device *dev)
+{
+ int i;
+ struct tg3 *tp = netdev_priv(dev);
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+}
+#endif
+
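+/* Process context reset handler, scheduled from error paths and the
+ * tx timeout handler: quiesce the device, reset and reinitialize the
+ * hardware, then restart the data path.
+ */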
+static void tg3_reset_task(struct work_struct *work)
+{
+ struct tg3 *tp = container_of(work, struct tg3, reset_task);
+ int err;
+ unsigned int restart_timer;
+
+ tg3_full_lock(tp, 0);
+
+ if (!netif_running(tp->dev)) {
+ tg3_full_unlock(tp);
+ return;
+ }
+
+ tg3_full_unlock(tp);
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ restart_timer = tg3_flag(tp, RESTART_TIMER);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
+ tp->write32_tx_mbox = tg3_write32_tx_mbox;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+ }
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+ err = tg3_init_hw(tp, 1);
+ if (err)
+ goto out;
+
+ tg3_netif_start(tp);
+
+ if (restart_timer)
+ mod_timer(&tp->timer, jiffies + 1);
+
+out:
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+}
+
+static void tg3_tx_timeout(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (netif_msg_tx_err(tp)) {
+ netdev_err(dev, "transmit timed out, resetting\n");
+ tg3_dump_state(tp);
+ }
+
+ schedule_work(&tp->reset_task);
+}
+
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+ u32 base = (u32) mapping & 0xffffffff;
+
+ return (base > 0xffffdcc0) && (base + len + 8 < base);
+}
+
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+ int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+ if (tg3_flag(tp, 40BIT_DMA_BUG))
+ return ((u64) mapping + len) > DMA_BIT_MASK(40);
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
+ dma_addr_t mapping, u32 len, u32 flags,
+ u32 mss, u32 vlan)
+{
+ txbd->addr_hi = ((u64) mapping >> 32);
+ txbd->addr_lo = ((u64) mapping & 0xffffffff);
+ txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
+ txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
+}
+
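+/* Post one mapped buffer to the tx ring. Under the 4K FIFO limit
+ * workaround the buffer is split into budget-limited chunks, sized to
+ * dodge the 8-byte short-DMA bug. Returns true if the caller must
+ * fall back to the DMA bug workaround.
+ */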
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
+ dma_addr_t map, u32 len, u32 flags,
+ u32 mss, u32 vlan)
+{
+ struct tg3 *tp = tnapi->tp;
+ bool hwbug = false;
+
+ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
+ hwbug = true;
+
+ if (tg3_4g_overflow_test(map, len))
+ hwbug = true;
+
+ if (tg3_40bit_overflow_test(tp, map, len))
+ hwbug = true;
+
+ if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ u32 tmp_flag = flags & ~TXD_FLAG_END;
+ while (len > TG3_TX_BD_DMA_MAX) {
+ u32 frag_len = TG3_TX_BD_DMA_MAX;
+ len -= TG3_TX_BD_DMA_MAX;
+
+ if (len) {
+ tnapi->tx_buffers[*entry].fragmented = true;
+ /* Avoid the 8-byte DMA problem */
+ if (len <= 8) {
+ len += TG3_TX_BD_DMA_MAX / 2;
+ frag_len = TG3_TX_BD_DMA_MAX / 2;
+ }
+ } else
+ tmp_flag = flags;
+
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ frag_len, tmp_flag, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = true;
+ break;
+ }
+
+ map += frag_len;
+ }
+
+ if (len) {
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = true;
+ }
+ }
+ } else {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ *entry = NEXT_TX(*entry);
+ }
+
+ return hwbug;
+}
+
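+/* Unmap the linear head and all page fragments of a tx skb, skipping
+ * over any descriptors flagged as workaround fragments.
+ */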
+static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
+{
+ int i;
+ struct sk_buff *skb;
+ struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
+
+ skb = txb->skb;
+ txb->skb = NULL;
+
+ pci_unmap_single(tnapi->tp->pdev,
+ dma_unmap_addr(txb, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
+
+ for (i = 0; i < last; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+
+ pci_unmap_page(tnapi->tp->pdev,
+ dma_unmap_addr(txb, mapping),
+ frag->size, PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
+ }
+}
+
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ struct sk_buff *skb,
+ u32 *entry, u32 *budget,
+ u32 base_flags, u32 mss, u32 vlan)
+{
+ struct tg3 *tp = tnapi->tp;
+ struct sk_buff *new_skb;
+ dma_addr_t new_addr = 0;
+ int ret = 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ else {
+ int more_headroom = 4 - ((unsigned long)skb->data & 3);
+
+ new_skb = skb_copy_expand(skb,
+ skb_headroom(skb) + more_headroom,
+ skb_tailroom(skb), GFP_ATOMIC);
+ }
+
+ if (!new_skb) {
+ ret = -1;
+ } else {
+ /* New SKB is guaranteed to be linear. */
+ new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+ PCI_DMA_TODEVICE);
+ /* Make sure the mapping succeeded */
+ if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+ dev_kfree_skb(new_skb);
+ ret = -1;
+ } else {
+ base_flags |= TXD_FLAG_END;
+
+ tnapi->tx_buffers[*entry].skb = new_skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
+ mapping, new_addr);
+
+ if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+ new_skb->len, base_flags,
+ mss, vlan)) {
+ tg3_tx_skb_unmap(tnapi, *entry, 0);
+ dev_kfree_skb(new_skb);
+ ret = -1;
+ }
+ }
+ }
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+
+/* Use GSO to workaround a rare TSO bug that may be triggered when the
+ * TSO header is greater than 80 bytes.
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+{
+ struct sk_buff *segs, *nskb;
+ u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+
+ /* Estimate the number of fragments in the worst case */
+ if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
+ netif_stop_queue(tp->dev);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in tg3_tx_avail() below, because in
+ * tg3_tx(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(tp->dev);
+ }
+
+ segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
+ if (IS_ERR(segs))
+ goto tg3_tso_bug_end;
+
+ do {
+ nskb = segs;
+ segs = segs->next;
+ nskb->next = NULL;
+ tg3_start_xmit(nskb, tp->dev);
+ } while (segs);
+
+tg3_tso_bug_end:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
+ * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
+ */
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 len, entry, base_flags, mss, vlan = 0;
+ u32 budget;
+ int i = -1, would_hit_hwbug;
+ dma_addr_t mapping;
+ struct tg3_napi *tnapi;
+ struct netdev_queue *txq;
+ unsigned int last;
+
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi++;
+
+ budget = tg3_tx_avail(tnapi);
+
+ /* We are running in BH disabled context with netif_tx_lock
+ * and TX reclaim runs via tp->napi.poll inside of a software
+ * interrupt. Furthermore, IRQ processing runs lockless so we have
+ * no IRQ context deadlocks to worry about either. Rejoice!
+ */
+ if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+
+ /* This is a hard error, log it. */
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = tnapi->tx_prod;
+ base_flags = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ struct iphdr *iph;
+ u32 tcp_opt_len, hdr_len;
+
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ iph = ip_hdr(skb);
+ tcp_opt_len = tcp_optlen(skb);
+
+ if (skb_is_gso_v6(skb)) {
+ hdr_len = skb_headlen(skb) - ETH_HLEN;
+ } else {
+ u32 ip_tcp_len;
+
+ ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
+ hdr_len = ip_tcp_len + tcp_opt_len;
+
+ iph->check = 0;
+ iph->tot_len = htons(mss + hdr_len);
+ }
+
+ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+ tg3_flag(tp, TSO_BUG))
+ return tg3_tso_bug(tp, skb);
+
+ base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+ TXD_FLAG_CPU_POST_DMA);
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) {
+ tcp_hdr(skb)->check = 0;
+ base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+ } else
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+
+ if (tg3_flag(tp, HW_TSO_3)) {
+ mss |= (hdr_len & 0xc) << 12;
+ if (hdr_len & 0x10)
+ base_flags |= 0x00000010;
+ base_flags |= (hdr_len & 0x3e0) << 5;
+ } else if (tg3_flag(tp, HW_TSO_2))
+ mss |= hdr_len << 9;
+ else if (tg3_flag(tp, HW_TSO_1) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tcp_opt_len || iph->ihl > 5) {
+ int tsflags;
+
+ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+ mss |= (tsflags << 11);
+ }
+ } else {
+ if (tcp_opt_len || iph->ihl > 5) {
+ int tsflags;
+
+ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+ base_flags |= tsflags << 12;
+ }
+ }
+ }
+
+ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+ !mss && skb->len > VLAN_ETH_FRAME_LEN)
+ base_flags |= TXD_FLAG_JMB_PKT;
+
+ if (vlan_tx_tag_present(skb)) {
+ base_flags |= TXD_FLAG_VLAN;
+ vlan = vlan_tx_tag_get(skb);
+ }
+
+ len = skb_headlen(skb);
+
+ mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(tp->pdev, mapping)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ tnapi->tx_buffers[entry].skb = skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+
+ would_hit_hwbug = 0;
+
+ if (tg3_flag(tp, 5701_DMA_BUG))
+ would_hit_hwbug = 1;
+
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
+ ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
+ mss, vlan))
+ would_hit_hwbug = 1;
+
+ /* Now loop through additional data fragments, and queue them. */
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ u32 tmp_mss = mss;
+
+ if (!tg3_flag(tp, HW_TSO_1) &&
+ !tg3_flag(tp, HW_TSO_2) &&
+ !tg3_flag(tp, HW_TSO_3))
+ tmp_mss = 0;
+
+ last = skb_shinfo(skb)->nr_frags - 1;
+ for (i = 0; i <= last; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+ mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
+ len, PCI_DMA_TODEVICE);
+
+ tnapi->tx_buffers[entry].skb = NULL;
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ mapping);
+ if (pci_dma_mapping_error(tp->pdev, mapping))
+ goto dma_error;
+
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+ len, base_flags |
+ ((i == last) ? TXD_FLAG_END : 0),
+ tmp_mss, vlan))
+ would_hit_hwbug = 1;
+ }
+ }
+
+ if (would_hit_hwbug) {
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+
+ /* If the workaround fails due to memory/mapping
+ * failure, silently drop this packet.
+ */
+ entry = tnapi->tx_prod;
+ budget = tg3_tx_avail(tnapi);
+ if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+ base_flags, mss, vlan))
+ goto out_unlock;
+ }
+
+ skb_tx_timestamp(skb);
+
+ /* Packets are ready, update Tx producer idx locally and on card. */
+ tw32_tx_mbox(tnapi->prodmbox, entry);
+
+ tnapi->tx_prod = entry;
+ if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in tg3_tx_avail() below, because in
+ * tg3_tx(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
+ netif_tx_wake_queue(txq);
+ }
+
+out_unlock:
+ mmiowb();
+
+ return NETDEV_TX_OK;
+
+dma_error:
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ dev_kfree_skb(skb);
+ tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+ return NETDEV_TX_OK;
+}
+
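+/* Enable or disable internal MAC loopback mode. */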
+static void tg3_mac_loopback(struct tg3 *tp, bool enable)
+{
+ if (enable) {
+ tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
+ MAC_MODE_PORT_MODE_MASK);
+
+ tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else {
+ tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+
+ if (tg3_flag(tp, 5705_PLUS) ||
+ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
+
+ tw32(MAC_MODE, tp->mac_mode);
+ udelay(40);
+}
+
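+/* Put the PHY (and the MAC port mode) into internal or external
+ * loopback at the requested speed.
+ */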
+static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
+{
+ u32 val, bmcr, mac_mode, ptest = 0;
+
+ tg3_phy_toggle_apd(tp, false);
+ tg3_phy_toggle_automdix(tp, 0);
+
+ if (extlpbk && tg3_phy_set_extloopbk(tp))
+ return -EIO;
+
+ bmcr = BMCR_FULLDPLX;
+ switch (speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ default:
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ speed = SPEED_100;
+ bmcr |= BMCR_SPEED100;
+ } else {
+ speed = SPEED_1000;
+ bmcr |= BMCR_SPEED1000;
+ }
+ }
+
+ if (extlpbk) {
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ tg3_readphy(tp, MII_CTRL1000, &val);
+ val |= CTL1000_AS_MASTER |
+ CTL1000_ENABLE_MASTER;
+ tg3_writephy(tp, MII_CTRL1000, val);
+ } else {
+ ptest = MII_TG3_FET_PTEST_TRIM_SEL |
+ MII_TG3_FET_PTEST_TRIM_2;
+ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
+ }
+ } else
+ bmcr |= BMCR_LOOPBACK;
+
+ tg3_writephy(tp, MII_BMCR, bmcr);
+
+ /* The write needs to be flushed for the FETs */
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+
+ udelay(40);
+
+ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
+ MII_TG3_FET_PTEST_FRC_TX_LINK |
+ MII_TG3_FET_PTEST_FRC_TX_LOCK);
+
+ /* The write needs to be flushed for the AC131 */
+ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
+ }
+
+ /* Reset to prevent losing 1st rx packet intermittently */
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ tg3_flag(tp, 5780_CLASS)) {
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ }
+
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ if (speed == SPEED_1000)
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
+
+ if (masked_phy_id == TG3_PHY_ID_BCM5401)
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ else if (masked_phy_id == TG3_PHY_ID_BCM5411)
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+ }
+
+ tw32(MAC_MODE, mac_mode);
+ udelay(40);
+
+ return 0;
+}
+
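+/* Apply a NETIF_F_LOOPBACK feature change by toggling internal MAC
+ * loopback.
+ */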
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (features & NETIF_F_LOOPBACK) {
+ if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+ return;
+
+ spin_lock_bh(&tp->lock);
+ tg3_mac_loopback(tp, true);
+ netif_carrier_on(tp->dev);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+ } else {
+ if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+ return;
+
+ spin_lock_bh(&tp->lock);
+ tg3_mac_loopback(tp, false);
+ /* Force link status check */
+ tg3_setup_phy(tp, 1);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+ }
+}
+
+static u32 tg3_fix_features(struct net_device *dev, u32 features)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
+ features &= ~NETIF_F_ALL_TSO;
+
+ return features;
+}
+
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+ tg3_set_loopback(dev, features);
+
+ return 0;
+}
+
+static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
+ int new_mtu)
+{
+ dev->mtu = new_mtu;
+
+ if (new_mtu > ETH_DATA_LEN) {
+ if (tg3_flag(tp, 5780_CLASS)) {
+ netdev_update_features(dev);
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else {
+ tg3_flag_set(tp, JUMBO_RING_ENABLE);
+ }
+ } else {
+ if (tg3_flag(tp, 5780_CLASS)) {
+ tg3_flag_set(tp, TSO_CAPABLE);
+ netdev_update_features(dev);
+ }
+ tg3_flag_clear(tp, JUMBO_RING_ENABLE);
+ }
+}
+
+static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+ /* We'll just catch it later when the
+ * device is brought up.
+ */
+ tg3_set_mtu(dev, tp, new_mtu);
+ return 0;
+ }
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+
+ tg3_set_mtu(dev, tp, new_mtu);
+
+ err = tg3_restart_hw(tp, 0);
+
+ if (!err)
+ tg3_netif_start(tp);
+
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+
+ return err;
+}
+
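+/* Free the rx buffers of a producer ring set. For the per-vector
+ * sets only the currently posted range is walked; the vector 0 set
+ * is freed in its entirety.
+ */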
+static void tg3_rx_prodring_free(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ int i;
+
+ if (tpr != &tp->napi[0].prodring) {
+ for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+ i = (i + 1) & tp->rx_std_ring_mask)
+ tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tp->rx_pkt_map_sz);
+
+ if (tg3_flag(tp, JUMBO_CAPABLE)) {
+ for (i = tpr->rx_jmb_cons_idx;
+ i != tpr->rx_jmb_prod_idx;
+ i = (i + 1) & tp->rx_jmb_ring_mask) {
+ tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ TG3_RX_JMB_MAP_SZ);
+ }
+ }
+
+ return;
+ }
+
+ for (i = 0; i <= tp->rx_std_ring_mask; i++)
+ tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tp->rx_pkt_map_sz);
+
+ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+ for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
+ tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ TG3_RX_JMB_MAP_SZ);
+ }
+}
+
+/* Initialize rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_rx_prodring_alloc(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ u32 i, rx_pkt_dma_sz;
+
+ tpr->rx_std_cons_idx = 0;
+ tpr->rx_std_prod_idx = 0;
+ tpr->rx_jmb_cons_idx = 0;
+ tpr->rx_jmb_prod_idx = 0;
+
+ if (tpr != &tp->napi[0].prodring) {
+ memset(&tpr->rx_std_buffers[0], 0,
+ TG3_RX_STD_BUFF_RING_SIZE(tp));
+ if (tpr->rx_jmb_buffers)
+ memset(&tpr->rx_jmb_buffers[0], 0,
+ TG3_RX_JMB_BUFF_RING_SIZE(tp));
+ goto done;
+ }
+
+ /* Zero out all descriptors. */
+ memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
+
+ rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
+ if (tg3_flag(tp, 5780_CLASS) &&
+ tp->dev->mtu > ETH_DATA_LEN)
+ rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
+ tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
+
+ /* Initialize invariants of the rings; we only set this
+ * stuff once. This works because the card does not
+ * write into the rx buffer posting rings.
+ */
+ for (i = 0; i <= tp->rx_std_ring_mask; i++) {
+ struct tg3_rx_buffer_desc *rxd;
+
+ rxd = &tpr->rx_std[i];
+ rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
+ rxd->opaque = (RXD_OPAQUE_RING_STD |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
+ /* Now allocate fresh SKBs for each rx ring. */
+ for (i = 0; i < tp->rx_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+ netdev_warn(tp->dev,
+ "Using a smaller RX standard ring. Only "
+ "%d out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_pending);
+ if (i == 0)
+ goto initfail;
+ tp->rx_pending = i;
+ break;
+ }
+ }
+
+ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+ goto done;
+
+ memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
+
+ if (!tg3_flag(tp, JUMBO_RING_ENABLE))
+ goto done;
+
+ for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
+ struct tg3_rx_buffer_desc *rxd;
+
+ rxd = &tpr->rx_jmb[i].std;
+ rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+ RXD_FLAG_JUMBO;
+ rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
+ for (i = 0; i < tp->rx_jumbo_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ netdev_warn(tp->dev,
+ "Using a smaller RX jumbo ring. Only %d "
+ "out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_jumbo_pending);
+ if (i == 0)
+ goto initfail;
+ tp->rx_jumbo_pending = i;
+ break;
+ }
+ }
+
+done:
+ return 0;
+
+initfail:
+ tg3_rx_prodring_free(tp, tpr);
+ return -ENOMEM;
+}
+
+static void tg3_rx_prodring_fini(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ kfree(tpr->rx_std_buffers);
+ tpr->rx_std_buffers = NULL;
+ kfree(tpr->rx_jmb_buffers);
+ tpr->rx_jmb_buffers = NULL;
+ if (tpr->rx_std) {
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+ tpr->rx_std, tpr->rx_std_mapping);
+ tpr->rx_std = NULL;
+ }
+ if (tpr->rx_jmb) {
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+ tpr->rx_jmb, tpr->rx_jmb_mapping);
+ tpr->rx_jmb = NULL;
+ }
+}
+
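+/* Allocate the buffer bookkeeping arrays and coherent DMA descriptor
+ * rings for one rx producer ring set.
+ */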
+static int tg3_rx_prodring_init(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
+ GFP_KERNEL);
+ if (!tpr->rx_std_buffers)
+ return -ENOMEM;
+
+ tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_STD_RING_BYTES(tp),
+ &tpr->rx_std_mapping,
+ GFP_KERNEL);
+ if (!tpr->rx_std)
+ goto err_out;
+
+ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+ tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
+ GFP_KERNEL);
+ if (!tpr->rx_jmb_buffers)
+ goto err_out;
+
+ tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_JMB_RING_BYTES(tp),
+ &tpr->rx_jmb_mapping,
+ GFP_KERNEL);
+ if (!tpr->rx_jmb)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ tg3_rx_prodring_fini(tp, tpr);
+ return -ENOMEM;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void tg3_free_rings(struct tg3 *tp)
+{
+ int i, j;
+
+ for (j = 0; j < tp->irq_cnt; j++) {
+ struct tg3_napi *tnapi = &tp->napi[j];
+
+ tg3_rx_prodring_free(tp, &tnapi->prodring);
+
+ if (!tnapi->tx_buffers)
+ continue;
+
+ for (i = 0; i < TG3_TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tnapi->tx_buffers[i].skb;
+
+ if (!skb)
+ continue;
+
+ tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
+
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_init_rings(struct tg3 *tp)
+{
+ int i;
+
+ /* Free up all the SKBs. */
+ tg3_free_rings(tp);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ tnapi->tx_prod = 0;
+ tnapi->tx_cons = 0;
+ if (tnapi->tx_ring)
+ memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+ tnapi->rx_rcb_ptr = 0;
+ if (tnapi->rx_rcb)
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+ if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+ tg3_free_rings(tp);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Must not be invoked until interrupt sources are disabled
+ * and the hardware is shut down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tnapi->tx_ring) {
+ dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
+ tnapi->tx_ring, tnapi->tx_desc_mapping);
+ tnapi->tx_ring = NULL;
+ }
+
+ kfree(tnapi->tx_buffers);
+ tnapi->tx_buffers = NULL;
+
+ if (tnapi->rx_rcb) {
+ dma_free_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = NULL;
+ }
+
+ tg3_rx_prodring_fini(tp, &tnapi->prodring);
+
+ if (tnapi->hw_status) {
+ dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+ tnapi->hw_status,
+ tnapi->status_mapping);
+ tnapi->hw_status = NULL;
+ }
+ }
+
+ if (tp->hw_stats) {
+ dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ tp->hw_stats, tp->stats_mapping);
+ tp->hw_stats = NULL;
+ }
+}
+
+/*
+ * Must not be invoked until interrupt sources are disabled
+ * and the hardware is shut down. Can sleep.
+ */
+static int tg3_alloc_consistent(struct tg3 *tp)
+{
+ int i;
+
+ tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping,
+ GFP_KERNEL);
+ if (!tp->hw_stats)
+ goto err_out;
+
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ struct tg3_hw_status *sblk;
+
+ tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
+ if (!tnapi->hw_status)
+ goto err_out;
+
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ sblk = tnapi->hw_status;
+
+ if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+ goto err_out;
+
+ /* If multivector TSS is enabled, vector 0 does not handle
+ * tx interrupts. Don't allocate any resources for it.
+ */
+ if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
+ (i && tg3_flag(tp, ENABLE_TSS))) {
+ tnapi->tx_buffers = kzalloc(
+ sizeof(struct tg3_tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
+ if (!tnapi->tx_buffers)
+ goto err_out;
+
+ tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping,
+ GFP_KERNEL);
+ if (!tnapi->tx_ring)
+ goto err_out;
+ }
+
+ /*
+ * When RSS is enabled, the status block format changes
+ * slightly. The "rx_jumbo_consumer", "reserved",
+ * and "rx_mini_consumer" members get mapped to the
+ * other three rx return ring producer indexes.
+ */
+ switch (i) {
+ default:
+ tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+ break;
+ case 2:
+ tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
+ break;
+ case 3:
+ tnapi->rx_rcb_prod_idx = &sblk->reserved;
+ break;
+ case 4:
+ tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
+ break;
+ }
+
+ /*
+ * If multivector RSS is enabled, vector 0 does not handle
+ * rx or tx interrupts. Don't allocate any resources for it.
+ */
+ if (!i && tg3_flag(tp, ENABLE_RSS))
+ continue;
+
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
+ if (!tnapi->rx_rcb)
+ goto err_out;
+
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ }
+
+ return 0;
+
+err_out:
+ tg3_free_consistent(tp);
+ return -ENOMEM;
+}
+
+#define MAX_WAIT_CNT 1000
+
+/* To stop a block, clear the enable bit and poll till it
+ * clears. tp->lock is held.
+ */
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
+{
+ unsigned int i;
+ u32 val;
+
+ if (tg3_flag(tp, 5705_PLUS)) {
+ switch (ofs) {
+ case RCVLSC_MODE:
+ case DMAC_MODE:
+ case MBFREE_MODE:
+ case BUFMGR_MODE:
+ case MEMARB_MODE:
+ /* We can't enable/disable these bits of the
+ * 5705/5750, just say success.
+ */
+ return 0;
+
+ default:
+ break;
+ }
+ }
+
+ val = tr32(ofs);
+ val &= ~enable_bit;
+ tw32_f(ofs, val);
+
+ for (i = 0; i < MAX_WAIT_CNT; i++) {
+ udelay(100);
+ val = tr32(ofs);
+ if ((val & enable_bit) == 0)
+ break;
+ }
+
+ if (i == MAX_WAIT_CNT && !silent) {
+ dev_err(&tp->pdev->dev,
+ "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+ ofs, enable_bit);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_abort_hw(struct tg3 *tp, int silent)
+{
+ int i, err;
+
+ tg3_disable_ints(tp);
+
+ tp->rx_mode &= ~RX_MODE_ENABLE;
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
+
+ err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
+
+ tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tp->tx_mode &= ~TX_MODE_ENABLE;
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+
+ for (i = 0; i < MAX_WAIT_CNT; i++) {
+ udelay(100);
+ if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
+ break;
+ }
+ if (i >= MAX_WAIT_CNT) {
+ dev_err(&tp->pdev->dev,
+ "%s timed out, TX_MODE_ENABLE will not clear "
+ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
+ err |= -ENODEV;
+ }
+
+ err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
+
+ tw32(FTQ_RESET, 0xffffffff);
+ tw32(FTQ_RESET, 0x00000000);
+
+ err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status)
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ }
+ if (tp->hw_stats)
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ return err;
+}
+
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+ u32 val;
+
+ /* Re-enable indirect register accesses. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ /* Set MAX PCI retry to zero. */
+ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ tg3_flag(tp, PCIX_MODE))
+ val |= PCISTATE_RETRY_SAME_DMA;
+ /* Allow reads and writes to the APE register and memory space. */
+ if (tg3_flag(tp, ENABLE_APE))
+ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+ pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
+ if (tg3_flag(tp, PCI_EXPRESS))
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+ else {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
+ }
+ }
+
+ /* Make sure PCI-X relaxed ordering bit is clear. */
+ if (tg3_flag(tp, PCIX_MODE)) {
+ u16 pcix_cmd;
+
+ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ &pcix_cmd);
+ pcix_cmd &= ~PCI_X_CMD_ERO;
+ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ pcix_cmd);
+ }
+
+ if (tg3_flag(tp, 5780_CLASS)) {
+
+ /* Chip reset on 5780 will reset MSI enable bit,
+ * so we need to restore it.
+ */
+ if (tg3_flag(tp, USING_MSI)) {
+ u16 ctrl;
+
+ pci_read_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ &ctrl);
+ pci_write_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ ctrl | PCI_MSI_FLAGS_ENABLE);
+ val = tr32(MSGINT_MODE);
+ tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+ }
+ }
+}
+
+/* tp->lock is held. */
+static int tg3_chip_reset(struct tg3 *tp)
+{
+ u32 val;
+ void (*write_op)(struct tg3 *, u32, u32);
+ int i, err;
+
+ tg3_nvram_lock(tp);
+
+ tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
+
+ /* No matching tg3_nvram_unlock() after this because
+ * chip reset below will undo the nvram lock.
+ */
+ tp->nvram_lock_cnt = 0;
+
+ /* GRC_MISC_CFG core clock reset will clear the memory
+ * enable bit in PCI register 4 and the MSI enable bit
+ * on some chips, so we save relevant registers here.
+ */
+ tg3_save_pci_state(tp);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_flag(tp, 5755_PLUS))
+ tw32(GRC_FASTBOOT_PC, 0);
+
+ /*
+ * We must avoid the readl() that normally takes place.
+ * It locks machines, causes machine checks, and other
+ * fun things. So, temporarily disable the 5701
+ * hardware workaround, while we do the reset.
+ */
+ write_op = tp->write32;
+ if (write_op == tg3_write_flush_reg32)
+ tp->write32 = tg3_write32;
+
+ /* Prevent the irq handler from reading or writing PCI registers
+ * during chip reset when the memory enable bit in the PCI command
+ * register may be cleared. The chip does not generate interrupts
+ * at this time, but the irq handler may still be called due to irq
+ * sharing or irqpoll.
+ */
+ tg3_flag_set(tp, CHIP_RESETTING);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status) {
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ }
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
+ }
+ smp_mb();
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ synchronize_irq(tp->napi[i].irq_vec);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+ }
+
+ /* do the reset */
+ val = GRC_MISC_CFG_CORECLK_RESET;
+
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ /* Force PCIe 1.0a mode */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS) &&
+ tr32(TG3_PCIE_PHY_TSTCTL) ==
+ (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
+ tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
+
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ tw32(GRC_MISC_CFG, (1 << 29));
+ val |= (1 << 29);
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
+ tw32(GRC_VCPU_EXT_CTRL,
+ tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
+ }
+
+ /* Manage gphy power for all CPMU absent PCIe devices. */
+ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
+ val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+
+ tw32(GRC_MISC_CFG, val);
+
+ /* restore 5701 hardware bug workaround write method */
+ tp->write32 = write_op;
+
+ /* Unfortunately, we have to delay before the PCI read back.
+ * Some 575X chips even will not respond to a PCI cfg access
+ * when the reset command is given to the chip.
+ *
+ * How do these hardware designers expect things to work
+ * properly if the PCI write is posted for a long period
+ * of time? It is always necessary to have some method by
+ * which a register read back can occur to push the write
+ * out which does the reset.
+ *
+ * For most tg3 variants the trick below was working.
+ * Ho hum...
+ */
+ udelay(120);
+
+ /* Flush PCI posted writes. The normal MMIO registers
+ * are inaccessible at this time so this is the only
+ * way to do this reliably (actually, this is no longer
+ * the case, see above). I tried to use indirect
+ * register read/write but this upset some 5701 variants.
+ */
+ pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
+
+ udelay(120);
+
+ if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
+ u16 val16;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+ int i;
+ u32 cfg_val;
+
+ /* Wait for link training to complete. */
+ for (i = 0; i < 5000; i++)
+ udelay(100);
+
+ pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
+ pci_write_config_dword(tp->pdev, 0xc4,
+ cfg_val | (1 << 15));
+ }
+
+ /* Clear the "no snoop" and "relaxed ordering" bits. */
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
+ &val16);
+ val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ /*
+ * Older PCIe devices only support the 128-byte
+ * MPS setting. Enforce the restriction.
+ */
+ if (!tg3_flag(tp, CPMU_PRESENT))
+ val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
+ val16);
+
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+
+ /* Clear error status */
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
+ PCI_EXP_DEVSTA_CED |
+ PCI_EXP_DEVSTA_NFED |
+ PCI_EXP_DEVSTA_FED |
+ PCI_EXP_DEVSTA_URD);
+ }
+
+ tg3_restore_pci_state(tp);
+
+ tg3_flag_clear(tp, CHIP_RESETTING);
+ tg3_flag_clear(tp, ERROR_PROCESSED);
+
+ val = 0;
+ if (tg3_flag(tp, 5780_CLASS))
+ val = tr32(MEMARB_MODE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+ tg3_stop_fw(tp);
+ tw32(0x5000, 0x400);
+ }
+
+ tw32(GRC_MODE, tp->grc_mode);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+ val = tr32(0xc4);
+
+ tw32(0xc4, val | (1 << 15));
+ }
+
+ if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+ tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
+ tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
+ val = tp->mac_mode;
+ } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+ tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
+ val = tp->mac_mode;
+ } else
+ val = 0;
+
+ tw32_f(MAC_MODE, val);
+ udelay(40);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
+
+ err = tg3_poll_fw(tp);
+ if (err)
+ return err;
+
+ tg3_mdio_start(tp);
+
+ if (tg3_flag(tp, PCI_EXPRESS) &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(0x7c00);
+
+ tw32(0x7c00, val | (1 << 25));
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = tr32(TG3_CPMU_CLCK_ORIDE);
+ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ }
+
+ /* Reprobe ASF enable state. */
+ tg3_flag_clear(tp, ENABLE_ASF);
+ tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
+ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+ u32 nic_cfg;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+ tg3_flag_set(tp, ENABLE_ASF);
+ tp->last_event_jiffies = jiffies;
+ if (tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+ }
+ }
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
+{
+ int err;
+
+ tg3_stop_fw(tp);
+
+ tg3_write_sig_pre_reset(tp, kind);
+
+ tg3_abort_hw(tp, silent);
+ err = tg3_chip_reset(tp);
+
+ __tg3_set_mac_addr(tp, 0);
+
+ tg3_write_sig_legacy(tp, kind);
+ tg3_write_sig_post_reset(tp, kind);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
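+/* Set a new station address, skipping MAC address register 1 when
+ * ASF firmware is using it.
+ */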
+static int tg3_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err = 0, skip_mac_1 = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ if (!netif_running(dev))
+ return 0;
+
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ u32 addr0_high, addr0_low, addr1_high, addr1_low;
+
+ addr0_high = tr32(MAC_ADDR_0_HIGH);
+ addr0_low = tr32(MAC_ADDR_0_LOW);
+ addr1_high = tr32(MAC_ADDR_1_HIGH);
+ addr1_low = tr32(MAC_ADDR_1_LOW);
+
+ /* Skip MAC addr 1 if ASF is using it. */
+ if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
+ !(addr1_high == 0 && addr1_low == 0))
+ skip_mac_1 = 1;
+ }
+ spin_lock_bh(&tp->lock);
+ __tg3_set_mac_addr(tp, skip_mac_1);
+ spin_unlock_bh(&tp->lock);
+
+ return err;
+}
+
+/* tp->lock is held. */
+static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
+ dma_addr_t mapping, u32 maxlen_flags,
+ u32 nic_addr)
+{
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
+ ((u64) mapping >> 32));
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
+ ((u64) mapping & 0xffffffff));
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
+ maxlen_flags);
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
+ nic_addr);
+}
+
+static void __tg3_set_rx_mode(struct net_device *);
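+/* Program the host coalescing engine from an ethtool_coalesce setup,
+ * zeroing the parameters of any vector that does not handle rx or tx
+ * work.
+ */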
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+ int i;
+
+ if (!tg3_flag(tp, ENABLE_TSS)) {
+ tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+ tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+ } else {
+ tw32(HOSTCC_TXCOL_TICKS, 0);
+ tw32(HOSTCC_TXMAX_FRAMES, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+ }
+
+ if (!tg3_flag(tp, ENABLE_RSS)) {
+ tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+ tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+ } else {
+ tw32(HOSTCC_RXCOL_TICKS, 0);
+ tw32(HOSTCC_RXMAX_FRAMES, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
+ }
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 val = ec->stats_block_coalesce_usecs;
+
+ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
+ if (!netif_carrier_ok(tp->dev))
+ val = 0;
+
+ tw32(HOSTCC_STAT_COAL_TICKS, val);
+ }
+
+ for (i = 0; i < tp->irq_cnt - 1; i++) {
+ u32 reg;
+
+ reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_coalesce_usecs);
+ reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames);
+ reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames_irq);
+
+ if (tg3_flag(tp, ENABLE_TSS)) {
+ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_coalesce_usecs);
+ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames);
+ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames_irq);
+ }
+ }
+
+ for (; i < tp->irq_max - 1; i++) {
+ tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+
+ if (tg3_flag(tp, ENABLE_TSS)) {
+ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_rings_reset(struct tg3 *tp)
+{
+ int i;
+ u32 stblk, txrcb, rxrcb, limit;
+ struct tg3_napi *tnapi = &tp->napi[0];
+
+ /* Disable all transmit rings but the first. */
+ if (!tg3_flag(tp, 5705_PLUS))
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+ else if (tg3_flag(tp, 5717_PLUS))
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
+ else
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+
+ for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+ txrcb < limit; txrcb += TG3_BDINFO_SIZE)
+ tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Disable all receive return rings but the first. */
+ if (tg3_flag(tp, 5717_PLUS))
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
+ else if (!tg3_flag(tp, 5705_PLUS))
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
+ else
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+
+ for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+ rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
+ tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Disable interrupts */
+ tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+ tp->napi[0].chk_msi_cnt = 0;
+ tp->napi[0].last_rx_cons = 0;
+ tp->napi[0].last_tx_cons = 0;
+
+ /* Zero mailbox registers. */
+ if (tg3_flag(tp, SUPPORT_MSIX)) {
+ for (i = 1; i < tp->irq_max; i++) {
+ tp->napi[i].tx_prod = 0;
+ tp->napi[i].tx_cons = 0;
+ if (tg3_flag(tp, ENABLE_TSS))
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[i].consmbox, 0);
+ tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+ tp->napi[i].chk_msi_cnt = 0;
+ tp->napi[i].last_rx_cons = 0;
+ tp->napi[i].last_tx_cons = 0;
+ }
+ if (!tg3_flag(tp, ENABLE_TSS))
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
+ } else {
+ tp->napi[0].tx_prod = 0;
+ tp->napi[0].tx_cons = 0;
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[0].consmbox, 0);
+ }
+
+ /* Make sure the NIC-based send BD rings are disabled. */
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+ for (i = 0; i < 16; i++)
+ tw32_tx_mbox(mbox + i * 8, 0);
+ }
+
+ txrcb = NIC_SRAM_SEND_RCB;
+ rxrcb = NIC_SRAM_RCV_RET_RCB;
+
+ /* Clear status block in ram. */
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ /* Set status block DMA address */
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tnapi->status_mapping >> 32));
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tnapi->status_mapping & 0xffffffff));
+
+ if (tnapi->tx_ring) {
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+ txrcb += TG3_BDINFO_SIZE;
+ }
+
+ if (tnapi->rx_rcb) {
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ (tp->rx_ret_ring_mask + 1) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT, 0);
+ rxrcb += TG3_BDINFO_SIZE;
+ }
+
+ stblk = HOSTCC_STATBLCK_RING1;
+
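+	/* Program status block addresses and ring control blocks for the
+	 * remaining interrupt vectors.
+	 */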
+ for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
+ u64 mapping = (u64)tnapi->status_mapping;
+ tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
+ tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+
+ /* Clear status block in ram. */
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ if (tnapi->tx_ring) {
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+ txrcb += TG3_BDINFO_SIZE;
+ }
+
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ ((tp->rx_ret_ring_mask + 1) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+
+ stblk += 8;
+ rxrcb += TG3_BDINFO_SIZE;
+ }
+}
+
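+/* Program the RX buffer-descriptor replenish thresholds, i.e. how far
+ * the on-chip BD cache may drain before more standard/jumbo descriptors
+ * are fetched from the host.
+ */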
+static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
+{
+ u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
+
+ if (!tg3_flag(tp, 5750_PLUS) ||
+ tg3_flag(tp, 5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
+ else
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
+
+ nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
+ host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
+
+ val = min(nic_rep_thresh, host_rep_thresh);
+ tw32(RCVBDI_STD_THRESH, val);
+
+ if (tg3_flag(tp, 57765_PLUS))
+ tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
+
+ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+ return;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
+ else
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+
+ host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
+
+ val = min(bdcache_maxcnt / 2, host_rep_thresh);
+ tw32(RCVBDI_JUMBO_THRESH, val);
+
+ if (tg3_flag(tp, 57765_PLUS))
+ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
+}
+
+/* tp->lock is held. */
+static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+{
+ u32 val, rdmac_mode;
+ int i, err, limit;
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
+ tg3_disable_ints(tp);
+
+ tg3_stop_fw(tp);
+
+ tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
+
+ if (tg3_flag(tp, INIT_COMPLETE))
+ tg3_abort_hw(tp, 1);
+
+ /* Enable MAC control of LPI */
+ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
+ TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+ TG3_CPMU_EEE_LNKIDL_UART_IDL);
+
+ tw32_f(TG3_CPMU_EEE_CTRL,
+ TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+ val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+ TG3_CPMU_EEEMD_LPI_IN_TX |
+ TG3_CPMU_EEEMD_LPI_IN_RX |
+ TG3_CPMU_EEEMD_EEE_ENABLE;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+ if (tg3_flag(tp, ENABLE_APE))
+ val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+ tw32_f(TG3_CPMU_EEE_MODE, val);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR1,
+ TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+ TG3_CPMU_DBTMR1_LNKIDLE_2047US);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR2,
+ TG3_CPMU_DBTMR2_APE_TX_2047US |
+ TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+ }
+
+ if (reset_phy)
+ tg3_phy_reset(tp);
+
+ err = tg3_chip_reset(tp);
+ if (err)
+ return err;
+
+ tg3_write_sig_legacy(tp, RESET_KIND_INIT);
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ val = tr32(TG3_CPMU_CTRL);
+ val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
+ tw32(TG3_CPMU_CTRL, val);
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+
+ val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
+ val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
+ val |= CPMU_LNK_AWARE_MACCLK_6_25;
+ tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
+
+ val = tr32(TG3_CPMU_HST_ACC);
+ val &= ~CPMU_HST_ACC_MACCLK_MASK;
+ val |= CPMU_HST_ACC_MACCLK_6_25;
+ tw32(TG3_CPMU_HST_ACC, val);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
+ val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
+ PCIE_PWR_MGMT_L1_THRESH_4MS;
+ tw32(PCIE_PWR_MGMT_THRESH, val);
+
+ val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
+ tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
+
+ tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
+
+ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+ }
+
+ if (tg3_flag(tp, L1PLLPD_EN)) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT +
+ TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of DL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT +
+ TG3_PCIE_DL_LO_FTSMAX);
+ val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
+ val | TG3_PCIE_DL_LO_FTSMAX_VAL);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+ }
+
+ /* This works around an issue with Athlon chipsets on
+ * B3 tigon3 silicon. This bit has no effect on any
+ * other revision. But do not set this on PCI Express
+ * chips and don't even touch the clocks if the CPMU is present.
+ */
+ if (!tg3_flag(tp, CPMU_PRESENT)) {
+ if (!tg3_flag(tp, PCI_EXPRESS))
+ tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
+ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ tg3_flag(tp, PCIX_MODE)) {
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_RETRY_SAME_DMA;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
+ if (tg3_flag(tp, ENABLE_APE)) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+ */
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+ /* Enable some hw fixes. */
+ val = tr32(TG3PCI_MSI_DATA);
+ val |= (1 << 26) | (1 << 28) | (1 << 29);
+ tw32(TG3PCI_MSI_DATA, val);
+ }
+
+ /* Descriptor ring init may make accesses to the
+ * NIC SRAM area to setup the TX descriptors, so we
+ * can only do this after the hardware has been
+ * successfully reset.
+ */
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(TG3PCI_DMA_RW_CTRL) &
+ ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= DMA_RWCTRL_TAGGED_STAT_WA;
+ tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ /* This value is determined during the probe time DMA
+ * engine test, tg3_test_dma.
+ */
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ }
+
+ tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
+ GRC_MODE_4X_NIC_SEND_RINGS |
+ GRC_MODE_NO_TX_PHDR_CSUM |
+ GRC_MODE_NO_RX_PHDR_CSUM);
+ tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+
+	/* Pseudo-header checksum is done by hardware logic and not
+	 * the offload processors, so make the chip do the pseudo-
+	 * header checksums on receive.  For transmit it is more
+	 * convenient to do the pseudo-header checksum in software
+	 * as Linux does that on transmit for us in all cases.
+	 */
+ tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+
+ tw32(GRC_MODE,
+ tp->grc_mode |
+ (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
+
+	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
+ val = tr32(GRC_MISC_CFG);
+ val &= ~0xff;
+ val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+
+ /* Initialize MBUF/DESC pool. */
+ if (tg3_flag(tp, 5750_PLUS)) {
+ /* Do nothing. */
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
+ else
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
+ tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
+ tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
+ } else if (tg3_flag(tp, TSO_CAPABLE)) {
+ int fw_len;
+
+ fw_len = tp->fw_len;
+ fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
+ tw32(BUFMGR_MB_POOL_ADDR,
+ NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
+ tw32(BUFMGR_MB_POOL_SIZE,
+ NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
+ }
+
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water);
+ } else {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water_jumbo);
+ }
+ tw32(BUFMGR_DMA_LOW_WATER,
+ tp->bufmgr_config.dma_low_water);
+ tw32(BUFMGR_DMA_HIGH_WATER,
+ tp->bufmgr_config.dma_high_water);
+
+ val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ val |= BUFMGR_MODE_NO_TX_UNDERRUN;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
+ tw32(BUFMGR_MODE, val);
+ for (i = 0; i < 2000; i++) {
+ if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
+ break;
+ udelay(10);
+ }
+ if (i >= 2000) {
+ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
+ return -ENODEV;
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+ tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
+
+ tg3_setup_rxbd_thresholds(tp);
+
+ /* Initialize TG3_BDINFO's at:
+ * RCVDBDI_STD_BD: standard eth size rx ring
+ * RCVDBDI_JUMBO_BD: jumbo frame rx ring
+ * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
+ *
+ * like so:
+ * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
+ * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
+ * ring attribute flags
+ * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
+ *
+ * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
+ * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
+ *
+ * The size of each ring is fixed in the firmware, but the location is
+ * configurable.
+ */
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tpr->rx_std_mapping >> 32));
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tpr->rx_std_mapping & 0xffffffff));
+ if (!tg3_flag(tp, 5717_PLUS))
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+ NIC_SRAM_RX_BUFFER_DESC);
+
+ /* Disable the mini ring */
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Program the jumbo buffer descriptor ring control
+ * blocks on those devices that have them.
+ */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tpr->rx_jmb_mapping >> 32));
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tpr->rx_jmb_mapping & 0xffffffff));
+ val = TG3_RX_JMB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT;
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ val | BDINFO_FLAGS_USE_EXT_RECV);
+ if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+ NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+ } else {
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+ }
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = TG3_RX_STD_MAX_SIZE_5700;
+ else
+ val = TG3_RX_STD_MAX_SIZE_5717;
+ val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
+ val |= (TG3_RX_STD_DMA_SZ << 2);
+ } else
+ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
+ } else
+ val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
+
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
+
+ tpr->rx_std_prod_idx = tp->rx_pending;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
+
+ tpr->rx_jmb_prod_idx =
+ tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
+
+ tg3_rings_reset(tp);
+
+ /* Initialize MAC address and backoff seed. */
+ __tg3_set_mac_addr(tp, 0);
+
+ /* MTU + ethernet header + FCS + optional VLAN tag */
+ tw32(MAC_RX_MTU_SIZE,
+ tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
+ /* The slot time is changed by tg3_setup_phy if we
+ * run at gigabit with half duplex.
+ */
+ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val |= tr32(MAC_TX_LENGTHS) &
+ (TX_LENGTHS_JMB_FRM_LEN_MSK |
+ TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+ tw32(MAC_TX_LENGTHS, val);
+
+ /* Receive rules. */
+ tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
+ tw32(RCVLPC_CONFIG, 0x0181);
+
+ /* Calculate RDMAC_MODE setting early, we need it to determine
+ * the RCVLPC_STATE_ENABLE mask.
+ */
+ rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
+ RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
+ RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
+ RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
+ RDMAC_MODE_LNGREAD_ENAB);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
+ RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
+ RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
+ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+ !tg3_flag(tp, IS_5788)) {
+ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ }
+ }
+
+ if (tg3_flag(tp, PCI_EXPRESS))
+ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
+ rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
+
+ if (tg3_flag(tp, 57765_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(TG3_RDMA_RSRVCTRL_REG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
+ }
+ tw32(TG3_RDMA_RSRVCTRL_REG,
+ val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
+ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
+ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
+ }
+
+ /* Receive/send statistics. */
+ if (tg3_flag(tp, 5750_PLUS)) {
+ val = tr32(RCVLPC_STATS_ENABLE);
+ val &= ~RCVLPC_STATSENAB_DACK_FIX;
+ tw32(RCVLPC_STATS_ENABLE, val);
+ } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+ tg3_flag(tp, TSO_CAPABLE)) {
+ val = tr32(RCVLPC_STATS_ENABLE);
+ val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
+ tw32(RCVLPC_STATS_ENABLE, val);
+ } else {
+ tw32(RCVLPC_STATS_ENABLE, 0xffffff);
+ }
+ tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
+ tw32(SNDDATAI_STATSENAB, 0xffffff);
+ tw32(SNDDATAI_STATSCTRL,
+ (SNDDATAI_SCTRL_ENABLE |
+ SNDDATAI_SCTRL_FASTUPD));
+
+ /* Setup host coalescing engine. */
+ tw32(HOSTCC_MODE, 0);
+ for (i = 0; i < 2000; i++) {
+ if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
+ break;
+ udelay(10);
+ }
+
+ __tg3_set_coalesce(tp, &tp->coal);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ /* Status/statistics block address. See tg3_timer,
+ * the tg3_periodic_fetch_stats call there, and
+ * tg3_get_stats to see how this works for 5705/5750 chips.
+ */
+ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tp->stats_mapping >> 32));
+ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tp->stats_mapping & 0xffffffff));
+ tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
+
+ tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
+
+ /* Clear statistics and status block memory areas */
+ for (i = NIC_SRAM_STATS_BLK;
+ i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
+ i += sizeof(u32)) {
+ tg3_write_mem(tp, i, 0);
+ udelay(40);
+ }
+ }
+
+ tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
+
+ tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
+ tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
+
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ /* reset to prevent losing 1st rx packet intermittently */
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ }
+
+ tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+ MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
+ MAC_MODE_FHDE_ENABLE;
+ if (tg3_flag(tp, ENABLE_APE))
+ tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ if (!tg3_flag(tp, 5705_PLUS) &&
+ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
+ udelay(40);
+
+ /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
+ * If TG3_FLAG_IS_NIC is zero, we should read the
+ * register to preserve the GPIO settings for LOMs. The GPIOs,
+ * whether used as inputs or outputs, are set by boot code after
+ * reset.
+ */
+ if (!tg3_flag(tp, IS_NIC)) {
+ u32 gpio_mask;
+
+ gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
+ GRC_LCLCTRL_GPIO_OUTPUT3;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+ tp->grc_local_ctrl &= ~gpio_mask;
+ tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
+
+ /* GPIO1 must be driven high for eeprom write protect */
+ if (tg3_flag(tp, EEPROM_WRITE_PROT))
+ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OUTPUT1);
+ }
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ udelay(100);
+
+ if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+ val = tr32(MSGINT_MODE);
+ val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ if (!tg3_flag(tp, 1SHOT_MSI))
+ val |= MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
+ udelay(40);
+ }
+
+ val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
+ WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
+ WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
+ WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
+ WDMAC_MODE_LNGREAD_ENAB);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ /* nothing */
+ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+ !tg3_flag(tp, IS_5788)) {
+ val |= WDMAC_MODE_RX_ACCEL;
+ }
+ }
+
+ /* Enable host coalescing bug fix */
+ if (tg3_flag(tp, 5755_PLUS))
+ val |= WDMAC_MODE_STATUS_TAG_FIX;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ val |= WDMAC_MODE_BURST_ALL_DATA;
+
+ tw32_f(WDMAC_MODE, val);
+ udelay(40);
+
+ if (tg3_flag(tp, PCIX_MODE)) {
+ u16 pcix_cmd;
+
+ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ &pcix_cmd);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+ pcix_cmd &= ~PCI_X_CMD_MAX_READ;
+ pcix_cmd |= PCI_X_CMD_READ_2K;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
+ pcix_cmd |= PCI_X_CMD_READ_2K;
+ }
+ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ pcix_cmd);
+ }
+
+ tw32_f(RDMAC_MODE, rdmac_mode);
+ udelay(40);
+
+ tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tw32(SNDDATAC_MODE,
+ SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
+ else
+ tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+
+ tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
+ tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
+ val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
+ if (tg3_flag(tp, LRG_PROD_RING_CAP))
+ val |= RCVDBDI_MODE_LRG_RING_SZ;
+ tw32(RCVDBDI_MODE, val);
+ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
+ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
+ val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
+ if (tg3_flag(tp, ENABLE_TSS))
+ val |= SNDBDI_MODE_MULTI_TXQ_EN;
+ tw32(SNDBDI_MODE, val);
+ tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ err = tg3_load_5701_a0_firmware_fix(tp);
+ if (err)
+ return err;
+ }
+
+ if (tg3_flag(tp, TSO_CAPABLE)) {
+ err = tg3_load_tso_firmware(tp);
+ if (err)
+ return err;
+ }
+
+ tp->tx_mode = TX_MODE_ENABLE;
+
+ if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
+ tp->tx_mode &= ~val;
+ tp->tx_mode |= tr32(MAC_TX_MODE) & val;
+ }
+
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+ udelay(100);
+
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ int i = 0;
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+
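+		/* With a single rx ring (one link vector plus one rx
+		 * vector) the indirection table is zeroed; otherwise
+		 * entries are spread round-robin across the rx vectors,
+		 * packed eight 4-bit entries per register.
+		 */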
+ if (tp->irq_cnt == 2) {
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
+ tw32(reg, 0x0);
+ reg += 4;
+ }
+ } else {
+ u32 val;
+
+ while (i < TG3_RSS_INDIR_TBL_SIZE) {
+ val = i % (tp->irq_cnt - 1);
+ i++;
+ for (; i % 8; i++) {
+ val <<= 4;
+ val |= (i % (tp->irq_cnt - 1));
+ }
+ tw32(reg, val);
+ reg += 4;
+ }
+ }
+
+ /* Setup the "secret" hash key. */
+ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
+ tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
+ tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
+ tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
+ tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
+ tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
+ tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
+ tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
+ tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
+ tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
+ }
+
+ tp->rx_mode = RX_MODE_ENABLE;
+ if (tg3_flag(tp, 5755_PLUS))
+ tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+
+ if (tg3_flag(tp, ENABLE_RSS))
+ tp->rx_mode |= RX_MODE_RSS_ENABLE |
+ RX_MODE_RSS_ITBL_HASH_BITS_7 |
+ RX_MODE_RSS_IPV6_HASH_EN |
+ RX_MODE_RSS_TCP_IPV6_HASH_EN |
+ RX_MODE_RSS_IPV4_HASH_EN |
+ RX_MODE_RSS_TCP_IPV4_HASH_EN;
+
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ }
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
+ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+ /* Set drive transmission level to 1.2V */
+ /* only if the signal pre-emphasis bit is not set */
+ val = tr32(MAC_SERDES_CFG);
+ val &= 0xfffff000;
+ val |= 0x880;
+ tw32(MAC_SERDES_CFG, val);
+ }
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+ tw32(MAC_SERDES_CFG, 0x616000);
+ }
+
+ /* Prevent chip from dropping frames when flow control
+ * is enabled.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = 1;
+ else
+ val = 2;
+ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+ /* Use hardware link auto-negotiation */
+ tg3_flag_set(tp, HW_AUTONEG);
+ }
+
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ u32 tmp;
+
+ tmp = tr32(SERDES_RX_CTRL);
+ tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
+ tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
+ tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
+ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ }
+
+ if (!tg3_flag(tp, USE_PHYLIB)) {
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+ tp->link_config.speed = tp->link_config.orig_speed;
+ tp->link_config.duplex = tp->link_config.orig_duplex;
+ tp->link_config.autoneg = tp->link_config.orig_autoneg;
+ }
+
+ err = tg3_setup_phy(tp, 0);
+ if (err)
+ return err;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ u32 tmp;
+
+ /* Clear CRC stats. */
+ if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
+ tg3_writephy(tp, MII_TG3_TEST1,
+ tmp | MII_TG3_TEST1_CRC_EN);
+ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
+ }
+ }
+ }
+
+ __tg3_set_rx_mode(tp->dev);
+
+ /* Initialize receive rules. */
+ tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
+
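+	/* Clear the remaining receive rules; the four highest-numbered
+	 * rules are skipped when ASF is enabled, presumably leaving them
+	 * to the firmware.
+	 */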
+ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
+ limit = 8;
+ else
+ limit = 16;
+ if (tg3_flag(tp, ENABLE_ASF))
+ limit -= 4;
+ switch (limit) {
+ case 16:
+ tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
+ case 15:
+ tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
+ case 14:
+ tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
+ case 13:
+ tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
+ case 12:
+ tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
+ case 11:
+ tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
+ case 10:
+ tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
+ case 9:
+ tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
+ case 8:
+ tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
+ case 7:
+ tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
+ case 6:
+ tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
+ case 5:
+ tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
+ case 4:
+ /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
+ case 3:
+ /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
+	case 2:
+	case 1:
+	default:
+ break;
+ }
+
+ if (tg3_flag(tp, ENABLE_APE))
+ /* Write our heartbeat update interval to APE. */
+ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
+ APE_HOST_HEARTBEAT_INT_DISABLE);
+
+ tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
+
+ return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing. Invoked with tp->lock held.
+ */
+static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+{
+ tg3_switch_clocks(tp);
+
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ return tg3_reset_hw(tp, reset_phy);
+}
+
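+/* Accumulate a 32-bit hardware counter into a 64-bit software counter,
+ * bumping the high word when the low word wraps around.
+ */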
+#define TG3_STAT_ADD32(PSTAT, REG) \
+do { u32 __val = tr32(REG); \
+ (PSTAT)->low += __val; \
+ if ((PSTAT)->low < __val) \
+ (PSTAT)->high += 1; \
+} while (0)
+
+static void tg3_periodic_fetch_stats(struct tg3 *tp)
+{
+ struct tg3_hw_stats *sp = tp->hw_stats;
+
+ if (!netif_carrier_ok(tp->dev))
+ return;
+
+ TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
+ TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
+ TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
+ TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
+ TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
+ TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
+ TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
+
+ TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+ TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
+ TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
+ TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
+ TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
+ TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
+ TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
+ TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
+ TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
+ TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
+ TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+
+ TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+ TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+ } else {
+ u32 val = tr32(HOSTCC_FLOW_ATTN);
+ val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
+ if (val) {
+ tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
+ sp->rx_discards.low += val;
+ if (sp->rx_discards.low < val)
+ sp->rx_discards.high += 1;
+ }
+ sp->mbuf_lwm_thresh_hit = sp->rx_discards;
+ }
+ TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
+}
+
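+/* Work around occasional lost MSIs: if a vector still has work pending
+ * and its consumer indices have not moved since the last timer tick,
+ * assume the interrupt was missed and call the MSI handler directly.
+ */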
+static void tg3_chk_missed_msi(struct tg3 *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tg3_has_work(tnapi)) {
+ if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
+ tnapi->last_tx_cons == tnapi->tx_cons) {
+ if (tnapi->chk_msi_cnt < 1) {
+ tnapi->chk_msi_cnt++;
+ return;
+ }
+ tg3_msi(0, tnapi);
+ }
+ }
+ tnapi->chk_msi_cnt = 0;
+ tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
+ tnapi->last_tx_cons = tnapi->tx_cons;
+ }
+}
+
+static void tg3_timer(unsigned long __opaque)
+{
+ struct tg3 *tp = (struct tg3 *) __opaque;
+
+ if (tp->irq_sync)
+ goto restart_timer;
+
+ spin_lock(&tp->lock);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_chk_missed_msi(tp);
+
+ if (!tg3_flag(tp, TAGGED_STATUS)) {
+		/* All of this garbage is because, when using non-tagged
+		 * IRQ status, the mailbox/status_block protocol the chip
+		 * uses with the CPU is race prone.
+		 */
+ if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
+ tw32(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+ } else {
+ tw32(HOSTCC_MODE, tp->coalesce_mode |
+ HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
+ }
+
+ if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tg3_flag_set(tp, RESTART_TIMER);
+ spin_unlock(&tp->lock);
+ schedule_work(&tp->reset_task);
+ return;
+ }
+ }
+
+ /* This part only runs once per second. */
+ if (!--tp->timer_counter) {
+ if (tg3_flag(tp, 5705_PLUS))
+ tg3_periodic_fetch_stats(tp);
+
+ if (tp->setlpicnt && !--tp->setlpicnt)
+ tg3_phy_eee_enable(tp);
+
+ if (tg3_flag(tp, USE_LINKCHG_REG)) {
+ u32 mac_stat;
+ int phy_event;
+
+ mac_stat = tr32(MAC_STATUS);
+
+ phy_event = 0;
+ if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
+ if (mac_stat & MAC_STATUS_MI_INTERRUPT)
+ phy_event = 1;
+ } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
+ phy_event = 1;
+
+ if (phy_event)
+ tg3_setup_phy(tp, 0);
+ } else if (tg3_flag(tp, POLL_SERDES)) {
+ u32 mac_stat = tr32(MAC_STATUS);
+ int need_setup = 0;
+
+ if (netif_carrier_ok(tp->dev) &&
+ (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
+ need_setup = 1;
+ }
+ if (!netif_carrier_ok(tp->dev) &&
+ (mac_stat & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET))) {
+ need_setup = 1;
+ }
+ if (need_setup) {
+ if (!tp->serdes_counter) {
+ tw32_f(MAC_MODE,
+ (tp->mac_mode &
+ ~MAC_MODE_PORT_MODE_MASK));
+ udelay(40);
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+ }
+ tg3_setup_phy(tp, 0);
+ }
+ } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ tg3_flag(tp, 5780_CLASS)) {
+ tg3_serdes_parallel_detect(tp);
+ }
+
+ tp->timer_counter = tp->timer_multiplier;
+ }
+
+ /* Heartbeat is only sent once every 2 seconds.
+ *
+ * The heartbeat is to tell the ASF firmware that the host
+ * driver is still alive. In the event that the OS crashes,
+ * ASF needs to reset the hardware to free up the FIFO space
+ * that may be filled with rx packets destined for the host.
+ * If the FIFO is full, ASF will no longer function properly.
+ *
+	 * Unintended resets have been reported on real time kernels
+	 * where the timer doesn't run on time.  Netpoll will also have
+	 * the same problem.
+ *
+ * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
+ * to check the ring condition when the heartbeat is expiring
+ * before doing the reset. This will prevent most unintended
+ * resets.
+ */
+ if (!--tp->asf_counter) {
+ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
+ FWCMD_NICDRV_ALIVE3);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+ TG3_FW_UPDATE_TIMEOUT_SEC);
+
+ tg3_generate_fw_event(tp);
+ }
+ tp->asf_counter = tp->asf_multiplier;
+ }
+
+ spin_unlock(&tp->lock);
+
+restart_timer:
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+}
+
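+/* Pick the ISR flavor for this vector (one-shot MSI, plain MSI, tagged
+ * or legacy INTx) and register it; only legacy interrupts are shared.
+ */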
+static int tg3_request_irq(struct tg3 *tp, int irq_num)
+{
+ irq_handler_t fn;
+ unsigned long flags;
+ char *name;
+ struct tg3_napi *tnapi = &tp->napi[irq_num];
+
+ if (tp->irq_cnt == 1)
+ name = tp->dev->name;
+ else {
+ name = &tnapi->irq_lbl[0];
+ snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+ name[IFNAMSIZ-1] = 0;
+ }
+
+ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+ fn = tg3_msi;
+ if (tg3_flag(tp, 1SHOT_MSI))
+ fn = tg3_msi_1shot;
+ flags = 0;
+ } else {
+ fn = tg3_interrupt;
+ if (tg3_flag(tp, TAGGED_STATUS))
+ fn = tg3_interrupt_tagged;
+ flags = IRQF_SHARED;
+ }
+
+ return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
+}
+
+static int tg3_test_interrupt(struct tg3 *tp)
+{
+ struct tg3_napi *tnapi = &tp->napi[0];
+ struct net_device *dev = tp->dev;
+ int err, i, intr_ok = 0;
+ u32 val;
+
+ if (!netif_running(dev))
+ return -ENODEV;
+
+ tg3_disable_ints(tp);
+
+ free_irq(tnapi->irq_vec, tnapi);
+
+ /*
+ * Turn off MSI one shot mode. Otherwise this test has no
+ * observable way to know whether the interrupt was delivered.
+ */
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
+
+ err = request_irq(tnapi->irq_vec, tg3_test_isr,
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
+ if (err)
+ return err;
+
+ tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
+ tg3_enable_ints(tp);
+
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ tnapi->coal_now);
+
+ for (i = 0; i < 5; i++) {
+ u32 int_mbox, misc_host_ctrl;
+
+ int_mbox = tr32_mailbox(tnapi->int_mbox);
+ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+
+ if ((int_mbox != 0) ||
+ (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
+ intr_ok = 1;
+ break;
+ }
+
+ if (tg3_flag(tp, 57765_PLUS) &&
+ tnapi->hw_status->status_tag != tnapi->last_tag)
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+ msleep(10);
+ }
+
+ tg3_disable_ints(tp);
+
+ free_irq(tnapi->irq_vec, tnapi);
+
+ err = tg3_request_irq(tp, 0);
+
+ if (err)
+ return err;
+
+ if (intr_ok) {
+ /* Reenable MSI one shot mode. */
+ if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
+ val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
+ * mode is successfully restored.
+ */
+static int tg3_test_msi(struct tg3 *tp)
+{
+ int err;
+ u16 pci_cmd;
+
+ if (!tg3_flag(tp, USING_MSI))
+ return 0;
+
+ /* Turn off SERR reporting in case MSI terminates with Master
+ * Abort.
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_word(tp->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
+
+ err = tg3_test_interrupt(tp);
+
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ if (!err)
+ return 0;
+
+ /* other failures */
+ if (err != -EIO)
+ return err;
+
+ /* MSI test failed, go back to INTx mode */
+ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+ "to INTx mode. Please report this failure to the PCI "
+ "maintainer and include system chipset information\n");
+
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+ pci_disable_msi(tp->pdev);
+
+ tg3_flag_clear(tp, USING_MSI);
+ tp->napi[0].irq_vec = tp->pdev->irq;
+
+ err = tg3_request_irq(tp, 0);
+ if (err)
+ return err;
+
+ /* Need to reset the chip because the MSI cycle may have terminated
+ * with Master Abort.
+ */
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_init_hw(tp, 1);
+
+ tg3_full_unlock(tp);
+
+ if (err)
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+ return err;
+}
+
+static int tg3_request_firmware(struct tg3 *tp)
+{
+ const __be32 *fw_data;
+
+ if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ tp->fw_needed);
+ return -ENOENT;
+ }
+
+ fw_data = (void *)tp->fw->data;
+
+	/* Firmware blob starts with version numbers, followed by
+	 * start address and _full_ length including BSS sections
+	 * (which must be longer than the actual data, of course).
+	 */
+
+ tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - 12)) {
+ netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+ tp->fw_len, tp->fw_needed);
+ release_firmware(tp->fw);
+ tp->fw = NULL;
+ return -EINVAL;
+ }
+
+ /* We no longer need firmware; we have it. */
+ tp->fw_needed = NULL;
+ return 0;
+}
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+ int i, rc, cpus = num_online_cpus();
+ struct msix_entry msix_ent[tp->irq_max];
+
+ if (cpus == 1)
+		/* Just fall back to the simpler MSI mode. */
+ return false;
+
+ /*
+ * We want as many rx rings enabled as there are cpus.
+ * The first MSIX vector only deals with link interrupts, etc,
+ * so we add one to the number of vectors we are requesting.
+ */
+ tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+
+ for (i = 0; i < tp->irq_max; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ if (rc < 0) {
+ return false;
+ } else if (rc != 0) {
+ if (pci_enable_msix(tp->pdev, msix_ent, rc))
+ return false;
+ netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+ tp->irq_cnt, rc);
+ tp->irq_cnt = rc;
+ }
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].irq_vec = msix_ent[i].vector;
+
+ netif_set_real_num_tx_queues(tp->dev, 1);
+ rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
+ if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+ pci_disable_msix(tp->pdev);
+ return false;
+ }
+
+ if (tp->irq_cnt > 1) {
+ tg3_flag_set(tp, ENABLE_RSS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_flag_set(tp, ENABLE_TSS);
+ netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+ }
+ }
+
+ return true;
+}
+
+static void tg3_ints_init(struct tg3 *tp)
+{
+ if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
+ !tg3_flag(tp, TAGGED_STATUS)) {
+ /* All MSI supporting chips should support tagged
+ * status. Assert that this is the case.
+ */
+ netdev_warn(tp->dev,
+ "MSI without TAGGED_STATUS? Not using MSI\n");
+ goto defcfg;
+ }
+
+ if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
+ tg3_flag_set(tp, USING_MSIX);
+ else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
+ tg3_flag_set(tp, USING_MSI);
+
+ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+ u32 msi_mode = tr32(MSGINT_MODE);
+ if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
+ msi_mode |= MSGINT_MODE_MULTIVEC_EN;
+ if (!tg3_flag(tp, 1SHOT_MSI))
+ msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+ }
+defcfg:
+ if (!tg3_flag(tp, USING_MSIX)) {
+ tp->irq_cnt = 1;
+ tp->napi[0].irq_vec = tp->pdev->irq;
+ netif_set_real_num_tx_queues(tp->dev, 1);
+ netif_set_real_num_rx_queues(tp->dev, 1);
+ }
+}
+
+static void tg3_ints_fini(struct tg3 *tp)
+{
+ if (tg3_flag(tp, USING_MSIX))
+ pci_disable_msix(tp->pdev);
+ else if (tg3_flag(tp, USING_MSI))
+ pci_disable_msi(tp->pdev);
+ tg3_flag_clear(tp, USING_MSI);
+ tg3_flag_clear(tp, USING_MSIX);
+ tg3_flag_clear(tp, ENABLE_RSS);
+ tg3_flag_clear(tp, ENABLE_TSS);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i, err;
+
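+	/* The fix-up firmware is mandatory on 5701 A0; on other chips a
+	 * failed firmware load merely disables TSO.
+	 */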
+ if (tp->fw_needed) {
+ err = tg3_request_firmware(tp);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (err)
+ return err;
+ } else if (err) {
+ netdev_warn(tp->dev, "TSO capability disabled\n");
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else if (!tg3_flag(tp, TSO_CAPABLE)) {
+ netdev_notice(tp->dev, "TSO capability restored\n");
+ tg3_flag_set(tp, TSO_CAPABLE);
+ }
+ }
+
+ netif_carrier_off(tp->dev);
+
+ err = tg3_power_up(tp);
+ if (err)
+ return err;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_disable_ints(tp);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+
+ tg3_full_unlock(tp);
+
+ /*
+ * Setup interrupts first so we know how
+ * many NAPI resources to allocate
+ */
+ tg3_ints_init(tp);
+
+ /* The placement of this call is tied
+ * to the setup and use of Host TX descriptors.
+ */
+ err = tg3_alloc_consistent(tp);
+ if (err)
+ goto err_out1;
+
+ tg3_napi_init(tp);
+
+ tg3_napi_enable(tp);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ err = tg3_request_irq(tp, i);
+		if (err) {
+			for (i--; i >= 0; i--) {
+				tnapi = &tp->napi[i];
+				free_irq(tnapi->irq_vec, tnapi);
+			}
+			break;
+		}
+ }
+
+ if (err)
+ goto err_out2;
+
+ tg3_full_lock(tp, 0);
+
+ err = tg3_init_hw(tp, 1);
+ if (err) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_free_rings(tp);
+ } else {
+ if (tg3_flag(tp, TAGGED_STATUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+ tp->timer_offset = HZ;
+ else
+ tp->timer_offset = HZ / 10;
+
+ BUG_ON(tp->timer_offset > HZ);
+ tp->timer_counter = tp->timer_multiplier =
+ (HZ / tp->timer_offset);
+ tp->asf_counter = tp->asf_multiplier =
+ ((HZ / tp->timer_offset) * 2);
+
+ init_timer(&tp->timer);
+ tp->timer.expires = jiffies + tp->timer_offset;
+ tp->timer.data = (unsigned long) tp;
+ tp->timer.function = tg3_timer;
+ }
+
+ tg3_full_unlock(tp);
+
+ if (err)
+ goto err_out3;
+
+ if (tg3_flag(tp, USING_MSI)) {
+ err = tg3_test_msi(tp);
+
+ if (err) {
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_free_rings(tp);
+ tg3_full_unlock(tp);
+
+ goto err_out2;
+ }
+
+ if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
+ u32 val = tr32(PCIE_TRANSACTION_CFG);
+
+ tw32(PCIE_TRANSACTION_CFG,
+ val | PCIE_TRANS_CFG_1SHOT_MSI);
+ }
+ }
+
+ tg3_phy_start(tp);
+
+ tg3_full_lock(tp, 0);
+
+ add_timer(&tp->timer);
+ tg3_flag_set(tp, INIT_COMPLETE);
+ tg3_enable_ints(tp);
+
+ tg3_full_unlock(tp);
+
+ netif_tx_start_all_queues(dev);
+
+	/*
+	 * Reset the loopback feature if it was turned on while the device
+	 * was down, to make sure it is configured properly now.
+	 */
+ if (dev->features & NETIF_F_LOOPBACK)
+ tg3_set_loopback(dev, dev->features);
+
+ return 0;
+
+err_out3:
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+
+err_out2:
+ tg3_napi_disable(tp);
+ tg3_napi_fini(tp);
+ tg3_free_consistent(tp);
+
+err_out1:
+ tg3_ints_fini(tp);
+ tg3_frob_aux_power(tp, false);
+ pci_set_power_state(tp->pdev, PCI_D3hot);
+ return err;
+}
+
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+ struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
+
+static int tg3_close(struct net_device *dev)
+{
+ int i;
+ struct tg3 *tp = netdev_priv(dev);
+
+ tg3_napi_disable(tp);
+ cancel_work_sync(&tp->reset_task);
+
+ netif_tx_stop_all_queues(dev);
+
+ del_timer_sync(&tp->timer);
+
+ tg3_phy_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ tg3_disable_ints(tp);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_free_rings(tp);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+
+ tg3_full_unlock(tp);
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+
+ tg3_ints_fini(tp);
+
+ tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+
+ memcpy(&tp->estats_prev, tg3_get_estats(tp),
+ sizeof(tp->estats_prev));
+
+ tg3_napi_fini(tp);
+
+ tg3_free_consistent(tp);
+
+ tg3_power_down(tp);
+
+ netif_carrier_off(tp->dev);
+
+ return 0;
+}
+
+static inline u64 get_stat64(tg3_stat64_t *val)
+{
+ return ((u64)val->high << 32) | ((u64)val->low);
+}
+
+static u64 calc_crc_errors(struct tg3 *tp)
+{
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ u32 val;
+
+ spin_lock_bh(&tp->lock);
+ if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
+ tg3_writephy(tp, MII_TG3_TEST1,
+ val | MII_TG3_TEST1_CRC_EN);
+ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
+ } else
+ val = 0;
+ spin_unlock_bh(&tp->lock);
+
+ tp->phy_crc_errors += val;
+
+ return tp->phy_crc_errors;
+ }
+
+ return get_stat64(&hw_stats->rx_fcs_errors);
+}
+
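+/* Fold a 64-bit hardware counter into the ethtool stats, adding in the
+ * totals saved before the hardware counters were last cleared.
+ */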
+#define ESTAT_ADD(member) \
+ estats->member = old_estats->member + \
+ get_stat64(&hw_stats->member)
+
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+{
+ struct tg3_ethtool_stats *estats = &tp->estats;
+ struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!hw_stats)
+ return old_estats;
+
+ ESTAT_ADD(rx_octets);
+ ESTAT_ADD(rx_fragments);
+ ESTAT_ADD(rx_ucast_packets);
+ ESTAT_ADD(rx_mcast_packets);
+ ESTAT_ADD(rx_bcast_packets);
+ ESTAT_ADD(rx_fcs_errors);
+ ESTAT_ADD(rx_align_errors);
+ ESTAT_ADD(rx_xon_pause_rcvd);
+ ESTAT_ADD(rx_xoff_pause_rcvd);
+ ESTAT_ADD(rx_mac_ctrl_rcvd);
+ ESTAT_ADD(rx_xoff_entered);
+ ESTAT_ADD(rx_frame_too_long_errors);
+ ESTAT_ADD(rx_jabbers);
+ ESTAT_ADD(rx_undersize_packets);
+ ESTAT_ADD(rx_in_length_errors);
+ ESTAT_ADD(rx_out_length_errors);
+ ESTAT_ADD(rx_64_or_less_octet_packets);
+ ESTAT_ADD(rx_65_to_127_octet_packets);
+ ESTAT_ADD(rx_128_to_255_octet_packets);
+ ESTAT_ADD(rx_256_to_511_octet_packets);
+ ESTAT_ADD(rx_512_to_1023_octet_packets);
+ ESTAT_ADD(rx_1024_to_1522_octet_packets);
+ ESTAT_ADD(rx_1523_to_2047_octet_packets);
+ ESTAT_ADD(rx_2048_to_4095_octet_packets);
+ ESTAT_ADD(rx_4096_to_8191_octet_packets);
+ ESTAT_ADD(rx_8192_to_9022_octet_packets);
+
+ ESTAT_ADD(tx_octets);
+ ESTAT_ADD(tx_collisions);
+ ESTAT_ADD(tx_xon_sent);
+ ESTAT_ADD(tx_xoff_sent);
+ ESTAT_ADD(tx_flow_control);
+ ESTAT_ADD(tx_mac_errors);
+ ESTAT_ADD(tx_single_collisions);
+ ESTAT_ADD(tx_mult_collisions);
+ ESTAT_ADD(tx_deferred);
+ ESTAT_ADD(tx_excessive_collisions);
+ ESTAT_ADD(tx_late_collisions);
+ ESTAT_ADD(tx_collide_2times);
+ ESTAT_ADD(tx_collide_3times);
+ ESTAT_ADD(tx_collide_4times);
+ ESTAT_ADD(tx_collide_5times);
+ ESTAT_ADD(tx_collide_6times);
+ ESTAT_ADD(tx_collide_7times);
+ ESTAT_ADD(tx_collide_8times);
+ ESTAT_ADD(tx_collide_9times);
+ ESTAT_ADD(tx_collide_10times);
+ ESTAT_ADD(tx_collide_11times);
+ ESTAT_ADD(tx_collide_12times);
+ ESTAT_ADD(tx_collide_13times);
+ ESTAT_ADD(tx_collide_14times);
+ ESTAT_ADD(tx_collide_15times);
+ ESTAT_ADD(tx_ucast_packets);
+ ESTAT_ADD(tx_mcast_packets);
+ ESTAT_ADD(tx_bcast_packets);
+ ESTAT_ADD(tx_carrier_sense_errors);
+ ESTAT_ADD(tx_discards);
+ ESTAT_ADD(tx_errors);
+
+ ESTAT_ADD(dma_writeq_full);
+ ESTAT_ADD(dma_write_prioq_full);
+ ESTAT_ADD(rxbds_empty);
+ ESTAT_ADD(rx_discards);
+ ESTAT_ADD(rx_errors);
+ ESTAT_ADD(rx_threshold_hit);
+
+ ESTAT_ADD(dma_readq_full);
+ ESTAT_ADD(dma_read_prioq_full);
+ ESTAT_ADD(tx_comp_queue_full);
+
+ ESTAT_ADD(ring_set_send_prod_index);
+ ESTAT_ADD(ring_status_update);
+ ESTAT_ADD(nic_irqs);
+ ESTAT_ADD(nic_avoided_irqs);
+ ESTAT_ADD(nic_tx_threshold_hit);
+
+ ESTAT_ADD(mbuf_lwm_thresh_hit);
+
+ return estats;
+}
+
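+/* Report netdev stats by combining the live 64-bit hardware counters
+ * with the totals saved in net_stats_prev before the last reset.
+ */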
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!hw_stats)
+ return old_stats;
+
+ stats->rx_packets = old_stats->rx_packets +
+ get_stat64(&hw_stats->rx_ucast_packets) +
+ get_stat64(&hw_stats->rx_mcast_packets) +
+ get_stat64(&hw_stats->rx_bcast_packets);
+
+ stats->tx_packets = old_stats->tx_packets +
+ get_stat64(&hw_stats->tx_ucast_packets) +
+ get_stat64(&hw_stats->tx_mcast_packets) +
+ get_stat64(&hw_stats->tx_bcast_packets);
+
+ stats->rx_bytes = old_stats->rx_bytes +
+ get_stat64(&hw_stats->rx_octets);
+ stats->tx_bytes = old_stats->tx_bytes +
+ get_stat64(&hw_stats->tx_octets);
+
+ stats->rx_errors = old_stats->rx_errors +
+ get_stat64(&hw_stats->rx_errors);
+ stats->tx_errors = old_stats->tx_errors +
+ get_stat64(&hw_stats->tx_errors) +
+ get_stat64(&hw_stats->tx_mac_errors) +
+ get_stat64(&hw_stats->tx_carrier_sense_errors) +
+ get_stat64(&hw_stats->tx_discards);
+
+ stats->multicast = old_stats->multicast +
+ get_stat64(&hw_stats->rx_mcast_packets);
+ stats->collisions = old_stats->collisions +
+ get_stat64(&hw_stats->tx_collisions);
+
+ stats->rx_length_errors = old_stats->rx_length_errors +
+ get_stat64(&hw_stats->rx_frame_too_long_errors) +
+ get_stat64(&hw_stats->rx_undersize_packets);
+
+ stats->rx_over_errors = old_stats->rx_over_errors +
+ get_stat64(&hw_stats->rxbds_empty);
+ stats->rx_frame_errors = old_stats->rx_frame_errors +
+ get_stat64(&hw_stats->rx_align_errors);
+ stats->tx_aborted_errors = old_stats->tx_aborted_errors +
+ get_stat64(&hw_stats->tx_discards);
+ stats->tx_carrier_errors = old_stats->tx_carrier_errors +
+ get_stat64(&hw_stats->tx_carrier_sense_errors);
+
+ stats->rx_crc_errors = old_stats->rx_crc_errors +
+ calc_crc_errors(tp);
+
+ stats->rx_missed_errors = old_stats->rx_missed_errors +
+ get_stat64(&hw_stats->rx_discards);
+
+ stats->rx_dropped = tp->rx_dropped;
+
+ return stats;
+}
+
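+/* Bitwise CRC-32 over @buf using the reflected Ethernet polynomial
+ * (0xedb88320); the result feeds the multicast hash filter below.
+ */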
+static inline u32 calc_crc(unsigned char *buf, int len)
+{
+ u32 reg;
+ u32 tmp;
+ int j, k;
+
+ reg = 0xffffffff;
+
+ for (j = 0; j < len; j++) {
+ reg ^= buf[j];
+
+ for (k = 0; k < 8; k++) {
+ tmp = reg & 0x01;
+
+ reg >>= 1;
+
+ if (tmp)
+ reg ^= 0xedb88320;
+ }
+ }
+
+ return ~reg;
+}
+
+static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
+{
+ /* accept or reject all multicast frames */
+ tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
+}
+
+static void __tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 rx_mode;
+
+ rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
+ RX_MODE_KEEP_VLAN_TAG);
+
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
+ /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
+ * flag clear.
+ */
+ if (!tg3_flag(tp, ENABLE_ASF))
+ rx_mode |= RX_MODE_KEEP_VLAN_TAG;
+#endif
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode. */
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast. */
+ tg3_set_multi(tp, 1);
+ } else if (netdev_mc_empty(dev)) {
+ /* Reject all multicast. */
+ tg3_set_multi(tp, 0);
+ } else {
+ /* Accept one or more multicast(s). */
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[4] = { 0, };
+ u32 regidx;
+ u32 bit;
+ u32 crc;
+
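+ /* Hash each address into the 128-bit filter: the low seven
+ * bits of the inverted CRC select the filter bit, with bits
+ * 6:5 picking one of the four hash registers and bits 4:0
+ * the bit within it.
+ */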
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = calc_crc(ha->addr, ETH_ALEN);
+ bit = ~crc & 0x7f;
+ regidx = (bit & 0x60) >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ tw32(MAC_HASH_REG_0, mc_filter[0]);
+ tw32(MAC_HASH_REG_1, mc_filter[1]);
+ tw32(MAC_HASH_REG_2, mc_filter[2]);
+ tw32(MAC_HASH_REG_3, mc_filter[3]);
+ }
+
+ if (rx_mode != tp->rx_mode) {
+ tp->rx_mode = rx_mode;
+ tw32_f(MAC_RX_MODE, rx_mode);
+ udelay(10);
+ }
+}
+
+static void tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ tg3_full_lock(tp, 0);
+ __tg3_set_rx_mode(dev);
+ tg3_full_unlock(tp);
+}
+
+static int tg3_get_regs_len(struct net_device *dev)
+{
+ return TG3_REG_BLK_SIZE;
+}
+
+static void tg3_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *_p)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ regs->version = 0;
+
+ memset(_p, 0, TG3_REG_BLK_SIZE);
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ return;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_dump_legacy_regs(tp, (u32 *)_p);
+
+ tg3_full_unlock(tp);
+}
+
+static int tg3_get_eeprom_len(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ return tp->nvram_size;
+}
+
+static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int ret;
+ u8 *pd;
+ u32 i, offset, len, b_offset, b_count;
+ __be32 val;
+
+ if (tg3_flag(tp, NO_NVRAM))
+ return -EINVAL;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ return -EAGAIN;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ eeprom->magic = TG3_EEPROM_MAGIC;
+
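+ /* NVRAM is word addressable: copy out a leading partial word,
+ * then whole words, then a trailing partial word.
+ */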
+ if (offset & 3) {
+ /* adjustments to start on required 4 byte boundary */
+ b_offset = offset & 3;
+ b_count = 4 - b_offset;
+ if (b_count > len) {
+ /* i.e. offset=1 len=2 */
+ b_count = len;
+ }
+ ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
+ if (ret)
+ return ret;
+ memcpy(data, ((char *)&val) + b_offset, b_count);
+ len -= b_count;
+ offset += b_count;
+ eeprom->len += b_count;
+ }
+
+ /* read bytes up to the last 4 byte boundary */
+ pd = &data[eeprom->len];
+ for (i = 0; i < (len - (len & 3)); i += 4) {
+ ret = tg3_nvram_read_be32(tp, offset + i, &val);
+ if (ret) {
+ eeprom->len += i;
+ return ret;
+ }
+ memcpy(pd + i, &val, 4);
+ }
+ eeprom->len += i;
+
+ if (len & 3) {
+ /* read last bytes not ending on 4 byte boundary */
+ pd = &data[eeprom->len];
+ b_count = len & 3;
+ b_offset = offset + len - b_count;
+ ret = tg3_nvram_read_be32(tp, b_offset, &val);
+ if (ret)
+ return ret;
+ memcpy(pd, &val, b_count);
+ eeprom->len += b_count;
+ }
+ return 0;
+}
+
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+
+static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int ret;
+ u32 offset, len, b_offset, odd_len;
+ u8 *buf;
+ __be32 start, end;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ return -EAGAIN;
+
+ if (tg3_flag(tp, NO_NVRAM) ||
+ eeprom->magic != TG3_EEPROM_MAGIC)
+ return -EINVAL;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+
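+ /* Writes must be word aligned too; read back the bordering
+ * words so unaligned head/tail bytes can be merged into a
+ * bounce buffer before writing.
+ */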
+ if ((b_offset = (offset & 3))) {
+ /* adjustments to start on required 4 byte boundary */
+ ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
+ if (ret)
+ return ret;
+ len += b_offset;
+ offset &= ~3;
+ if (len < 4)
+ len = 4;
+ }
+
+ odd_len = 0;
+ if (len & 3) {
+ /* adjustments to end on required 4 byte boundary */
+ odd_len = 1;
+ len = (len + 3) & ~3;
+ ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
+ if (ret)
+ return ret;
+ }
+
+ buf = data;
+ if (b_offset || odd_len) {
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (b_offset)
+ memcpy(buf, &start, 4);
+ if (odd_len)
+ memcpy(buf+len-4, &end, 4);
+ memcpy(buf + b_offset, data, eeprom->len);
+ }
+
+ ret = tg3_nvram_write_block(tp, offset, len, buf);
+
+ if (buf != data)
+ kfree(buf);
+
+ return ret;
+}
+
+static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ struct phy_device *phydev;
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ return phy_ethtool_gset(phydev, cmd);
+ }
+
+ cmd->supported = (SUPPORTED_Autoneg);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ cmd->supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ cmd->supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP);
+ cmd->port = PORT_TP;
+ } else {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ }
+
+ cmd->advertising = tp->link_config.advertising;
+ if (tg3_flag(tp, PAUSE_AUTONEG)) {
+ if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
+ if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+ cmd->advertising |= ADVERTISED_Pause;
+ } else {
+ cmd->advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ }
+ } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ }
+ }
+ if (netif_running(dev)) {
+ ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
+ cmd->duplex = tp->link_config.active_duplex;
+ } else {
+ ethtool_cmd_speed_set(cmd, SPEED_INVALID);
+ cmd->duplex = DUPLEX_INVALID;
+ }
+ cmd->phy_address = tp->phy_addr;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = tp->link_config.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ struct phy_device *phydev;
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ return phy_ethtool_sset(phydev, cmd);
+ }
+
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ cmd->duplex != DUPLEX_FULL &&
+ cmd->duplex != DUPLEX_HALF)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ u32 mask = ADVERTISED_Autoneg |
+ ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ mask |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ mask |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_TP;
+ else
+ mask |= ADVERTISED_FIBRE;
+
+ if (cmd->advertising & ~mask)
+ return -EINVAL;
+
+ mask &= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full);
+
+ cmd->advertising &= mask;
+ } else {
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
+ if (speed != SPEED_1000)
+ return -EINVAL;
+
+ if (cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else {
+ if (speed != SPEED_100 &&
+ speed != SPEED_10)
+ return -EINVAL;
+ }
+ }
+
+ tg3_full_lock(tp, 0);
+
+ tp->link_config.autoneg = cmd->autoneg;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ tp->link_config.advertising = (cmd->advertising |
+ ADVERTISED_Autoneg);
+ tp->link_config.speed = SPEED_INVALID;
+ tp->link_config.duplex = DUPLEX_INVALID;
+ } else {
+ tp->link_config.advertising = 0;
+ tp->link_config.speed = speed;
+ tp->link_config.duplex = cmd->duplex;
+ }
+
+ tp->link_config.orig_speed = tp->link_config.speed;
+ tp->link_config.orig_duplex = tp->link_config.duplex;
+ tp->link_config.orig_autoneg = tp->link_config.autoneg;
+
+ if (netif_running(dev))
+ tg3_setup_phy(tp, 1);
+
+ tg3_full_unlock(tp);
+
+ return 0;
+}
+
+static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+ strcpy(info->fw_version, tp->fw_ver);
+ strcpy(info->bus_info, pci_name(tp->pdev));
+}
+
+static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+ wol->wolopts = 0;
+ if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
+ wol->wolopts = WAKE_MAGIC;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct device *dp = &tp->pdev->dev;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+ if ((wol->wolopts & WAKE_MAGIC) &&
+ !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
+ return -EINVAL;
+
+ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
+ spin_lock_bh(&tp->lock);
+ if (device_may_wakeup(dp))
+ tg3_flag_set(tp, WOL_ENABLE);
+ else
+ tg3_flag_clear(tp, WOL_ENABLE);
+ spin_unlock_bh(&tp->lock);
+
+ return 0;
+}
+
+static u32 tg3_get_msglevel(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ return tp->msg_enable;
+}
+
+static void tg3_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ tp->msg_enable = value;
+}
+
+static int tg3_nway_reset(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int r;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ return -EINVAL;
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ } else {
+ u32 bmcr;
+
+ spin_lock_bh(&tp->lock);
+ r = -EINVAL;
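+ /* Dummy read; only the second read's result is used. */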
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
+ ((bmcr & BMCR_ANENABLE) ||
+ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
+ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
+ BMCR_ANENABLE);
+ r = 0;
+ }
+ spin_unlock_bh(&tp->lock);
+ }
+
+ return r;
+}
+
+static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ ering->rx_max_pending = tp->rx_std_ring_mask;
+ ering->rx_mini_max_pending = 0;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
+ else
+ ering->rx_jumbo_max_pending = 0;
+
+ ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
+
+ ering->rx_pending = tp->rx_pending;
+ ering->rx_mini_pending = 0;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+ else
+ ering->rx_jumbo_pending = 0;
+
+ ering->tx_pending = tp->napi[0].tx_pending;
+}
+
+static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i, irq_sync = 0, err = 0;
+
+ if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+ (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
+ (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS) ||
+ (tg3_flag(tp, TSO_BUG) &&
+ (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ tg3_phy_stop(tp);
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
+
+ tg3_full_lock(tp, irq_sync);
+
+ tp->rx_pending = ering->rx_pending;
+
+ if (tg3_flag(tp, MAX_RXPEND_64) &&
+ tp->rx_pending > 63)
+ tp->rx_pending = 63;
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].tx_pending = ering->tx_pending;
+
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
+ }
+
+ tg3_full_unlock(tp);
+
+ if (irq_sync && !err)
+ tg3_phy_start(tp);
+
+ return err;
+}
+
+static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
+
+ if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+ epause->rx_pause = 1;
+ else
+ epause->rx_pause = 0;
+
+ if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+ epause->tx_pause = 1;
+ else
+ epause->tx_pause = 0;
+}
+
+static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err = 0;
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ u32 newadv;
+ struct phy_device *phydev;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ tp->link_config.flowctrl = 0;
+ if (epause->rx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+
+ if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Pause;
+ } else
+ newadv = ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ else
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+ u32 oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg) {
+ /*
+ * Always renegotiate the link to
+ * inform our link partner of our
+ * flow control settings, even if the
+ * flow control is forced. Let
+ * tg3_adjust_link() do the final
+ * flow control setup.
+ */
+ return phy_start_aneg(phydev);
+ }
+ }
+
+ if (!epause->autoneg)
+ tg3_setup_flow_control(tp, 0, 0);
+ } else {
+ tp->link_config.orig_advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ tp->link_config.orig_advertising |= newadv;
+ }
+ } else {
+ int irq_sync = 0;
+
+ if (netif_running(dev)) {
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
+
+ tg3_full_lock(tp, irq_sync);
+
+ if (epause->autoneg)
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ else
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+ if (epause->rx_pause)
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+ else
+ tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
+ if (epause->tx_pause)
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ else
+ tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
+
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
+ }
+
+ tg3_full_unlock(tp);
+ }
+
+ return err;
+}
+
+static int tg3_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return TG3_NUM_TEST;
+ case ETH_SS_STATS:
+ return TG3_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+ break;
+ default:
+ WARN_ON(1); /* we need a WARN() */
+ break;
+ }
+}
+
+static int tg3_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!netif_running(tp->dev))
+ return -EAGAIN;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON |
+ LED_CTRL_100MBPS_ON |
+ LED_CTRL_10MBPS_ON |
+ LED_CTRL_TRAFFIC_OVERRIDE |
+ LED_CTRL_TRAFFIC_BLINK |
+ LED_CTRL_TRAFFIC_LED);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_TRAFFIC_OVERRIDE);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static void tg3_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *tmp_stats)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+}
+
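+/* Locate and read the VPD block: prefer the extended-VPD directory
+ * entry in NVRAM, fall back to the fixed VPD offset, or read through
+ * PCI config space when the NVRAM magic is absent.
+ */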
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+{
+ int i;
+ __be32 *buf;
+ u32 offset = 0, len = 0;
+ u32 magic, val;
+
+ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
+ return NULL;
+
+ if (magic == TG3_EEPROM_MAGIC) {
+ for (offset = TG3_NVM_DIR_START;
+ offset < TG3_NVM_DIR_END;
+ offset += TG3_NVM_DIRENT_SIZE) {
+ if (tg3_nvram_read(tp, offset, &val))
+ return NULL;
+
+ if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
+ TG3_NVM_DIRTYPE_EXTVPD)
+ break;
+ }
+
+ if (offset != TG3_NVM_DIR_END) {
+ len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
+ if (tg3_nvram_read(tp, offset + 4, &offset))
+ return NULL;
+
+ offset = tg3_nvram_logical_addr(tp, offset);
+ }
+ }
+
+ if (!offset || !len) {
+ offset = TG3_NVM_VPD_OFF;
+ len = TG3_NVM_VPD_LEN;
+ }
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (buf == NULL)
+ return NULL;
+
+ if (magic == TG3_EEPROM_MAGIC) {
+ for (i = 0; i < len; i += 4) {
+ /* The data is in little-endian format in NVRAM.
+ * Use the big-endian read routines to preserve
+ * the byte order as it exists in NVRAM.
+ */
+ if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
+ goto error;
+ }
+ } else {
+ u8 *ptr;
+ ssize_t cnt;
+ unsigned int pos = 0;
+
+ ptr = (u8 *)&buf[0];
+ for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
+ cnt = pci_read_vpd(tp->pdev, pos,
+ len - pos, ptr);
+ if (cnt == -ETIMEDOUT || cnt == -EINTR)
+ cnt = 0;
+ else if (cnt < 0)
+ goto error;
+ }
+ if (pos != len)
+ goto error;
+ }
+
+ *vpdlen = len;
+
+ return buf;
+
+error:
+ kfree(buf);
+ return NULL;
+}
+
+#define NVRAM_TEST_SIZE 0x100
+#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
+#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
+#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
+#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
+#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
+#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
+#define NVRAM_SELFBOOT_HW_SIZE 0x20
+#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+ u32 csum, magic, len;
+ __be32 *buf;
+ int i, j, k, err = 0, size;
+
+ if (tg3_flag(tp, NO_NVRAM))
+ return 0;
+
+ if (tg3_nvram_read(tp, 0, &magic) != 0)
+ return -EIO;
+
+ if (magic == TG3_EEPROM_MAGIC)
+ size = NVRAM_TEST_SIZE;
+ else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
+ if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
+ TG3_EEPROM_SB_FORMAT_1) {
+ switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
+ case TG3_EEPROM_SB_REVISION_0:
+ size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_2:
+ size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_3:
+ size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_4:
+ size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_5:
+ size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_6:
+ size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
+ break;
+ default:
+ return -EIO;
+ }
+ } else
+ return 0;
+ } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+ size = NVRAM_SELFBOOT_HW_SIZE;
+ else
+ return -EIO;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ err = -EIO;
+ for (i = 0, j = 0; i < size; i += 4, j++) {
+ err = tg3_nvram_read_be32(tp, i, &buf[j]);
+ if (err)
+ break;
+ }
+ if (i < size)
+ goto out;
+
+ /* Selfboot format */
+ magic = be32_to_cpu(buf[0]);
+ if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
+ TG3_EEPROM_MAGIC_FW) {
+ u8 *buf8 = (u8 *) buf, csum8 = 0;
+
+ if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
+ TG3_EEPROM_SB_REVISION_2) {
+ /* For rev 2, the csum doesn't include the MBA. */
+ for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
+ csum8 += buf8[i];
+ for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
+ csum8 += buf8[i];
+ } else {
+ for (i = 0; i < size; i++)
+ csum8 += buf8[i];
+ }
+
+ if (csum8 == 0) {
+ err = 0;
+ goto out;
+ }
+
+ err = -EIO;
+ goto out;
+ }
+
+ if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
+ TG3_EEPROM_MAGIC_HW) {
+ u8 data[NVRAM_SELFBOOT_DATA_SIZE];
+ u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
+ u8 *buf8 = (u8 *) buf;
+
+ /* Separate the parity bits and the data bytes. */
+ for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
+ if ((i == 0) || (i == 8)) {
+ int l;
+ u8 msk;
+
+ for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
+ parity[k++] = buf8[i] & msk;
+ i++;
+ } else if (i == 16) {
+ int l;
+ u8 msk;
+
+ for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
+ parity[k++] = buf8[i] & msk;
+ i++;
+
+ for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
+ parity[k++] = buf8[i] & msk;
+ i++;
+ }
+ data[j++] = buf8[i];
+ }
+
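+ /* Each data byte plus its parity bit must have odd overall
+ * parity; fail on an even-weight byte with a clear parity
+ * bit or an odd-weight byte with a set one.
+ */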
+ err = -EIO;
+ for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
+ u8 hw8 = hweight8(data[i]);
+
+ if ((hw8 & 0x1) && parity[i])
+ goto out;
+ else if (!(hw8 & 0x1) && !parity[i])
+ goto out;
+ }
+ err = 0;
+ goto out;
+ }
+
+ err = -EIO;
+
+ /* Bootstrap checksum at offset 0x10 */
+ csum = calc_crc((unsigned char *) buf, 0x10);
+ if (csum != le32_to_cpu(buf[0x10/4]))
+ goto out;
+
+ /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+ if (csum != le32_to_cpu(buf[0xfc/4]))
+ goto out;
+
+ kfree(buf);
+
+ buf = tg3_vpd_readblock(tp, &len);
+ if (!buf)
+ return -ENOMEM;
+
+ i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
+ if (i > 0) {
+ j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
+ if (j < 0)
+ goto out;
+
+ if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
+ goto out;
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
+ PCI_VPD_RO_KEYWORD_CHKSUM);
+ if (j > 0) {
+ u8 csum8 = 0;
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ for (i = 0; i <= j; i++)
+ csum8 += ((u8 *)buf)[i];
+
+ if (csum8)
+ goto out;
+ }
+ }
+
+ err = 0;
+
+out:
+ kfree(buf);
+ return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC 2
+#define TG3_COPPER_TIMEOUT_SEC 6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+ int i, max;
+
+ if (!netif_running(tp->dev))
+ return -ENODEV;
+
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ max = TG3_SERDES_TIMEOUT_SEC;
+ else
+ max = TG3_COPPER_TIMEOUT_SEC;
+
+ for (i = 0; i < max; i++) {
+ if (netif_carrier_ok(tp->dev))
+ return 0;
+
+ if (msleep_interruptible(1000))
+ break;
+ }
+
+ return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+ int i, is_5705, is_5750;
+ u32 offset, read_mask, write_mask, val, save_val, read_val;
+ static struct {
+ u16 offset;
+ u16 flags;
+#define TG3_FL_5705 0x1
+#define TG3_FL_NOT_5705 0x2
+#define TG3_FL_NOT_5788 0x4
+#define TG3_FL_NOT_5750 0x8
+ u32 read_mask;
+ u32 write_mask;
+ } reg_tbl[] = {
+ /* MAC Control Registers */
+ { MAC_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x00ef6f8c },
+ { MAC_MODE, TG3_FL_5705,
+ 0x00000000, 0x01ef6b8c },
+ { MAC_STATUS, TG3_FL_NOT_5705,
+ 0x03800107, 0x00000000 },
+ { MAC_STATUS, TG3_FL_5705,
+ 0x03800100, 0x00000000 },
+ { MAC_ADDR_0_HIGH, 0x0000,
+ 0x00000000, 0x0000ffff },
+ { MAC_ADDR_0_LOW, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_RX_MTU_SIZE, 0x0000,
+ 0x00000000, 0x0000ffff },
+ { MAC_TX_MODE, 0x0000,
+ 0x00000000, 0x00000070 },
+ { MAC_TX_LENGTHS, 0x0000,
+ 0x00000000, 0x00003fff },
+ { MAC_RX_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x000007fc },
+ { MAC_RX_MODE, TG3_FL_5705,
+ 0x00000000, 0x000007dc },
+ { MAC_HASH_REG_0, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_1, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_2, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_3, 0x0000,
+ 0x00000000, 0xffffffff },
+
+ /* Receive Data and Receive BD Initiator Control Registers. */
+ { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+ 0x00000000, 0x00000003 },
+ { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+0, 0x0000,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+4, 0x0000,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+8, 0x0000,
+ 0x00000000, 0xffff0002 },
+ { RCVDBDI_STD_BD+0xc, 0x0000,
+ 0x00000000, 0xffffffff },
+
+ /* Receive BD Initiator Control Registers. */
+ { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVBDI_STD_THRESH, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+
+ /* Host Coalescing Control Registers. */
+ { HOSTCC_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x00000004 },
+ { HOSTCC_MODE, TG3_FL_5705,
+ 0x00000000, 0x000000f6 },
+ { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+ 0xffffffff, 0x00000000 },
+ { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+ 0xffffffff, 0x00000000 },
+
+ /* Buffer Manager Control Registers. */
+ { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
+ 0x00000000, 0x007fff80 },
+ { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
+ 0x00000000, 0x007fffff },
+ { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+ 0x00000000, 0x0000003f },
+ { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+ 0x00000000, 0x000001ff },
+ { BUFMGR_MB_HIGH_WATER, 0x0000,
+ 0x00000000, 0x000001ff },
+ { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+ 0xffffffff, 0x00000000 },
+ { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+ 0xffffffff, 0x00000000 },
+
+ /* Mailbox Registers */
+ { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+ 0x00000000, 0x000001ff },
+ { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+ 0x00000000, 0x000001ff },
+ { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+ 0x00000000, 0x000007ff },
+ { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+ 0x00000000, 0x000001ff },
+
+ { 0xffff, 0x0000, 0x00000000, 0x00000000 },
+ };
+
+ is_5705 = is_5750 = 0;
+ if (tg3_flag(tp, 5705_PLUS)) {
+ is_5705 = 1;
+ if (tg3_flag(tp, 5750_PLUS))
+ is_5750 = 1;
+ }
+
+ for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+ if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+ continue;
+
+ if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+ continue;
+
+ if (tg3_flag(tp, IS_5788) &&
+ (reg_tbl[i].flags & TG3_FL_NOT_5788))
+ continue;
+
+ if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
+ continue;
+
+ offset = (u32) reg_tbl[i].offset;
+ read_mask = reg_tbl[i].read_mask;
+ write_mask = reg_tbl[i].write_mask;
+
+ /* Save the original register content */
+ save_val = tr32(offset);
+
+ /* Determine the read-only value. */
+ read_val = save_val & read_mask;
+
+ /* Write zero to the register, then make sure the read-only bits
+ * are not changed and the read/write bits are all zeros.
+ */
+ tw32(offset, 0);
+
+ val = tr32(offset);
+
+ /* Test the read-only and read/write bits. */
+ if (((val & read_mask) != read_val) || (val & write_mask))
+ goto out;
+
+ /* Write ones to all the bits defined by RdMask and WrMask, then
+ * make sure the read-only bits are not changed and the
+ * read/write bits are all ones.
+ */
+ tw32(offset, read_mask | write_mask);
+
+ val = tr32(offset);
+
+ /* Test the read-only bits. */
+ if ((val & read_mask) != read_val)
+ goto out;
+
+ /* Test the read/write bits. */
+ if ((val & write_mask) != write_mask)
+ goto out;
+
+ tw32(offset, save_val);
+ }
+
+ return 0;
+
+out:
+ if (netif_msg_hw(tp))
+ netdev_err(tp->dev,
+ "Register test failed at offset %x\n", offset);
+ tw32(offset, save_val);
+ return -EIO;
+}
+
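+/* Write each test pattern across the region a word at a time and
+ * verify it reads back intact.
+ */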
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+ static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+ int i;
+ u32 j;
+
+ for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
+ for (j = 0; j < len; j += 4) {
+ u32 val;
+
+ tg3_write_mem(tp, offset + j, test_pattern[i]);
+ tg3_read_mem(tp, offset + j, &val);
+ if (val != test_pattern[i])
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+ static struct mem_entry {
+ u32 offset;
+ u32 len;
+ } mem_tbl_570x[] = {
+ { 0x00000000, 0x00b50},
+ { 0x00002000, 0x1c000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5705[] = {
+ { 0x00000100, 0x0000c},
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x01000},
+ { 0x00008000, 0x02000},
+ { 0x00010000, 0x0e000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5755[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x00800},
+ { 0x00008000, 0x02000},
+ { 0x00010000, 0x0c000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5906[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00400},
+ { 0x00006000, 0x00400},
+ { 0x00008000, 0x01000},
+ { 0x00010000, 0x01000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5717[] = {
+ { 0x00000200, 0x00008},
+ { 0x00010000, 0x0a000},
+ { 0x00020000, 0x13c00},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_57765[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x09800},
+ { 0x00010000, 0x0a000},
+ { 0xffffffff, 0x00000}
+ };
+ struct mem_entry *mem_tbl;
+ int err = 0;
+ int i;
+
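+ /* Pick the on-chip memory map matching the ASIC generation;
+ * each table ends with the 0xffffffff sentinel.
+ */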
+ if (tg3_flag(tp, 5717_PLUS))
+ mem_tbl = mem_tbl_5717;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ mem_tbl = mem_tbl_57765;
+ else if (tg3_flag(tp, 5755_PLUS))
+ mem_tbl = mem_tbl_5755;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ mem_tbl = mem_tbl_5906;
+ else if (tg3_flag(tp, 5705_PLUS))
+ mem_tbl = mem_tbl_5705;
+ else
+ mem_tbl = mem_tbl_570x;
+
+ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+ err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+#define TG3_TSO_MSS 500
+
+#define TG3_TSO_IP_HDR_LEN 20
+#define TG3_TSO_TCP_HDR_LEN 20
+#define TG3_TSO_TCP_OPT_LEN 12
+
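+/* Canned loopback frame template: 2-byte ethertype (0x0800), a
+ * 20-byte IPv4 header, and a 20-byte TCP header with 12 bytes of
+ * options.
+ */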
+static const u8 tg3_tso_header[] = {
+0x08, 0x00,
+0x45, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00,
+0x40, 0x06, 0x00, 0x00,
+0x0a, 0x00, 0x00, 0x01,
+0x0a, 0x00, 0x00, 0x02,
+0x0d, 0x00, 0xe0, 0x00,
+0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x02, 0x00,
+0x80, 0x10, 0x10, 0x00,
+0x14, 0x09, 0x00, 0x00,
+0x01, 0x01, 0x08, 0x0a,
+0x11, 0x11, 0x11, 0x11,
+0x11, 0x11, 0x11, 0x11,
+};
+
+static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
+{
+ u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
+ u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+ u32 budget;
+ struct sk_buff *skb, *rx_skb;
+ u8 *tx_data;
+ dma_addr_t map;
+ int num_pkts, tx_len, rx_len, i, err;
+ struct tg3_rx_buffer_desc *desc;
+ struct tg3_napi *tnapi, *rnapi;
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
+ if (tp->irq_cnt > 1) {
+ if (tg3_flag(tp, ENABLE_RSS))
+ rnapi = &tp->napi[1];
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi = &tp->napi[1];
+ }
+ coal_now = tnapi->coal_now | rnapi->coal_now;
+
+ err = -EIO;
+
+ tx_len = pktsz;
+ skb = netdev_alloc_skb(tp->dev, tx_len);
+ if (!skb)
+ return -ENOMEM;
+
+ tx_data = skb_put(skb, tx_len);
+ memcpy(tx_data, tp->dev->dev_addr, 6);
+ memset(tx_data + 6, 0x0, 8);
+
+ tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
+
+ if (tso_loopback) {
+ struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
+
+ u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
+ TG3_TSO_TCP_OPT_LEN;
+
+ memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
+ sizeof(tg3_tso_header));
+ mss = TG3_TSO_MSS;
+
+ val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
+ num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
+
+ /* Set the total length field in the IP header */
+ iph->tot_len = htons((u16)(mss + hdr_len));
+
+ base_flags = (TXD_FLAG_CPU_PRE_DMA |
+ TXD_FLAG_CPU_POST_DMA);
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) {
+ struct tcphdr *th;
+ val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
+ th = (struct tcphdr *)&tx_data[val];
+ th->check = 0;
+ } else
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
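+ /* Each hardware TSO generation encodes the header length in
+ * a different spot in the mss/flags descriptor fields.
+ */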
+ if (tg3_flag(tp, HW_TSO_3)) {
+ mss |= (hdr_len & 0xc) << 12;
+ if (hdr_len & 0x10)
+ base_flags |= 0x00000010;
+ base_flags |= (hdr_len & 0x3e0) << 5;
+ } else if (tg3_flag(tp, HW_TSO_2))
+ mss |= hdr_len << 9;
+ else if (tg3_flag(tp, HW_TSO_1) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ mss |= (TG3_TSO_TCP_OPT_LEN << 9);
+ } else {
+ base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
+ }
+
+ data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
+ } else {
+ num_pkts = 1;
+ data_off = ETH_HLEN;
+ }
+
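+ /* Fill the payload with a rolling byte pattern so the receive
+ * side can be verified byte for byte.
+ */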
+ for (i = data_off; i < tx_len; i++)
+ tx_data[i] = (u8) (i & 0xff);
+
+ map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(tp->pdev, map)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ val = tnapi->tx_prod;
+ tnapi->tx_buffers[val].skb = skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
+
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ rnapi->coal_now);
+
+ udelay(10);
+
+ rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
+
+ budget = tg3_tx_avail(tnapi);
+ if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
+ base_flags | TXD_FLAG_END, mss, 0)) {
+ tnapi->tx_buffers[val].skb = NULL;
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ tnapi->tx_prod++;
+
+ tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+ tr32_mailbox(tnapi->prodmbox);
+
+ udelay(10);
+
+ /* 350 usec to allow enough time on some 10/100 Mbps devices. */
+ for (i = 0; i < 35; i++) {
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ coal_now);
+
+ udelay(10);
+
+ tx_idx = tnapi->hw_status->idx[0].tx_consumer;
+ rx_idx = rnapi->hw_status->idx[0].rx_producer;
+ if ((tx_idx == tnapi->tx_prod) &&
+ (rx_idx == (rx_start_idx + num_pkts)))
+ break;
+ }
+
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
+ dev_kfree_skb(skb);
+
+ if (tx_idx != tnapi->tx_prod)
+ goto out;
+
+ if (rx_idx != rx_start_idx + num_pkts)
+ goto out;
+
+ val = data_off;
+ while (rx_idx != rx_start_idx) {
+ desc = &rnapi->rx_rcb[rx_start_idx++];
+ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+
+ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+ goto out;
+
+ rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
+ - ETH_FCS_LEN;
+
+ if (!tso_loopback) {
+ if (rx_len != tx_len)
+ goto out;
+
+ if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
+ if (opaque_key != RXD_OPAQUE_RING_STD)
+ goto out;
+ } else {
+ if (opaque_key != RXD_OPAQUE_RING_JUMBO)
+ goto out;
+ }
+ } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+ (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+ >> RXD_TCPCSUM_SHIFT != 0xffff) {
+ goto out;
+ }
+
+ if (opaque_key == RXD_OPAQUE_RING_STD) {
+ rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
+ mapping);
+ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+ rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
+ mapping);
+ } else
+ goto out;
+
+ pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
+ PCI_DMA_FROMDEVICE);
+
+ for (i = data_off; i < rx_len; i++, val++) {
+ if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ goto out;
+ }
+ }
+
+ err = 0;
+
+ /* tg3_free_rings will unmap and free the rx_skb */
+out:
+ return err;
+}
+
+#define TG3_STD_LOOPBACK_FAILED 1
+#define TG3_JMB_LOOPBACK_FAILED 2
+#define TG3_TSO_LOOPBACK_FAILED 4
+#define TG3_LOOPBACK_FAILED \
+ (TG3_STD_LOOPBACK_FAILED | \
+ TG3_JMB_LOOPBACK_FAILED | \
+ TG3_TSO_LOOPBACK_FAILED)
+
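+/* Run the loopback tests: data[0] collects MAC-loopback failures,
+ * data[1] internal PHY loopback, and data[2] external loopback, each
+ * as a bitmask of the flags above.
+ */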
+static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
+{
+ int err = -EIO;
+ u32 eee_cap;
+
+ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
+ if (!netif_running(tp->dev)) {
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
+
+ err = tg3_reset_hw(tp, 1);
+ if (err) {
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
+
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ int i;
+
+ /* Reroute all rx packets to the 1st queue */
+ for (i = MAC_RSS_INDIR_TBL_0;
+ i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
+ tw32(i, 0x0);
+ }
+
+ /* HW errata - mac loopback fails in some cases on 5780.
+ * Normal traffic and PHY loopback are not affected by
+ * errata. Also, the MAC loopback test is deprecated for
+ * all newer ASIC revisions.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT)) {
+ tg3_mac_loopback(tp, true);
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[0] |= TG3_STD_LOOPBACK_FAILED;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[0] |= TG3_JMB_LOOPBACK_FAILED;
+
+ tg3_mac_loopback(tp, false);
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !tg3_flag(tp, USE_PHYLIB)) {
+ int i;
+
+ tg3_phy_lpbk_set(tp, 0, false);
+
+ /* Wait for link */
+ for (i = 0; i < 100; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ break;
+ mdelay(1);
+ }
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[1] |= TG3_STD_LOOPBACK_FAILED;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[1] |= TG3_TSO_LOOPBACK_FAILED;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[1] |= TG3_JMB_LOOPBACK_FAILED;
+
+ if (do_extlpbk) {
+ tg3_phy_lpbk_set(tp, 0, true);
+
+ /* All link indications report up, but the hardware
+ * isn't really ready for about 20 msec. Double it
+ * to be sure.
+ */
+ mdelay(40);
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[2] |= TG3_STD_LOOPBACK_FAILED;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[2] |= TG3_TSO_LOOPBACK_FAILED;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[2] |= TG3_JMB_LOOPBACK_FAILED;
+ }
+
+ /* Re-enable gphy autopowerdown. */
+ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, true);
+ }
+
+ err = (data[0] | data[1] | data[2]) ? -EIO : 0;
+
+done:
+ tp->phy_flags |= eee_cap;
+
+ return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+ u64 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
+
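+ /* Test slots: 0 = NVRAM, 1 = link, 2 = registers, 3 = memory,
+ * 4-6 = loopback, 7 = interrupt.
+ */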
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+ tg3_power_up(tp)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
+ return;
+ }
+
+ memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
+ if (tg3_test_nvram(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[0] = 1;
+ }
+ if (!doextlpbk && tg3_test_link(tp)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[1] = 1;
+ }
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ int err, err2 = 0, irq_sync = 0;
+
+ if (netif_running(dev)) {
+ tg3_phy_stop(tp);
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
+
+ tg3_full_lock(tp, irq_sync);
+
+ tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+ err = tg3_nvram_lock(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tg3_halt_cpu(tp, TX_CPU_BASE);
+ if (!err)
+ tg3_nvram_unlock(tp);
+
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ tg3_phy_reset(tp);
+
+ if (tg3_test_registers(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[2] = 1;
+ }
+
+ if (tg3_test_memory(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[3] = 1;
+ }
+
+ if (doextlpbk)
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+
+ if (tg3_test_loopback(tp, &data[4], doextlpbk))
+ etest->flags |= ETH_TEST_FL_FAILED;
+
+ tg3_full_unlock(tp);
+
+ if (tg3_test_interrupt(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[7] = 1;
+ }
+
+ tg3_full_lock(tp, 0);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ if (netif_running(dev)) {
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err2 = tg3_restart_hw(tp, 1);
+ if (!err2)
+ tg3_netif_start(tp);
+ }
+
+ tg3_full_unlock(tp);
+
+ if (irq_sync && !err2)
+ tg3_phy_start(tp);
+ }
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ tg3_power_down(tp);
+}
+
+static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ struct phy_device *phydev;
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ return phy_mii_ioctl(phydev, ifr, cmd);
+ }
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = tp->phy_addr;
+
+ /* fallthru */
+ case SIOCGMIIREG: {
+ u32 mii_regval;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ break; /* We have no PHY */
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ spin_lock_bh(&tp->lock);
+ err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+ spin_unlock_bh(&tp->lock);
+
+ data->val_out = mii_regval;
+
+ return err;
+ }
+
+ case SIOCSMIIREG:
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ break; /* We have no PHY */
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ spin_lock_bh(&tp->lock);
+ err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_bh(&tp->lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ memcpy(ec, &tp->coal, sizeof(*ec));
+ return 0;
+}
+
+static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
+ u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
+ max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
+ max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
+ min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
+ }
+
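+ /* Only chips older than the 5705 support the per-irq tick and
+ * stats coalescing knobs; elsewhere the limits stay zero and
+ * nonzero requests are rejected below.
+ */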
+ if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+ (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+ (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
+ (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
+ (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
+ (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
+ (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
+ (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
+ (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
+ (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
+ return -EINVAL;
+
+ /* No rx interrupts will be generated if both are zero */
+ if ((ec->rx_coalesce_usecs == 0) &&
+ (ec->rx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ /* No tx interrupts will be generated if both are zero */
+ if ((ec->tx_coalesce_usecs == 0) &&
+ (ec->tx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ /* Only copy relevant parameters, ignore all others. */
+ tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
+ tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
+ tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+ tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
+
+ if (netif_running(dev)) {
+ tg3_full_lock(tp, 0);
+ __tg3_set_coalesce(tp, &tp->coal);
+ tg3_full_unlock(tp);
+ }
+ return 0;
+}
+
+static const struct ethtool_ops tg3_ethtool_ops = {
+ .get_settings = tg3_get_settings,
+ .set_settings = tg3_set_settings,
+ .get_drvinfo = tg3_get_drvinfo,
+ .get_regs_len = tg3_get_regs_len,
+ .get_regs = tg3_get_regs,
+ .get_wol = tg3_get_wol,
+ .set_wol = tg3_set_wol,
+ .get_msglevel = tg3_get_msglevel,
+ .set_msglevel = tg3_set_msglevel,
+ .nway_reset = tg3_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = tg3_get_eeprom_len,
+ .get_eeprom = tg3_get_eeprom,
+ .set_eeprom = tg3_set_eeprom,
+ .get_ringparam = tg3_get_ringparam,
+ .set_ringparam = tg3_set_ringparam,
+ .get_pauseparam = tg3_get_pauseparam,
+ .set_pauseparam = tg3_set_pauseparam,
+ .self_test = tg3_self_test,
+ .get_strings = tg3_get_strings,
+ .set_phys_id = tg3_set_phys_id,
+ .get_ethtool_stats = tg3_get_ethtool_stats,
+ .get_coalesce = tg3_get_coalesce,
+ .set_coalesce = tg3_set_coalesce,
+ .get_sset_count = tg3_get_sset_count,
+};
+
+static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
+{
+ u32 cursize, val, magic;
+
+ tp->nvram_size = EEPROM_CHIP_SIZE;
+
+ if (tg3_nvram_read(tp, 0, &magic) != 0)
+ return;
+
+ if ((magic != TG3_EEPROM_MAGIC) &&
+ ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
+ ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
+ return;
+
+ /*
+ * Size the chip by reading offsets at increasing powers of two.
+ * When we encounter our validation signature, we know the addressing
+ * has wrapped around, and thus have our chip size.
+ */
+ cursize = 0x10;
+
+ while (cursize < tp->nvram_size) {
+ if (tg3_nvram_read(tp, cursize, &val) != 0)
+ return;
+
+ if (val == magic)
+ break;
+
+ cursize <<= 1;
+ }
+
+ tp->nvram_size = cursize;
+}
+
+static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+{
+ u32 val;
+
+ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
+ return;
+
+ /* Selfboot format */
+ if (val != TG3_EEPROM_MAGIC) {
+ tg3_get_eeprom_size(tp);
+ return;
+ }
+
+ if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+ if (val != 0) {
+ /* This is confusing. We want to operate on the
+ * 16-bit value at offset 0xf2. The tg3_nvram_read()
+ * call will read from NVRAM and byteswap the data
+ * according to the byteswapping settings for all
+ * other register accesses. This ensures the data we
+ * want will always reside in the lower 16-bits.
+ * However, the data in NVRAM is in LE format, which
+ * means the data from the NVRAM read will always be
+ * opposite the endianness of the CPU. The 16-bit
+ * byteswap then brings the data to CPU endianness.
+ */
+ tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
+ return;
+ }
+ }
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+}
+
+static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+ if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
+ tg3_flag_set(tp, FLASH);
+ } else {
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ tg3_flag(tp, 5780_CLASS)) {
+ switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
+ case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_ATMEL_EEPROM:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_VENDOR_ST:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_VENDOR_SAIFUN:
+ tp->nvram_jedecnum = JEDEC_SAIFUN;
+ tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_SST_SMALL:
+ case FLASH_VENDOR_SST_LARGE:
+ tp->nvram_jedecnum = JEDEC_SST;
+ tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+ break;
+ }
+ } else {
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ }
+}
+
+static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
+{
+ switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
+ case FLASH_5752PAGE_SIZE_256:
+ tp->nvram_pagesize = 256;
+ break;
+ case FLASH_5752PAGE_SIZE_512:
+ tp->nvram_pagesize = 512;
+ break;
+ case FLASH_5752PAGE_SIZE_1K:
+ tp->nvram_pagesize = 1024;
+ break;
+ case FLASH_5752PAGE_SIZE_2K:
+ tp->nvram_pagesize = 2048;
+ break;
+ case FLASH_5752PAGE_SIZE_4K:
+ tp->nvram_pagesize = 4096;
+ break;
+ case FLASH_5752PAGE_SIZE_264:
+ tp->nvram_pagesize = 264;
+ break;
+ case FLASH_5752PAGE_SIZE_528:
+ tp->nvram_pagesize = 528;
+ break;
+ }
+}
+
+static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ /* NVRAM protection for TPM */
+ if (nvcfg1 & (1 << 27))
+ tg3_flag_set(tp, PROTECTED_NVRAM);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
+ case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ break;
+ }
+
+ if (tg3_flag(tp, FLASH)) {
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ } else {
+ /* For eeprom, set pagesize to maximum eeprom size */
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ }
+}
+
+static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, protect = 0;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ /* NVRAM protection for TPM */
+ if (nvcfg1 & (1 << 27)) {
+ tg3_flag_set(tp, PROTECTED_NVRAM);
+ protect = 1;
+ }
+
+ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+ switch (nvcfg1) {
+ case FLASH_5755VENDOR_ATMEL_FLASH_1:
+ case FLASH_5755VENDOR_ATMEL_FLASH_2:
+ case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ case FLASH_5755VENDOR_ATMEL_FLASH_5:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 264;
+ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
+ tp->nvram_size = (protect ? 0x3e200 :
+ TG3_NVRAM_SIZE_512KB);
+ else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_256KB);
+ else
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_128KB);
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 256;
+ if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_128KB);
+ else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_256KB);
+ else
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_128KB :
+ TG3_NVRAM_SIZE_512KB);
+ break;
+ }
+}
+
+static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ break;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_5755VENDOR_ATMEL_FLASH_1:
+ case FLASH_5755VENDOR_ATMEL_FLASH_2:
+ case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 264;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 256;
+ break;
+ }
+}
+
+static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, protect = 0;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ /* NVRAM protection for TPM */
+ if (nvcfg1 & (1 << 27)) {
+ tg3_flag_set(tp, PROTECTED_NVRAM);
+ protect = 1;
+ }
+
+ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+ switch (nvcfg1) {
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+ tp->nvram_pagesize = 256;
+ break;
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 256;
+ break;
+ }
+
+ if (protect) {
+ tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
+ } else {
+ switch (nvcfg1) {
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_size = TG3_NVRAM_SIZE_2MB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ }
+ }
+}
+
+static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
+{
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+}
+
+static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ return;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+ case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+ case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ }
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5752VENDOR_ST_M45PE10:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ }
+ break;
+ default:
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ATMEL_EEPROM:
+ case FLASH_5717VENDOR_MICRO_EEPROM:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ return;
+ case FLASH_5717VENDOR_ATMEL_MDB011D:
+ case FLASH_5717VENDOR_ATMEL_ADB011B:
+ case FLASH_5717VENDOR_ATMEL_ADB011D:
+ case FLASH_5717VENDOR_ATMEL_MDB021D:
+ case FLASH_5717VENDOR_ATMEL_ADB021B:
+ case FLASH_5717VENDOR_ATMEL_ADB021D:
+ case FLASH_5717VENDOR_ATMEL_45USPT:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ATMEL_MDB021D:
+ /* Detect size with tg3_nvram_get_size() */
+ break;
+ case FLASH_5717VENDOR_ATMEL_ADB021B:
+ case FLASH_5717VENDOR_ATMEL_ADB021D:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ case FLASH_5717VENDOR_ST_M_M25PE10:
+ case FLASH_5717VENDOR_ST_A_M25PE10:
+ case FLASH_5717VENDOR_ST_M_M45PE10:
+ case FLASH_5717VENDOR_ST_A_M45PE10:
+ case FLASH_5717VENDOR_ST_M_M25PE20:
+ case FLASH_5717VENDOR_ST_A_M25PE20:
+ case FLASH_5717VENDOR_ST_M_M45PE20:
+ case FLASH_5717VENDOR_ST_A_M45PE20:
+ case FLASH_5717VENDOR_ST_25USPT:
+ case FLASH_5717VENDOR_ST_45USPT:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ST_M_M25PE20:
+ case FLASH_5717VENDOR_ST_M_M45PE20:
+ /* Detect size with tg3_nvram_get_size() */
+ break;
+ case FLASH_5717VENDOR_ST_A_M25PE20:
+ case FLASH_5717VENDOR_ST_A_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ default:
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, nvmpinstrp;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+ nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
+
+ switch (nvmpinstrp) {
+ case FLASH_5720_EEPROM_HD:
+ case FLASH_5720_EEPROM_LD:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ if (nvmpinstrp == FLASH_5720_EEPROM_HD)
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ else
+ tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
+ return;
+ case FLASH_5720VENDOR_M_ATMEL_DB011D:
+ case FLASH_5720VENDOR_A_ATMEL_DB011B:
+ case FLASH_5720VENDOR_A_ATMEL_DB011D:
+ case FLASH_5720VENDOR_M_ATMEL_DB021D:
+ case FLASH_5720VENDOR_A_ATMEL_DB021B:
+ case FLASH_5720VENDOR_A_ATMEL_DB021D:
+ case FLASH_5720VENDOR_M_ATMEL_DB041D:
+ case FLASH_5720VENDOR_A_ATMEL_DB041B:
+ case FLASH_5720VENDOR_A_ATMEL_DB041D:
+ case FLASH_5720VENDOR_M_ATMEL_DB081D:
+ case FLASH_5720VENDOR_A_ATMEL_DB081D:
+ case FLASH_5720VENDOR_ATMEL_45USPT:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvmpinstrp) {
+ case FLASH_5720VENDOR_M_ATMEL_DB021D:
+ case FLASH_5720VENDOR_A_ATMEL_DB021B:
+ case FLASH_5720VENDOR_A_ATMEL_DB021D:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5720VENDOR_M_ATMEL_DB041D:
+ case FLASH_5720VENDOR_A_ATMEL_DB041B:
+ case FLASH_5720VENDOR_A_ATMEL_DB041D:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5720VENDOR_M_ATMEL_DB081D:
+ case FLASH_5720VENDOR_A_ATMEL_DB081D:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE10:
+ case FLASH_5720VENDOR_M_ST_M45PE10:
+ case FLASH_5720VENDOR_A_ST_M25PE10:
+ case FLASH_5720VENDOR_A_ST_M45PE10:
+ case FLASH_5720VENDOR_M_ST_M25PE20:
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ case FLASH_5720VENDOR_A_ST_M25PE20:
+ case FLASH_5720VENDOR_A_ST_M45PE20:
+ case FLASH_5720VENDOR_M_ST_M25PE40:
+ case FLASH_5720VENDOR_M_ST_M45PE40:
+ case FLASH_5720VENDOR_A_ST_M25PE40:
+ case FLASH_5720VENDOR_A_ST_M45PE40:
+ case FLASH_5720VENDOR_M_ST_M25PE80:
+ case FLASH_5720VENDOR_M_ST_M45PE80:
+ case FLASH_5720VENDOR_A_ST_M25PE80:
+ case FLASH_5720VENDOR_A_ST_M45PE80:
+ case FLASH_5720VENDOR_ST_25USPT:
+ case FLASH_5720VENDOR_ST_45USPT:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvmpinstrp) {
+ case FLASH_5720VENDOR_M_ST_M25PE20:
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ case FLASH_5720VENDOR_A_ST_M25PE20:
+ case FLASH_5720VENDOR_A_ST_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE40:
+ case FLASH_5720VENDOR_M_ST_M45PE40:
+ case FLASH_5720VENDOR_A_ST_M25PE40:
+ case FLASH_5720VENDOR_A_ST_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE80:
+ case FLASH_5720VENDOR_M_ST_M45PE80:
+ case FLASH_5720VENDOR_A_ST_M25PE80:
+ case FLASH_5720VENDOR_A_ST_M45PE80:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ default:
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+/* Chips other than 5700/5701 use the NVRAM for fetching info. */
+static void __devinit tg3_nvram_init(struct tg3 *tp)
+{
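+	/* Reset the EEPROM address state machine and program the
+	 * default clock period before touching the device.
+	 */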
+ tw32_f(GRC_EEPROM_ADDR,
+ (EEPROM_ADDR_FSM_RESET |
+ (EEPROM_DEFAULT_CLOCK_PERIOD <<
+ EEPROM_ADDR_CLKPERD_SHIFT)));
+
+ msleep(1);
+
+ /* Enable seeprom accesses. */
+ tw32_f(GRC_LOCAL_CTRL,
+ tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
+ udelay(100);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ tg3_flag_set(tp, NVRAM);
+
+ if (tg3_nvram_lock(tp)) {
+ netdev_warn(tp->dev,
+ "Cannot get nvram lock, %s failed\n",
+ __func__);
+ return;
+ }
+ tg3_enable_nvram_access(tp);
+
+ tp->nvram_size = 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ tg3_get_5752_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ tg3_get_5755_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_get_5787_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tg3_get_5761_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_get_5906_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_get_57780_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_get_5717_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_get_5720_nvram_info(tp);
+ else
+ tg3_get_nvram_info(tp);
+
+ if (tp->nvram_size == 0)
+ tg3_get_nvram_size(tp);
+
+ tg3_disable_nvram_access(tp);
+ tg3_nvram_unlock(tp);
+
+ } else {
+ tg3_flag_clear(tp, NVRAM);
+ tg3_flag_clear(tp, NVRAM_BUFFERED);
+
+ tg3_get_eeprom_size(tp);
+ }
+}
+
+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+ u32 offset, u32 len, u8 *buf)
+{
+ int i, j, rc = 0;
+ u32 val;
+
+ for (i = 0; i < len; i += 4) {
+ u32 addr;
+ __be32 data;
+
+ addr = offset + i;
+
+ memcpy(&data, buf + i, 4);
+
+ /*
+ * The SEEPROM interface expects the data to always be opposite
+ * the native endian format. We accomplish this by reversing
+ * all the operations that would have been performed on the
+ * data from a call to tg3_nvram_read_be32().
+ */
+ tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
+
+ val = tr32(GRC_EEPROM_ADDR);
+ tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+ val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+ EEPROM_ADDR_READ);
+ tw32(GRC_EEPROM_ADDR, val |
+ (0 << EEPROM_ADDR_DEVID_SHIFT) |
+ (addr & EEPROM_ADDR_ADDR_MASK) |
+ EEPROM_ADDR_START |
+ EEPROM_ADDR_WRITE);
+
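+		/* Poll for completion; up to 1000 sleeps of 1 ms gives
+		 * the write roughly one second to finish.
+		 */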
+ for (j = 0; j < 1000; j++) {
+ val = tr32(GRC_EEPROM_ADDR);
+
+ if (val & EEPROM_ADDR_COMPLETE)
+ break;
+ msleep(1);
+ }
+ if (!(val & EEPROM_ADDR_COMPLETE)) {
+ rc = -EBUSY;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int ret = 0;
+ u32 pagesize = tp->nvram_pagesize;
+ u32 pagemask = pagesize - 1;
+ u32 nvram_cmd;
+ u8 *tmp;
+
+ tmp = kmalloc(pagesize, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ while (len) {
+ int j;
+ u32 phy_addr, page_off, size;
+
+ phy_addr = offset & ~pagemask;
+
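+		/* Read the whole page first so the bytes we do not
+		 * rewrite survive the page erase (read-modify-write).
+		 */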
+ for (j = 0; j < pagesize; j += 4) {
+ ret = tg3_nvram_read_be32(tp, phy_addr + j,
+ (__be32 *) (tmp + j));
+ if (ret)
+ break;
+ }
+ if (ret)
+ break;
+
+ page_off = offset & pagemask;
+ size = pagesize;
+ if (len < size)
+ size = len;
+
+ len -= size;
+
+ memcpy(tmp + page_off, buf, size);
+
+ offset = offset + (pagesize - page_off);
+
+ tg3_enable_nvram_access(tp);
+
+ /*
+ * Before we can erase the flash page, we need
+ * to issue a special "write enable" command.
+ */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Erase the target page */
+ tw32(NVRAM_ADDR, phy_addr);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Issue another write enable to start the write. */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ for (j = 0; j < pagesize; j += 4) {
+ __be32 data;
+
+ data = *((__be32 *) (tmp + j));
+
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+ tw32(NVRAM_ADDR, phy_addr + j);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+ NVRAM_CMD_WR;
+
+ if (j == 0)
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ else if (j == (pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+ break;
+ }
+ if (ret)
+ break;
+ }
+
+ nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+ tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+ kfree(tmp);
+
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < len; i += 4, offset += 4) {
+ u32 page_off, phy_addr, nvram_cmd;
+ __be32 data;
+
+ memcpy(&data, buf + i, 4);
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+ page_off = offset % tp->nvram_pagesize;
+
+ phy_addr = tg3_nvram_phys_addr(tp, offset);
+
+ tw32(NVRAM_ADDR, phy_addr);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
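+		/* Mark the first word written to a page with NVRAM_CMD_FIRST,
+		 * and the last word of a page or of the buffer with
+		 * NVRAM_CMD_LAST.
+		 */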
+ if (page_off == 0 || i == 0)
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ if (page_off == (tp->nvram_pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if (i == (len - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ !tg3_flag(tp, 5755_PLUS) &&
+ (tp->nvram_jedecnum == JEDEC_ST) &&
+ (nvram_cmd & NVRAM_CMD_FIRST)) {
+
+			if ((ret = tg3_nvram_exec_cmd(tp,
+				NVRAM_CMD_WREN | NVRAM_CMD_GO |
+				NVRAM_CMD_DONE)))
+				break;
+ }
+ if (!tg3_flag(tp, FLASH)) {
+ /* We always do complete word writes to eeprom. */
+ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+ }
+
+ if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+ break;
+ }
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
+{
+ int ret;
+
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
+ ~GRC_LCLCTRL_GPIO_OUTPUT1);
+ udelay(40);
+ }
+
+ if (!tg3_flag(tp, NVRAM)) {
+ ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+ } else {
+ u32 grc_mode;
+
+ ret = tg3_nvram_lock(tp);
+ if (ret)
+ return ret;
+
+ tg3_enable_nvram_access(tp);
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
+ tw32(NVRAM_WRITE1, 0x406);
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
+
+ if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
+ ret = tg3_nvram_write_block_buffered(tp, offset, len,
+ buf);
+ } else {
+ ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+ buf);
+ }
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
+
+ tg3_disable_nvram_access(tp);
+ tg3_nvram_unlock(tp);
+ }
+
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ udelay(40);
+ }
+
+ return ret;
+}
+
+struct subsys_tbl_ent {
+ u16 subsys_vendor, subsys_devid;
+ u32 phy_id;
+};
+
+static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
+ /* Broadcom boards. */
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
+
+ /* 3com boards. */
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
+
+ /* DELL boards. */
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
+
+ /* Compaq boards. */
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
+
+ /* IBM boards. */
+ { TG3PCI_SUBVENDOR_ID_IBM,
+ TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
+};
+
+static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
+ if ((subsys_id_to_phy_id[i].subsys_vendor ==
+ tp->pdev->subsystem_vendor) &&
+ (subsys_id_to_phy_id[i].subsys_devid ==
+ tp->pdev->subsystem_device))
+ return &subsys_id_to_phy_id[i];
+ }
+ return NULL;
+}
+
+static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
+{
+ u32 val;
+
+ tp->phy_id = TG3_PHY_ID_INVALID;
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+	/* Assume an onboard, WOL-capable device by default. */
+ tg3_flag_set(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, WOL_CAP);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, IS_NIC);
+ }
+ val = tr32(VCPU_CFGSHDW);
+ if (val & VCPU_CFGSHDW_ASPM_DBNC)
+ tg3_flag_set(tp, ASPM_WORKAROUND);
+ if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
+ (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
+ tg3_flag_set(tp, WOL_ENABLE);
+ device_set_wakeup_enable(&tp->pdev->dev, true);
+ }
+ goto done;
+ }
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+ u32 nic_cfg, led_cfg;
+ u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ int eeprom_phy_serdes = 0;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+ tp->nic_sram_data_cfg = nic_cfg;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
+ ver >>= NIC_SRAM_DATA_VER_SHIFT;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+ (ver > 0) && (ver < 0x100))
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+
+ if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
+ NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
+ eeprom_phy_serdes = 1;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
+ if (nic_phy_id != 0) {
+ u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
+ u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
+
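+			/* Rebuild the PHY ID from the two halves stored in
+			 * SRAM, mirroring the layout used by the MII PHYSID
+			 * registers.
+			 */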
+ eeprom_phy_id = (id1 >> 16) << 10;
+ eeprom_phy_id |= (id2 & 0xfc00) << 16;
+ eeprom_phy_id |= (id2 & 0x03ff) << 0;
+ } else
+ eeprom_phy_id = 0;
+
+ tp->phy_id = eeprom_phy_id;
+ if (eeprom_phy_serdes) {
+ if (!tg3_flag(tp, 5705_PLUS))
+ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+ else
+ tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
+ }
+
+ if (tg3_flag(tp, 5750_PLUS))
+ led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
+ SHASTA_EXT_LED_MODE_MASK);
+ else
+ led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
+
+ switch (led_cfg) {
+ default:
+ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+ break;
+
+ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
+ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+ break;
+
+ case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
+ tp->led_ctrl = LED_CTRL_MODE_MAC;
+
+ /* Default to PHY_1_MODE if 0 (MAC_MODE) is
+			 * read from some older 5700/5701 bootcode.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5701)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+ break;
+
+ case SHASTA_EXT_LED_SHARED:
+ tp->led_ctrl = LED_CTRL_MODE_SHARED;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+ LED_CTRL_MODE_PHY_2);
+ break;
+
+ case SHASTA_EXT_LED_MAC:
+ tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
+ break;
+
+ case SHASTA_EXT_LED_COMBO:
+ tp->led_ctrl = LED_CTRL_MODE_COMBO;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+ LED_CTRL_MODE_PHY_2);
+ break;
+
+ }
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+ tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+ if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
+ tg3_flag_set(tp, EEPROM_WRITE_PROT);
+ if ((tp->pdev->subsystem_vendor ==
+ PCI_VENDOR_ID_ARIMA) &&
+ (tp->pdev->subsystem_device == 0x205a ||
+ tp->pdev->subsystem_device == 0x2063))
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ } else {
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, IS_NIC);
+ }
+
+ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+ tg3_flag_set(tp, ENABLE_ASF);
+ if (tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+ }
+
+ if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
+ tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ENABLE_APE);
+
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
+ !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
+ tg3_flag_clear(tp, WOL_CAP);
+
+ if (tg3_flag(tp, WOL_CAP) &&
+ (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
+ tg3_flag_set(tp, WOL_ENABLE);
+ device_set_wakeup_enable(&tp->pdev->dev, true);
+ }
+
+ if (cfg2 & (1 << 17))
+ tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
+
+		/* SerDes signal pre-emphasis in register 0x590 is set by
+		 * the bootcode if bit 18 is set.
+		 */
+ if (cfg2 & (1 << 18))
+ tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
+
+ if ((tg3_flag(tp, 57765_PLUS) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
+ tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
+
+ if (tg3_flag(tp, PCI_EXPRESS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS)) {
+ u32 cfg3;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
+ if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
+ tg3_flag_set(tp, ASPM_WORKAROUND);
+ }
+
+ if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
+ tg3_flag_set(tp, RGMII_INBAND_DISABLE);
+ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
+ tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
+ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
+ tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+ }
+done:
+ if (tg3_flag(tp, WOL_CAP))
+ device_set_wakeup_enable(&tp->pdev->dev,
+ tg3_flag(tp, WOL_ENABLE));
+ else
+ device_set_wakeup_capable(&tp->pdev->dev, false);
+}
+
+static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
+{
+ int i;
+ u32 val;
+
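+	/* Pulse the START bit: write the command with it set, then
+	 * rewrite the command with it cleared.
+	 */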
+ tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
+ tw32(OTP_CTRL, cmd);
+
+ /* Wait for up to 1 ms for command to execute. */
+ for (i = 0; i < 100; i++) {
+ val = tr32(OTP_STATUS);
+ if (val & OTP_STATUS_CMD_DONE)
+ break;
+ udelay(10);
+ }
+
+ return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
+}
+
+/* Read the gphy configuration from the OTP region of the chip. The gphy
+ * configuration is a 32-bit value that straddles the alignment boundary.
+ * We do two 32-bit reads and then shift and merge the results.
+ */
+static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
+{
+ u32 bhalf_otp, thalf_otp;
+
+ tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
+ return 0;
+
+ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+ return 0;
+
+ thalf_otp = tr32(OTP_READ_DATA);
+
+ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+ return 0;
+
+ bhalf_otp = tr32(OTP_READ_DATA);
+
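+	/* The 32-bit value straddles the two OTP words: take the low
+	 * half of the top word and the high half of the bottom word.
+	 */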
+ return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
+}
+
+static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+{
+ u32 adv = ADVERTISED_Autoneg |
+ ADVERTISED_Pause;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ adv |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_TP;
+ else
+ adv |= ADVERTISED_FIBRE;
+
+ tp->link_config.advertising = adv;
+ tp->link_config.speed = SPEED_INVALID;
+ tp->link_config.duplex = DUPLEX_INVALID;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tp->link_config.active_speed = SPEED_INVALID;
+ tp->link_config.active_duplex = DUPLEX_INVALID;
+ tp->link_config.orig_speed = SPEED_INVALID;
+ tp->link_config.orig_duplex = DUPLEX_INVALID;
+ tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
+static int __devinit tg3_phy_probe(struct tg3 *tp)
+{
+ u32 hw_phy_id_1, hw_phy_id_2;
+ u32 hw_phy_id, hw_phy_id_masked;
+ int err;
+
+ /* flow control autonegotiation is default behavior */
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
+ if (tg3_flag(tp, USE_PHYLIB))
+ return tg3_phy_init(tp);
+
+ /* Reading the PHY ID register can conflict with ASF
+ * firmware access to the PHY hardware.
+ */
+ err = 0;
+ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
+ hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
+ } else {
+ /* Now read the physical PHY_ID from the chip and verify
+ * that it is sane. If it doesn't look good, we fall back
+		 * to the hard-coded, table-based PHY ID or, failing
+		 * that, to the value found in the eeprom area.
+ */
+ err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
+ err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
+
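+		/* Combine the two MII PHYSID register halves into the
+		 * driver's internal PHY ID layout.
+		 */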
+ hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
+ hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
+ hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
+
+ hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
+ }
+
+ if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
+ tp->phy_id = hw_phy_id;
+ if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
+ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+ else
+ tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
+ } else {
+ if (tp->phy_id != TG3_PHY_ID_INVALID) {
+ /* Do nothing, phy ID already set up in
+ * tg3_get_eeprom_hw_cfg().
+ */
+ } else {
+ struct subsys_tbl_ent *p;
+
+ /* No eeprom signature? Try the hardcoded
+ * subsys device table.
+ */
+ p = tg3_lookup_by_subsys(tp);
+ if (!p)
+ return -ENODEV;
+
+ tp->phy_id = p->phy_id;
+ if (!tp->phy_id ||
+ tp->phy_id == TG3_PHY_ID_BCM8002)
+ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+ }
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
+ (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+
+ tg3_phy_init_link_config(tp);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ !tg3_flag(tp, ENABLE_APE) &&
+ !tg3_flag(tp, ENABLE_ASF)) {
+ u32 bmsr, mask;
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
+ goto skip_phy_reset;
+
+ err = tg3_phy_reset(tp);
+ if (err)
+ return err;
+
+ tg3_phy_set_wirespeed(tp);
+
+ mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
+ if (!tg3_copper_is_advertising_all(tp, mask)) {
+ tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
+ tp->link_config.flowctrl);
+
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ }
+ }
+
+skip_phy_reset:
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+
+ err = tg3_init_5401phy_dsp(tp);
+ }
+
+ return err;
+}
+
+static void __devinit tg3_read_vpd(struct tg3 *tp)
+{
+ u8 *vpd_data;
+ unsigned int block_end, rosize, len;
+ u32 vpdlen;
+ int j, i = 0;
+
+ vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
+ if (!vpd_data)
+ goto out_no_vpd;
+
+ i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+ rosize = pci_vpd_lrdt_size(&vpd_data[i]);
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > vpdlen)
+ goto out_not_found;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j > 0) {
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&vpd_data[j], "1028", 4))
+ goto partno;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto partno;
+
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end)
+ goto partno;
+
+ memcpy(tp->fw_ver, &vpd_data[j], len);
+ strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+ }
+
+partno:
+ i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_PARTNO);
+ if (i < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[i]);
+
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len > TG3_BPN_SIZE ||
+ (len + i) > vpdlen)
+ goto out_not_found;
+
+ memcpy(tp->board_part_number, &vpd_data[i], len);
+
+out_not_found:
+ kfree(vpd_data);
+ if (tp->board_part_number[0])
+ return;
+
+out_no_vpd:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+ strcpy(tp->board_part_number, "BCM5717");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
+ strcpy(tp->board_part_number, "BCM5718");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
+ strcpy(tp->board_part_number, "BCM57780");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
+ strcpy(tp->board_part_number, "BCM57760");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
+ strcpy(tp->board_part_number, "BCM57790");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
+ strcpy(tp->board_part_number, "BCM57788");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
+ strcpy(tp->board_part_number, "BCM57761");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
+ strcpy(tp->board_part_number, "BCM57765");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
+ strcpy(tp->board_part_number, "BCM57781");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
+ strcpy(tp->board_part_number, "BCM57785");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
+ strcpy(tp->board_part_number, "BCM57791");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ strcpy(tp->board_part_number, "BCM57795");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ strcpy(tp->board_part_number, "BCM95906");
+ } else {
+nomatch:
+ strcpy(tp->board_part_number, "none");
+ }
+}
+
+static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
+{
+ u32 val;
+
+ if (tg3_nvram_read(tp, offset, &val) ||
+ (val & 0xfc000000) != 0x0c000000 ||
+ tg3_nvram_read(tp, offset + 4, &val) ||
+ val != 0)
+ return 0;
+
+ return 1;
+}
+
+static void __devinit tg3_read_bc_ver(struct tg3 *tp)
+{
+ u32 val, offset, start, ver_offset;
+ int i, dst_off;
+ bool newver = false;
+
+ if (tg3_nvram_read(tp, 0xc, &offset) ||
+ tg3_nvram_read(tp, 0x4, &start))
+ return;
+
+ offset = tg3_nvram_logical_addr(tp, offset);
+
+ if (tg3_nvram_read(tp, offset, &val))
+ return;
+
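+	/* A header whose first word matches 0x0c000000 in its top
+	 * bits and whose second word is zero uses the newer layout.
+	 */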
+ if ((val & 0xfc000000) == 0x0c000000) {
+ if (tg3_nvram_read(tp, offset + 4, &val))
+ return;
+
+ if (val == 0)
+ newver = true;
+ }
+
+ dst_off = strlen(tp->fw_ver);
+
+ if (newver) {
+ if (TG3_VER_SIZE - dst_off < 16 ||
+ tg3_nvram_read(tp, offset + 8, &ver_offset))
+ return;
+
+ offset = offset + ver_offset - start;
+ for (i = 0; i < 16; i += 4) {
+ __be32 v;
+ if (tg3_nvram_read_be32(tp, offset + i, &v))
+ return;
+
+ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
+ }
+ } else {
+ u32 major, minor;
+
+ if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
+ return;
+
+ major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
+ TG3_NVM_BCVER_MAJSFT;
+ minor = ver_offset & TG3_NVM_BCVER_MINMSK;
+ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+ "v%d.%02d", major, minor);
+ }
+}
+
+static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
+{
+ u32 val, major, minor;
+
+ /* Use native endian representation */
+ if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
+ return;
+
+ major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
+ TG3_NVM_HWSB_CFG1_MAJSFT;
+ minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
+ TG3_NVM_HWSB_CFG1_MINSFT;
+
+ snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
+}
+
+static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
+{
+ u32 offset, major, minor, build;
+
+ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
+
+ if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
+ return;
+
+ switch (val & TG3_EEPROM_SB_REVISION_MASK) {
+ case TG3_EEPROM_SB_REVISION_0:
+ offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_2:
+ offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_3:
+ offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_4:
+ offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_5:
+ offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_6:
+ offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (tg3_nvram_read(tp, offset, &val))
+ return;
+
+ build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
+ TG3_EEPROM_SB_EDH_BLD_SHFT;
+ major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
+ TG3_EEPROM_SB_EDH_MAJ_SHFT;
+ minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
+
+ if (minor > 99 || build > 26)
+ return;
+
+ offset = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+ " v%d.%02d", major, minor);
+
+ if (build > 0) {
+ offset = strlen(tp->fw_ver);
+ if (offset < TG3_VER_SIZE - 1)
+ tp->fw_ver[offset] = 'a' + build - 1;
+ }
+}
+
+static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
+{
+ u32 val, offset, start;
+ int i, vlen;
+
+ for (offset = TG3_NVM_DIR_START;
+ offset < TG3_NVM_DIR_END;
+ offset += TG3_NVM_DIRENT_SIZE) {
+ if (tg3_nvram_read(tp, offset, &val))
+ return;
+
+ if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
+ break;
+ }
+
+ if (offset == TG3_NVM_DIR_END)
+ return;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ start = 0x08000000;
+ else if (tg3_nvram_read(tp, offset - 4, &start))
+ return;
+
+ if (tg3_nvram_read(tp, offset + 4, &offset) ||
+ !tg3_fw_img_is_valid(tp, offset) ||
+ tg3_nvram_read(tp, offset + 8, &val))
+ return;
+
+ offset += val - start;
+
+ vlen = strlen(tp->fw_ver);
+
+ tp->fw_ver[vlen++] = ',';
+ tp->fw_ver[vlen++] = ' ';
+
+ for (i = 0; i < 4; i++) {
+ __be32 v;
+ if (tg3_nvram_read_be32(tp, offset, &v))
+ return;
+
+ offset += sizeof(v);
+
+ if (vlen > TG3_VER_SIZE - sizeof(v)) {
+ memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
+ break;
+ }
+
+ memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
+ vlen += sizeof(v);
+ }
+}
+
+static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+{
+ int vlen;
+ u32 apedata;
+ char *fwtype;
+
+ if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
+
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
+ tg3_flag_set(tp, APE_HAS_NCSI);
+ fwtype = "NCSI";
+ } else {
+ fwtype = "DASH";
+ }
+
+ vlen = strlen(tp->fw_ver);
+
+ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
+ fwtype,
+ (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
+ (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
+ (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
+ (apedata & APE_FW_VERSION_BLDMSK));
+}
+
+static void __devinit tg3_read_fw_ver(struct tg3 *tp)
+{
+ u32 val;
+ bool vpd_vers = false;
+
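+	/* A non-empty fw_ver means VPD already supplied a version
+	 * string; skip the ASF/APE firmware lookups further below.
+	 */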
+ if (tp->fw_ver[0] != 0)
+ vpd_vers = true;
+
+ if (tg3_flag(tp, NO_NVRAM)) {
+ strcat(tp->fw_ver, "sb");
+ return;
+ }
+
+ if (tg3_nvram_read(tp, 0, &val))
+ return;
+
+ if (val == TG3_EEPROM_MAGIC)
+ tg3_read_bc_ver(tp);
+ else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
+ tg3_read_sb_ver(tp, val);
+ else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+ tg3_read_hwsb_ver(tp);
+ else
+ return;
+
+ if (vpd_vers)
+ goto done;
+
+ if (tg3_flag(tp, ENABLE_APE)) {
+ if (tg3_flag(tp, ENABLE_ASF))
+ tg3_read_dash_ver(tp);
+ } else if (tg3_flag(tp, ENABLE_ASF)) {
+ tg3_read_mgmtfw_ver(tp);
+ }
+
+done:
+ tp->fw_ver[TG3_VER_SIZE - 1] = 0;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
+
+static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
+{
+ if (tg3_flag(tp, LRG_PROD_RING_CAP))
+ return TG3_RX_RET_MAX_SIZE_5717;
+ else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
+ return TG3_RX_RET_MAX_SIZE_5700;
+ else
+ return TG3_RX_RET_MAX_SIZE_5705;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
+ { },
+};
+
+static int __devinit tg3_get_invariants(struct tg3 *tp)
+{
+ u32 misc_ctrl_reg;
+ u32 pci_state_reg, grc_misc_cfg;
+ u32 val;
+ u16 pci_cmd;
+ int err;
+
+ /* Force memory write invalidate off. If we leave it on,
+ * then on 5700_BX chips we have to enable a workaround.
+ * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
+	 * to match the cacheline size. The Broadcom driver has this
+	 * workaround but turns MWI off all the time, so it never uses
+ * it. This seems to suggest that the workaround is insufficient.
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ /* Important! -- Make sure register accesses are byteswapped
+ * correctly. Also, for those chips that require it, make
+ * sure that indirect register accesses are enabled before
+ * the first operation.
+ */
+ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ &misc_ctrl_reg);
+ tp->misc_host_ctrl |= (misc_ctrl_reg &
+ MISC_HOST_CTRL_CHIPREV);
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ tp->pci_chip_rev_id = (misc_ctrl_reg >>
+ MISC_HOST_CTRL_CHIPREV_SHIFT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ u32 prod_id_asic_rev;
+
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
+ pci_read_config_dword(tp->pdev,
+ TG3PCI_GEN2_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ pci_read_config_dword(tp->pdev,
+ TG3PCI_GEN15_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ else
+ pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
+ &prod_id_asic_rev);
+
+ tp->pci_chip_rev_id = prod_id_asic_rev;
+ }
+
+ /* Wrong chip ID in 5752 A0. This code can be removed later
+ * as A0 is not in production.
+ */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+ tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+
+ /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
+	 * we need to disable memory accesses and use configuration
+	 * cycles only to access all registers. The 5702/03 chips
+ * can mistakenly decode the special cycles from the
+ * ICH chipsets as memory write cycles, causing corruption
+ * of register and memory space. Only certain ICH bridges
+ * will drive special cycles with non-zero data during the
+ * address phase which can fall within the 5703's address
+ * range. This is not an ICH bug as the PCI spec allows
+ * non-zero address during special cycles. However, only
+ * these ICH bridges are known to drive non-zero addresses
+ * during special cycles.
+ *
+ * Since special cycles do not cross PCI bridges, we only
+ * enable this workaround if the 5703 is on the secondary
+ * bus of these ICH bridges.
+ */
+ if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
+ (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+ static struct tg3_dev_id {
+ u32 vendor;
+ u32 device;
+ u32 rev;
+ } ich_chipsets[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
+ PCI_ANY_ID },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
+ PCI_ANY_ID },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
+ 0xa },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
+ PCI_ANY_ID },
+ { },
+ };
+ struct tg3_dev_id *pci_id = &ich_chipsets[0];
+ struct pci_dev *bridge = NULL;
+
+ while (pci_id->vendor != 0) {
+ bridge = pci_get_device(pci_id->vendor, pci_id->device,
+ bridge);
+ if (!bridge) {
+ pci_id++;
+ continue;
+ }
+ if (pci_id->rev != PCI_ANY_ID) {
+ if (bridge->revision > pci_id->rev)
+ continue;
+ }
+ if (bridge->subordinate &&
+ (bridge->subordinate->number ==
+ tp->pdev->bus->number)) {
+ tg3_flag_set(tp, ICH_WORKAROUND);
+ pci_dev_put(bridge);
+ break;
+ }
+ }
+ }
+
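+	/* The 5701 behind certain Intel PXH bridges suffers from a
+	 * DMA bug; flag it when the NIC sits below such a bridge.
+	 */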
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ static struct tg3_dev_id {
+ u32 vendor;
+ u32 device;
+ } bridge_chipsets[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
+ { },
+ };
+ struct tg3_dev_id *pci_id = &bridge_chipsets[0];
+ struct pci_dev *bridge = NULL;
+
+ while (pci_id->vendor != 0) {
+ bridge = pci_get_device(pci_id->vendor,
+ pci_id->device,
+ bridge);
+ if (!bridge) {
+ pci_id++;
+ continue;
+ }
+ if (bridge->subordinate &&
+ (bridge->subordinate->number <=
+ tp->pdev->bus->number) &&
+ (bridge->subordinate->subordinate >=
+ tp->pdev->bus->number)) {
+ tg3_flag_set(tp, 5701_DMA_BUG);
+ pci_dev_put(bridge);
+ break;
+ }
+ }
+ }
+
+ /* The EPB bridge inside 5714, 5715, and 5780 cannot support
+	 * DMA addresses > 40-bit. This bridge may have additional
+	 * 57xx devices behind it, in some 4-port NIC designs for example.
+ * Any tg3 device found behind the bridge will also need the 40-bit
+ * DMA workaround.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ tg3_flag_set(tp, 5780_CLASS);
+ tg3_flag_set(tp, 40BIT_DMA_BUG);
+ tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+ } else {
+ struct pci_dev *bridge = NULL;
+
+ do {
+ bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_EPB,
+ bridge);
+ if (bridge && bridge->subordinate &&
+ (bridge->subordinate->number <=
+ tp->pdev->bus->number) &&
+ (bridge->subordinate->subordinate >=
+ tp->pdev->bus->number)) {
+ tg3_flag_set(tp, 40BIT_DMA_BUG);
+ pci_dev_put(bridge);
+ break;
+ }
+ } while (bridge);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ tp->pdev_peer = tg3_find_peer(tp);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_flag_set(tp, 5717_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
+ tg3_flag(tp, 5717_PLUS))
+ tg3_flag_set(tp, 57765_PLUS);
+
+ /* Intentionally exclude ASIC_REV_5906 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, 5755_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ tg3_flag(tp, 5755_PLUS) ||
+ tg3_flag(tp, 5780_CLASS))
+ tg3_flag_set(tp, 5750_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, 5705_PLUS);
+
+ /* Determine TSO capabilities */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+ ; /* Do nothing. HW bug. */
+ else if (tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, HW_TSO_3);
+ else if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_flag_set(tp, HW_TSO_2);
+ else if (tg3_flag(tp, 5750_PLUS)) {
+ tg3_flag_set(tp, HW_TSO_1);
+ tg3_flag_set(tp, TSO_BUG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
+ tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ tg3_flag_clear(tp, TSO_BUG);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ tg3_flag_set(tp, TSO_BUG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+ tp->fw_needed = FIRMWARE_TG3TSO5;
+ else
+ tp->fw_needed = FIRMWARE_TG3TSO;
+ }
+
+ /* Selectively allow TSO based on operating conditions */
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3) ||
+ (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tg3_flag_set(tp, TSO_CAPABLE);
+ else {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+ tp->fw_needed = FIRMWARE_TG3;
+
+ tp->irq_max = 1;
+
+ if (tg3_flag(tp, 5750_PLUS)) {
+ tg3_flag_set(tp, SUPPORT_MSI);
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
+ tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+ tp->pdev_peer == tp->pdev))
+ tg3_flag_clear(tp, SUPPORT_MSI);
+
+ if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_flag_set(tp, 1SHOT_MSI);
+ }
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ tg3_flag_set(tp, SUPPORT_MSIX);
+ tp->irq_max = TG3_IRQ_MAX_VECS;
+ }
+ }
+
+ if (tg3_flag(tp, 5755_PLUS))
+ tg3_flag_set(tp, SHORT_DMA_BUG);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_flag_set(tp, 4K_FIFO_LIMIT);
+
+ if (tg3_flag(tp, 5717_PLUS))
+ tg3_flag_set(tp, LRG_PROD_RING_CAP);
+
+ if (tg3_flag(tp, 57765_PLUS) &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+ tg3_flag_set(tp, USE_JUMBO_BDFLAG);
+
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ tg3_flag(tp, 5780_CLASS) ||
+ tg3_flag(tp, USE_JUMBO_BDFLAG))
+ tg3_flag_set(tp, JUMBO_CAPABLE);
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+
+ if (pci_is_pcie(tp->pdev)) {
+ u16 lnkctl;
+
+ tg3_flag_set(tp, PCI_EXPRESS);
+
+ tp->pcie_readrq = 4096;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tp->pcie_readrq = 2048;
+
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ &lnkctl);
+ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5906) {
+ tg3_flag_clear(tp, HW_TSO_2);
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+ tg3_flag_set(tp, CLKREQ_BUG);
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ tg3_flag_set(tp, L1PLLPD_EN);
+ }
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ /* BCM5785 devices are effectively PCIe devices, and should
+ * follow PCIe codepaths, but do not have a PCIe capabilities
+ * section.
+ */
+ tg3_flag_set(tp, PCI_EXPRESS);
+ } else if (!tg3_flag(tp, 5705_PLUS) ||
+ tg3_flag(tp, 5780_CLASS)) {
+ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+ if (!tp->pcix_cap) {
+ dev_err(&tp->pdev->dev,
+ "Cannot find PCI-X capability, aborting\n");
+ return -EIO;
+ }
+
+ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+ tg3_flag_set(tp, PCIX_MODE);
+ }
+
+ /* If we have an AMD 762 or VIA K8T800 chipset, write
+ * reordering to the mailbox registers done by the host
+	 * controller can cause major trouble. We read back from
+ * every mailbox register write to force the writes to be
+ * posted to the chip in order.
+ */
+ if (pci_dev_present(tg3_write_reorder_chipsets) &&
+ !tg3_flag(tp, PCI_EXPRESS))
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
+
+ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ &tp->pci_cacheline_sz);
+ pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ &tp->pci_lat_timer);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ tp->pci_lat_timer < 64) {
+ tp->pci_lat_timer = 64;
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
+ }
+
+ /* Important! -- It is critical that the PCI-X hw workaround
+	 * decision is made before the first MMIO register access.
+ */
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ /* 5700 BX chips need to have their TX producer index
+		 * mailboxes written twice to work around a bug.
+ */
+ tg3_flag_set(tp, TXD_MBOX_HWBUG);
+
+ /* If we are in PCI-X mode, enable register write workaround.
+ *
+ * The workaround is to use indirect register accesses
+ * for all chip writes not to mailbox registers.
+ */
+ if (tg3_flag(tp, PCIX_MODE)) {
+ u32 pm_reg;
+
+ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+
+			/* The chip can have its power management PCI config
+ * space registers clobbered due to this bug.
+ * So explicitly force the chip into D0 here.
+ */
+ pci_read_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
+ &pm_reg);
+ pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
+ pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
+ pci_write_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
+ pm_reg);
+
+ /* Also, force SERR#/PERR# in PCI command. */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
+
+ if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
+ tg3_flag_set(tp, PCI_HIGH_SPEED);
+ if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
+ tg3_flag_set(tp, PCI_32BIT);
+
+ /* Chip-specific fixup from Broadcom driver */
+ if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
+ pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
+ }
+
+ /* Default fast path register access methods */
+ tp->read32 = tg3_read32;
+ tp->write32 = tg3_write32;
+ tp->read32_mbox = tg3_read32;
+ tp->write32_mbox = tg3_write32;
+ tp->write32_tx_mbox = tg3_write32;
+ tp->write32_rx_mbox = tg3_write32;
+
+ /* Various workaround register access methods */
+ if (tg3_flag(tp, PCIX_TARGET_HWBUG))
+ tp->write32 = tg3_write_indirect_reg32;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ (tg3_flag(tp, PCI_EXPRESS) &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ /*
+ * Back to back register writes can cause problems on these
+ * chips, the workaround is to read back all reg writes
+ * except those to mailbox regs.
+ *
+ * See tg3_write_indirect_reg32().
+ */
+ tp->write32 = tg3_write_flush_reg32;
+ }
+
+ if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
+ tp->write32_tx_mbox = tg3_write32_tx_mbox;
+ if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
+ }
+
+ if (tg3_flag(tp, ICH_WORKAROUND)) {
+ tp->read32 = tg3_read_indirect_reg32;
+ tp->write32 = tg3_write_indirect_reg32;
+ tp->read32_mbox = tg3_read_indirect_mbox;
+ tp->write32_mbox = tg3_write_indirect_mbox;
+ tp->write32_tx_mbox = tg3_write_indirect_mbox;
+ tp->write32_rx_mbox = tg3_write_indirect_mbox;
+
+ iounmap(tp->regs);
+ tp->regs = NULL;
+
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+ }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tp->read32_mbox = tg3_read32_mbox_5906;
+ tp->write32_mbox = tg3_write32_mbox_5906;
+ tp->write32_tx_mbox = tg3_write32_mbox_5906;
+ tp->write32_rx_mbox = tg3_write32_mbox_5906;
+ }
+
+ if (tp->write32 == tg3_write_indirect_reg32 ||
+ (tg3_flag(tp, PCIX_MODE) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+ tg3_flag_set(tp, SRAM_USE_CONFIG);
+
+ /* The memory arbiter has to be enabled in order for SRAM accesses
+ * to succeed. Normally on powerup the tg3 chip firmware will make
+ * sure it is enabled, but other entities such as system netboot
+ * code might disable it.
+ */
+ val = tr32(MEMARB_MODE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+ if (tg3_flag(tp, PCIX_MODE)) {
+ pci_read_config_dword(tp->pdev,
+ tp->pcix_cap + PCI_X_STATUS, &val);
+ tp->pci_fn = val & 0x7;
+ } else {
+ tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+ }
+
+ /* Get eeprom hw config before calling tg3_set_power_state().
+ * In particular, the TG3_FLAG_IS_NIC flag must be
+ * determined before calling tg3_set_power_state() so that
+ * we know whether or not to switch out of Vaux power.
+ * When the flag is set, it means that GPIO1 is used for eeprom
+ * write protect and also implies that it is a LOM where GPIOs
+ * are not used to switch power.
+ */
+ tg3_get_eeprom_hw_cfg(tp);
+
+ if (tg3_flag(tp, ENABLE_APE)) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+ */
+ pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ pci_state_reg);
+
+ tg3_ape_lock_init(tp);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, CPMU_PRESENT);
+
+ /* Set up tp->grc_local_ctrl before calling
+ * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
+ * will bring 5700's external PHY out of reset.
+ * It is also used as eeprom write protect on LOMs.
+ */
+ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ tg3_flag(tp, EEPROM_WRITE_PROT))
+ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OUTPUT1);
+ /* Unused GPIO3 must be driven as output on 5752 because there
+ * are no pull-up resistors on unused GPIO pins.
+ */
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+ /* Turn off the debug UART. */
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+ if (tg3_flag(tp, IS_NIC))
+ /* Keep VMain power. */
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OUTPUT0;
+ }
+
+ /* Switch out of Vaux if it is a NIC */
+ tg3_pwrsrc_switch_to_vmain(tp);
+
+ /* Derive initial jumbo mode from MTU assigned in
+ * ether_setup() via the alloc_etherdev() call
+ */
+ if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
+ tg3_flag_set(tp, JUMBO_RING_ENABLE);
+
+ /* Determine WakeOnLan speed to use. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+ tg3_flag_clear(tp, WOL_SPEED_100MB);
+ } else {
+ tg3_flag_set(tp, WOL_SPEED_100MB);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->phy_flags |= TG3_PHYFLG_IS_FET;
+
+	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+ (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
+ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+ tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+ tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
+
+ if (tg3_flag(tp, 5705_PLUS) &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ !tg3_flag(tp, 57765_PLUS)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
+ tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
+ tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
+ tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
+ } else
+ tp->phy_flags |= TG3_PHYFLG_BER_BUG;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ tp->phy_otp = tg3_read_otp_phycfg(tp);
+ if (tp->phy_otp == 0)
+ tp->phy_otp = TG3_OTP_DEFAULT;
+ }
+
+ if (tg3_flag(tp, CPMU_PRESENT))
+ tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
+ else
+ tp->mi_mode = MAC_MI_MODE_BASE;
+
+ tp->coalesce_mode = 0;
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+ tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
+
+ /* Set these bits to enable statistics workaround. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+ tp->coalesce_mode |= HOSTCC_MODE_ATTN;
+ tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_flag_set(tp, USE_PHYLIB);
+
+ err = tg3_mdio_init(tp);
+ if (err)
+ return err;
+
+ /* Initialize data/descriptor byte/word swapping. */
+ val = tr32(GRC_MODE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
+ GRC_MODE_WORD_SWAP_B2HRX_DATA |
+ GRC_MODE_B2HRX_ENABLE |
+ GRC_MODE_HTX2B_ENABLE |
+ GRC_MODE_HOST_STACKUP);
+ else
+ val &= GRC_MODE_HOST_STACKUP;
+
+ tw32(GRC_MODE, val | tp->grc_mode);
+
+ tg3_switch_clocks(tp);
+
+ /* Clear this out for sanity. */
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+ !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
+ u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
+
+ if (chiprevid == CHIPREV_ID_5701_A0 ||
+ chiprevid == CHIPREV_ID_5701_B0 ||
+ chiprevid == CHIPREV_ID_5701_B2 ||
+ chiprevid == CHIPREV_ID_5701_B5) {
+ void __iomem *sram_base;
+
+ /* Write some dummy words into the SRAM status block
+			 * area and see if they read back correctly. If the
+			 * value read back is bad, force-enable the PCIX workaround.
+ */
+ sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
+
+ writel(0x00000000, sram_base);
+ writel(0x00000000, sram_base + 4);
+ writel(0xffffffff, sram_base + 4);
+ if (readl(sram_base) != 0x00000000)
+ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+ }
+ }
+
+ udelay(50);
+ tg3_nvram_init(tp);
+
+ grc_misc_cfg = tr32(GRC_MISC_CFG);
+ grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
+ grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
+ tg3_flag_set(tp, IS_5788);
+
+ if (!tg3_flag(tp, IS_5788) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_flag_set(tp, TAGGED_STATUS);
+ if (tg3_flag(tp, TAGGED_STATUS)) {
+ tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+ HOSTCC_MODE_CLRTICK_TXBD);
+
+ tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+ }
+
+ /* Preserve the APE MAC_MODE bits */
+ if (tg3_flag(tp, ENABLE_APE))
+ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ else
+ tp->mac_mode = 0;
+
+ /* these are limited to 10/100 only */
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
+ (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+ (tp->phy_flags & TG3_PHYFLG_IS_FET))
+ tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
+
+ err = tg3_phy_probe(tp);
+ if (err) {
+ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
+ /* ... but do not return immediately ... */
+ tg3_mdio_fini(tp);
+ }
+
+ tg3_read_vpd(tp);
+ tg3_read_fw_ver(tp);
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+ else
+ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+ }
+
+ /* 5700 {AX,BX} chips have a broken status block link
+ * change bit implementation, so we must use the
+ * status register in those cases.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tg3_flag_set(tp, USE_LINKCHG_REG);
+ else
+ tg3_flag_clear(tp, USE_LINKCHG_REG);
+
+ /* The led_ctrl is set during tg3_phy_probe, here we might
+ * have to force the link status polling mechanism based
+ * upon subsystem IDs.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+ tg3_flag_set(tp, USE_LINKCHG_REG);
+ }
+
+ /* For all SERDES we poll the MAC status register. */
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ tg3_flag_set(tp, POLL_SERDES);
+ else
+ tg3_flag_clear(tp, POLL_SERDES);
+
+ tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ tg3_flag(tp, PCIX_MODE)) {
+ tp->rx_offset = 0;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
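+		/* With rx_offset forced to 0 the IP header is unaligned;
+		 * copy every frame when unaligned access is inefficient.
+		 */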
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
+
+ tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+ tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+ tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
+
+ tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
+
+ /* Increment the rx prod index on the rx std ring by at most
+	 * 8 for these chips to work around hw errata.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ tp->rx_std_max_post = 8;
+
+ if (tg3_flag(tp, ASPM_WORKAROUND))
+ tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
+ PCIE_PWR_MGMT_L1_THRESH_MSK;
+
+ return err;
+}
+
+#ifdef CONFIG_SPARC
+static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+ struct pci_dev *pdev = tp->pdev;
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+ const unsigned char *addr;
+ int len;
+
+ addr = of_get_property(dp, "local-mac-address", &len);
+ if (addr && len == 6) {
+ memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->perm_addr, dev->dev_addr, 6);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
+ return 0;
+}
+#endif
+
+static int __devinit tg3_get_device_address(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+ u32 hi, lo, mac_offset;
+ int addr_ok = 0;
+
+#ifdef CONFIG_SPARC
+ if (!tg3_get_macaddr_sparc(tp))
+ return 0;
+#endif
+
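+	/* Default NVRAM offset of the MAC address; multi-port and
+	 * multi-function devices keep additional addresses at higher
+	 * offsets.
+	 */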
+ mac_offset = 0x7c;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ tg3_flag(tp, 5780_CLASS)) {
+ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+ mac_offset = 0xcc;
+ if (tg3_nvram_lock(tp))
+ tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
+ else
+ tg3_nvram_unlock(tp);
+ } else if (tg3_flag(tp, 5717_PLUS)) {
+ if (tp->pci_fn & 1)
+ mac_offset = 0xcc;
+ if (tp->pci_fn > 1)
+ mac_offset += 0x18c;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ mac_offset = 0x10;
+
+ /* First try to get it from MAC address mailbox. */
+ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
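+	/* 0x484b is ASCII "HK", apparently the bootcode's signature for
+	 * a valid address in the mailbox.
+	 */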
+ if ((hi >> 16) == 0x484b) {
+ dev->dev_addr[0] = (hi >> 8) & 0xff;
+ dev->dev_addr[1] = (hi >> 0) & 0xff;
+
+ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
+ dev->dev_addr[2] = (lo >> 24) & 0xff;
+ dev->dev_addr[3] = (lo >> 16) & 0xff;
+ dev->dev_addr[4] = (lo >> 8) & 0xff;
+ dev->dev_addr[5] = (lo >> 0) & 0xff;
+
+ /* Some old bootcode may report a 0 MAC address in SRAM */
+ addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+ }
+ if (!addr_ok) {
+ /* Next, try NVRAM. */
+ if (!tg3_flag(tp, NO_NVRAM) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
+ memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
+ memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+ }
+ /* Finally just fetch it out of the MAC control regs. */
+ else {
+ hi = tr32(MAC_ADDR_0_HIGH);
+ lo = tr32(MAC_ADDR_0_LOW);
+
+ dev->dev_addr[5] = lo & 0xff;
+ dev->dev_addr[4] = (lo >> 8) & 0xff;
+ dev->dev_addr[3] = (lo >> 16) & 0xff;
+ dev->dev_addr[2] = (lo >> 24) & 0xff;
+ dev->dev_addr[1] = hi & 0xff;
+ dev->dev_addr[0] = (hi >> 8) & 0xff;
+ }
+ }
+
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+#ifdef CONFIG_SPARC
+ if (!tg3_get_default_macaddr_sparc(tp))
+ return 0;
+#endif
+ return -EINVAL;
+ }
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+ return 0;
+}
+
+#define BOUNDARY_SINGLE_CACHELINE 1
+#define BOUNDARY_MULTI_CACHELINE 2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+ int cacheline_size;
+ u8 byte;
+ int goal;
+
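+	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
+	 * zero means it was never programmed, so assume the 1024-byte
+	 * worst case.
+	 */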
+ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+ if (byte == 0)
+ cacheline_size = 1024;
+ else
+ cacheline_size = (int) byte * 4;
+
+ /* On 5703 and later chips, the boundary bits have no
+ * effect.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ !tg3_flag(tp, PCI_EXPRESS))
+ goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+ goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+ goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+ goal = 0;
+#endif
+#endif
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ goto out;
+ }
+
+ if (!goal)
+ goto out;
+
+ /* PCI controllers on most RISC systems tend to disconnect
+ * when a device tries to burst across a cache-line boundary.
+ * Therefore, letting tg3 do so just wastes PCI bandwidth.
+ *
+ * Unfortunately, for PCI-E there are only limited
+ * write-side controls for this, and thus for reads
+ * we will still get the disconnects. We'll also waste
+ * these PCI cycles for both read and write for chips
+ * other than 5700 and 5701 which do not implement the
+ * boundary bits.
+ */
+ if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
+ switch (cacheline_size) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+ } else {
+ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+ }
+ break;
+
+ case 256:
+ val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+ break;
+
+ default:
+ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+ break;
+ }
+ } else if (tg3_flag(tp, PCI_EXPRESS)) {
+ switch (cacheline_size) {
+ case 16:
+ case 32:
+ case 64:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+ val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+ break;
+ }
+ /* fallthrough */
+ case 128:
+ default:
+ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+ val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+ break;
+ }
+ } else {
+ switch (cacheline_size) {
+ case 16:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_16 |
+ DMA_RWCTRL_WRITE_BNDRY_16);
+ break;
+ }
+ /* fallthrough */
+ case 32:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_32 |
+ DMA_RWCTRL_WRITE_BNDRY_32);
+ break;
+ }
+ /* fallthrough */
+ case 64:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_64 |
+ DMA_RWCTRL_WRITE_BNDRY_64);
+ break;
+ }
+ /* fallthrough */
+ case 128:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_128 |
+ DMA_RWCTRL_WRITE_BNDRY_128);
+ break;
+ }
+ /* fallthrough */
+ case 256:
+ val |= (DMA_RWCTRL_READ_BNDRY_256 |
+ DMA_RWCTRL_WRITE_BNDRY_256);
+ break;
+ case 512:
+ val |= (DMA_RWCTRL_READ_BNDRY_512 |
+ DMA_RWCTRL_WRITE_BNDRY_512);
+ break;
+ case 1024:
+ default:
+ val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+ DMA_RWCTRL_WRITE_BNDRY_1024);
+ break;
+ }
+ }
+
+out:
+ return val;
+}
+
+static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
+{
+ struct tg3_internal_buffer_desc test_desc;
+ u32 sram_dma_descs;
+ int i, ret;
+
+ sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
+
+ tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
+ tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
+ tw32(RDMAC_STATUS, 0);
+ tw32(WDMAC_STATUS, 0);
+
+ tw32(BUFMGR_MODE, 0);
+ tw32(FTQ_RESET, 0);
+
+ test_desc.addr_hi = ((u64) buf_dma) >> 32;
+ test_desc.addr_lo = buf_dma & 0xffffffff;
+ test_desc.nic_mbuf = 0x00002100;
+ test_desc.len = size;
+
+ /*
+	 * HP ZX1 systems were seeing test failures on 5701 cards running
+	 * at 33MHz the *second* time the tg3 driver was loaded after an
+	 * initial scan.
+ *
+ * Broadcom tells me:
+ * ...the DMA engine is connected to the GRC block and a DMA
+ * reset may affect the GRC block in some unpredictable way...
+ * The behavior of resets to individual blocks has not been tested.
+ *
+ * Broadcom noted the GRC reset will also reset all sub-components.
+ */
+ if (to_device) {
+ test_desc.cqid_sqid = (13 << 8) | 2;
+
+ tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
+ udelay(40);
+ } else {
+ test_desc.cqid_sqid = (16 << 8) | 7;
+
+ tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
+ udelay(40);
+ }
+ test_desc.flags = 0x00000005;
+
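+	/* Copy the descriptor into chip SRAM one 32-bit word at a time
+	 * through the PCI memory window registers.
+	 */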
+ for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
+ u32 val;
+
+ val = *(((u32 *)&test_desc) + i);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
+ sram_dma_descs + (i * sizeof(u32)));
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+ }
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ if (to_device)
+ tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
+ else
+ tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
+
+ ret = -ENODEV;
+ for (i = 0; i < 40; i++) {
+ u32 val;
+
+ if (to_device)
+ val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
+ else
+ val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
+ if ((val & 0xffff) == sram_dma_descs) {
+ ret = 0;
+ break;
+ }
+
+ udelay(100);
+ }
+
+ return ret;
+}
+
+#define TEST_BUFFER_SIZE 0x2000
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+ { },
+};
+
+static int __devinit tg3_test_dma(struct tg3 *tp)
+{
+ dma_addr_t buf_dma;
+ u32 *buf, saved_dma_rwctrl;
+ int ret = 0;
+
+ buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+ &buf_dma, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_nofree;
+ }
+
+ tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
+ (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
+
+ tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
+
+ if (tg3_flag(tp, 57765_PLUS))
+ goto out;
+
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ /* DMA read watermark not used on PCIE */
+ tp->dma_rwctrl |= 0x00180000;
+ } else if (!tg3_flag(tp, PCIX_MODE)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ tp->dma_rwctrl |= 0x003f0000;
+ else
+ tp->dma_rwctrl |= 0x003f000f;
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
+ u32 read_water = 0x7;
+
+ /* If the 5704 is behind the EPB bridge, we can
+ * do the less restrictive ONE_DMA workaround for
+ * better performance.
+ */
+ if (tg3_flag(tp, 40BIT_DMA_BUG) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tp->dma_rwctrl |= 0x8000;
+ else if (ccval == 0x6 || ccval == 0x7)
+ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+ read_water = 4;
+ /* Set bit 23 to enable PCIX hw bug fix */
+ tp->dma_rwctrl |=
+ (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
+ (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
+ (1 << 23);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+ /* 5780 always in PCIX mode */
+ tp->dma_rwctrl |= 0x00144000;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ /* 5714 always in PCIX mode */
+ tp->dma_rwctrl |= 0x00148000;
+ } else {
+ tp->dma_rwctrl |= 0x001b000f;
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tp->dma_rwctrl &= 0xfffffff0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ /* Remove this if it causes problems for some boards. */
+ tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
+
+ /* On 5700/5701 chips, we need to set this bit.
+ * Otherwise the chip will issue cacheline transactions
+ * to streamable DMA memory with not all the byte
+ * enables turned on. This is an error on several
+ * RISC PCI controllers, in particular sparc64.
+ *
+ * On 5703/5704 chips, this bit has been reassigned
+ * a different meaning. In particular, it is used
+ * on those chips to enable a PCI-X workaround.
+ */
+ tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
+ }
+
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+#if 0
+ /* Unneeded, already done by tg3_get_invariants. */
+ tg3_switch_clocks(tp);
+#endif
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ goto out;
+
+ /* It is best to perform DMA test with maximum write burst size
+ * to expose the 5700/5701 write DMA bug.
+ */
+ saved_dma_rwctrl = tp->dma_rwctrl;
+ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
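+	/* Fill the buffer with a known pattern, DMA it to the chip and
+	 * back, and verify the contents; on corruption, fall back to a
+	 * 16-byte write boundary and retry.
+	 */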
+ while (1) {
+ u32 *p = buf, i;
+
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
+ p[i] = i;
+
+ /* Send the buffer to the chip. */
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
+ if (ret) {
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer write failed. err = %d\n",
+ __func__, ret);
+ break;
+ }
+
+#if 0
+ /* validate data reached card RAM correctly. */
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+ u32 val;
+ tg3_read_mem(tp, 0x2100 + (i*4), &val);
+ if (le32_to_cpu(val) != p[i]) {
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on device! "
+ "(%d != %d)\n", __func__, val, i);
+ /* ret = -ENODEV here? */
+ }
+ p[i] = 0;
+ }
+#endif
+ /* Now read it back. */
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
+ if (ret) {
+ dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+ "err = %d\n", __func__, ret);
+ break;
+ }
+
+ /* Verify it. */
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+ if (p[i] == i)
+ continue;
+
+ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+ DMA_RWCTRL_WRITE_BNDRY_16) {
+ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ break;
+ } else {
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on read back! "
+ "(%d != %d)\n", __func__, p[i], i);
+ ret = -ENODEV;
+ goto out;
+ }
+ }
+
+ if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
+ /* Success. */
+ ret = 0;
+ break;
+ }
+ }
+ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+ DMA_RWCTRL_WRITE_BNDRY_16) {
+ /* DMA test passed without adjusting DMA boundary,
+ * now look for chipsets that are known to expose the
+ * DMA bug without failing the test.
+ */
+ if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
+ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+ } else {
+ /* Safe to use the calculated DMA boundary. */
+ tp->dma_rwctrl = saved_dma_rwctrl;
+ }
+
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ }
+
+out:
+ dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
+out_nofree:
+ return ret;
+}
+
+static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
+{
+ if (tg3_flag(tp, 57765_PLUS)) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_57765;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_57765;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+ } else if (tg3_flag(tp, 5705_PLUS)) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_5705;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_5906;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_5906;
+ }
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_5780;
+ } else {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO;
+ }
+
+ tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
+ tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
+}
+
+static char * __devinit tg3_phy_string(struct tg3 *tp)
+{
+ switch (tp->phy_id & TG3_PHY_ID_MASK) {
+ case TG3_PHY_ID_BCM5400: return "5400";
+ case TG3_PHY_ID_BCM5401: return "5401";
+ case TG3_PHY_ID_BCM5411: return "5411";
+ case TG3_PHY_ID_BCM5701: return "5701";
+ case TG3_PHY_ID_BCM5703: return "5703";
+ case TG3_PHY_ID_BCM5704: return "5704";
+ case TG3_PHY_ID_BCM5705: return "5705";
+ case TG3_PHY_ID_BCM5750: return "5750";
+ case TG3_PHY_ID_BCM5752: return "5752";
+ case TG3_PHY_ID_BCM5714: return "5714";
+ case TG3_PHY_ID_BCM5780: return "5780";
+ case TG3_PHY_ID_BCM5755: return "5755";
+ case TG3_PHY_ID_BCM5787: return "5787";
+ case TG3_PHY_ID_BCM5784: return "5784";
+ case TG3_PHY_ID_BCM5756: return "5722/5756";
+ case TG3_PHY_ID_BCM5906: return "5906";
+ case TG3_PHY_ID_BCM5761: return "5761";
+ case TG3_PHY_ID_BCM5718C: return "5718C";
+ case TG3_PHY_ID_BCM5718S: return "5718S";
+ case TG3_PHY_ID_BCM57765: return "57765";
+ case TG3_PHY_ID_BCM5719C: return "5719C";
+ case TG3_PHY_ID_BCM5720C: return "5720C";
+ case TG3_PHY_ID_BCM8002: return "8002/serdes";
+ case 0: return "serdes";
+ default: return "unknown";
+ }
+}
+
+static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
+{
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ strcpy(str, "PCI Express");
+ return str;
+ } else if (tg3_flag(tp, PCIX_MODE)) {
+ u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
+
+ strcpy(str, "PCIX:");
+
+ if ((clock_ctrl == 7) ||
+ ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
+ GRC_MISC_CFG_BOARD_ID_5704CIOBE))
+ strcat(str, "133MHz");
+ else if (clock_ctrl == 0)
+ strcat(str, "33MHz");
+ else if (clock_ctrl == 2)
+ strcat(str, "50MHz");
+ else if (clock_ctrl == 4)
+ strcat(str, "66MHz");
+ else if (clock_ctrl == 6)
+ strcat(str, "100MHz");
+ } else {
+ strcpy(str, "PCI:");
+ if (tg3_flag(tp, PCI_HIGH_SPEED))
+ strcat(str, "66MHz");
+ else
+ strcat(str, "33MHz");
+ }
+ if (tg3_flag(tp, PCI_32BIT))
+ strcat(str, ":32-bit");
+ else
+ strcat(str, ":64-bit");
+ return str;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
+{
+ struct pci_dev *peer;
+ unsigned int func, devnr = tp->pdev->devfn & ~7;
+
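+	/* Scan the other functions of this slot for the peer device. */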
+ for (func = 0; func < 8; func++) {
+ peer = pci_get_slot(tp->pdev->bus, devnr | func);
+ if (peer && peer != tp->pdev)
+ break;
+ pci_dev_put(peer);
+ }
+ /* 5704 can be configured in single-port mode, set peer to
+ * tp->pdev in that case.
+ */
+ if (!peer) {
+ peer = tp->pdev;
+ return peer;
+ }
+
+ /*
+ * We don't need to keep the refcount elevated; there's no way
+	 * to remove one half of this device without removing the other.
+ */
+ pci_dev_put(peer);
+
+ return peer;
+}
+
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+ struct ethtool_coalesce *ec = &tp->coal;
+
+ memset(ec, 0, sizeof(*ec));
+ ec->cmd = ETHTOOL_GCOALESCE;
+ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+ ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+ ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+ ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+ ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+ ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+ if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+ HOSTCC_MODE_CLRTICK_TXBD)) {
+ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+ }
+
+ if (tg3_flag(tp, 5705_PLUS)) {
+ ec->rx_coalesce_usecs_irq = 0;
+ ec->tx_coalesce_usecs_irq = 0;
+ ec->stats_block_coalesce_usecs = 0;
+ }
+}
+
+static const struct net_device_ops tg3_netdev_ops = {
+ .ndo_open = tg3_open,
+ .ndo_stop = tg3_close,
+ .ndo_start_xmit = tg3_start_xmit,
+ .ndo_get_stats64 = tg3_get_stats64,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = tg3_set_rx_mode,
+ .ndo_set_mac_address = tg3_set_mac_addr,
+ .ndo_do_ioctl = tg3_ioctl,
+ .ndo_tx_timeout = tg3_tx_timeout,
+ .ndo_change_mtu = tg3_change_mtu,
+ .ndo_fix_features = tg3_fix_features,
+ .ndo_set_features = tg3_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tg3_poll_controller,
+#endif
+};
+
+static int __devinit tg3_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct tg3 *tp;
+ int i, err, pm_cap;
+ u32 sndmbx, rcvmbx, intmbx;
+ char str[40];
+ u64 dma_mask, persist_dma_mask;
+ u32 features = 0;
+
+ printk_once(KERN_INFO "%s\n", version);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ /* Find power-management capability. */
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap == 0) {
+ dev_err(&pdev->dev,
+ "Cannot find Power Management capability, aborting\n");
+ err = -EIO;
+ goto err_out_free_res;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err) {
+ dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
+ goto err_out_free_res;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
+ if (!dev) {
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto err_out_power_down;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ tp = netdev_priv(dev);
+ tp->pdev = pdev;
+ tp->dev = dev;
+ tp->pm_cap = pm_cap;
+ tp->rx_mode = TG3_DEF_RX_MODE;
+ tp->tx_mode = TG3_DEF_TX_MODE;
+
+ if (tg3_debug > 0)
+ tp->msg_enable = tg3_debug;
+ else
+ tp->msg_enable = TG3_DEF_MSG_ENABLE;
+
+ /* The word/byte swap controls here control register access byte
+ * swapping. DMA data byte swapping is controlled in the GRC_MODE
+ * setting below.
+ */
+ tp->misc_host_ctrl =
+ MISC_HOST_CTRL_MASK_PCI_INT |
+ MISC_HOST_CTRL_WORD_SWAP |
+ MISC_HOST_CTRL_INDIR_ACCESS |
+ MISC_HOST_CTRL_PCISTATE_RW;
+
+ /* The NONFRM (non-frame) byte/word swap controls take effect
+ * on descriptor entries, anything which isn't packet data.
+ *
+ * The StrongARM chips on the board (one for tx, one for rx)
+ * are running in big-endian mode.
+ */
+ tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
+ GRC_MODE_WSWAP_NONFRM_DATA);
+#ifdef __BIG_ENDIAN
+ tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
+#endif
+ spin_lock_init(&tp->lock);
+ spin_lock_init(&tp->indirect_lock);
+ INIT_WORK(&tp->reset_task, tg3_reset_task);
+
+ tp->regs = pci_ioremap_bar(pdev, BAR_0);
+ if (!tp->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
+ tg3_flag_set(tp, ENABLE_APE);
+ tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
+ if (!tp->aperegs) {
+ dev_err(&pdev->dev,
+ "Cannot map APE registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+ }
+
+ tp->rx_pending = TG3_DEF_RX_RING_PENDING;
+ tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
+
+ dev->ethtool_ops = &tg3_ethtool_ops;
+ dev->watchdog_timeo = TG3_TX_TIMEOUT;
+ dev->netdev_ops = &tg3_netdev_ops;
+ dev->irq = pdev->irq;
+
+ err = tg3_get_invariants(tp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Problem fetching invariants of chip, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ /* The EPB bridge inside 5714, 5715, and 5780 and any
+ * device behind the EPB cannot support DMA addresses > 40-bit.
+ * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+ * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+ * do DMA address check in tg3_start_xmit().
+ */
+ if (tg3_flag(tp, IS_5788))
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
+ else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
+#ifdef CONFIG_HIGHMEM
+ dma_mask = DMA_BIT_MASK(64);
+#endif
+ } else
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+
+ /* Configure DMA attributes. */
+ if (dma_mask > DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (!err) {
+ features |= NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev,
+ persist_dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
+ goto err_out_apeunmap;
+ }
+ }
+ }
+ if (err || dma_mask == DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_out_apeunmap;
+ }
+ }
+
+ tg3_init_bufmgr_config(tp);
+
+ features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ /* 5700 B0 chips do not support checksumming correctly due
+ * to hardware bugs.
+ */
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (tg3_flag(tp, 5755_PLUS))
+ features |= NETIF_F_IPV6_CSUM;
+ }
+
+ /* TSO is on by default on chips that support hardware TSO.
+ * Firmware TSO on older chips gives lower performance, so it
+ * is off by default, but can be enabled using ethtool.
+ */
+ if ((tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) &&
+ (features & NETIF_F_IP_CSUM))
+ features |= NETIF_F_TSO;
+ if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
+ if (features & NETIF_F_IPV6_CSUM)
+ features |= NETIF_F_TSO6;
+ if (tg3_flag(tp, HW_TSO_3) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ features |= NETIF_F_TSO_ECN;
+ }
+
+ dev->features |= features;
+ dev->vlan_features |= features;
+
+ /*
+ * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
+ * loopback for the remaining devices.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT))
+ /* Add the loopback capability */
+ features |= NETIF_F_LOOPBACK;
+
+ dev->hw_features |= features;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ !tg3_flag(tp, TSO_CAPABLE) &&
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
+ tg3_flag_set(tp, MAX_RXPEND_64);
+ tp->rx_pending = 63;
+ }
+
+ err = tg3_get_device_address(tp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Could not obtain valid ethernet address, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ /*
+	 * Reset the chip in case a UNDI or EFI driver did not shut down
+	 * DMA. The self test will enable WDMAC and we'll see (spurious)
+ * pending DMA on the PCI bus at that point.
+ */
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ }
+
+ err = tg3_test_dma(tp);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+ rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+ sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+ for (i = 0; i < tp->irq_max; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->tp = tp;
+ tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+ tnapi->int_mbox = intmbx;
+ if (i <= 4)
+ intmbx += 0x8;
+ else
+ intmbx += 0x4;
+
+ tnapi->consmbox = rcvmbx;
+ tnapi->prodmbox = sndmbx;
+
+ if (i)
+ tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+ else
+ tnapi->coal_now = HOSTCC_MODE_NOW;
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ break;
+
+ /*
+ * If we support MSIX, we'll be using RSS. If we're using
+ * RSS, the first vector only handles link interrupts and the
+ * remaining vectors handle rx and tx interrupts. Reuse the
+	 * mailbox values for the next iteration. The values we set up
+	 * above are still useful for the single-vectored mode.
+ */
+ if (!i)
+ continue;
+
+ rcvmbx += 0x8;
+
+ if (sndmbx & 0x4)
+ sndmbx -= 0x4;
+ else
+ sndmbx += 0xc;
+ }
+
+ tg3_init_coal(tp);
+
+ pci_set_drvdata(pdev, dev);
+
+ if (tg3_flag(tp, 5717_PLUS)) {
+ /* Resume a low-power mode */
+ tg3_frob_aux_power(tp, false);
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
+ tp->board_part_number,
+ tp->pci_chip_rev_id,
+ tg3_bus_string(tp, str),
+ dev->dev_addr);
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+ struct phy_device *phydev;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+ } else {
+ char *ethtype;
+
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ ethtype = "10/100Base-TX";
+ else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ ethtype = "1000Base-SX";
+ else
+ ethtype = "10/100/1000Base-T";
+
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+ "(WireSpeed[%d], EEE[%d])\n",
+ tg3_phy_string(tp), ethtype,
+ (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
+ (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
+ }
+
+ netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
+ (dev->features & NETIF_F_RXCSUM) != 0,
+ tg3_flag(tp, USE_LINKCHG_REG) != 0,
+ (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
+ tg3_flag(tp, ENABLE_ASF) != 0,
+ tg3_flag(tp, TSO_CAPABLE) != 0);
+ netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+ tp->dma_rwctrl,
+ pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
+ ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_out_apeunmap:
+ if (tp->aperegs) {
+ iounmap(tp->aperegs);
+ tp->aperegs = NULL;
+ }
+
+err_out_iounmap:
+ if (tp->regs) {
+ iounmap(tp->regs);
+ tp->regs = NULL;
+ }
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out_power_down:
+ pci_set_power_state(pdev, PCI_D3hot);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit tg3_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tp->fw)
+ release_firmware(tp->fw);
+
+ cancel_work_sync(&tp->reset_task);
+
+ if (!tg3_flag(tp, USE_PHYLIB)) {
+ tg3_phy_fini(tp);
+ tg3_mdio_fini(tp);
+ }
+
+ unregister_netdev(dev);
+ if (tp->aperegs) {
+ iounmap(tp->aperegs);
+ tp->aperegs = NULL;
+ }
+ if (tp->regs) {
+ iounmap(tp->regs);
+ tp->regs = NULL;
+ }
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tg3_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return 0;
+
+ flush_work_sync(&tp->reset_task);
+ tg3_phy_stop(tp);
+ tg3_netif_stop(tp);
+
+ del_timer_sync(&tp->timer);
+
+ tg3_full_lock(tp, 1);
+ tg3_disable_ints(tp);
+ tg3_full_unlock(tp);
+
+ netif_device_detach(dev);
+
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+ tg3_full_unlock(tp);
+
+ err = tg3_power_down_prepare(tp);
+ if (err) {
+ int err2;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err2 = tg3_restart_hw(tp, 1);
+ if (err2)
+ goto out;
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ netif_device_attach(dev);
+ tg3_netif_start(tp);
+
+out:
+ tg3_full_unlock(tp);
+
+ if (!err2)
+ tg3_phy_start(tp);
+ }
+
+ return err;
+}
+
+static int tg3_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_attach(dev);
+
+ tg3_full_lock(tp, 0);
+
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err = tg3_restart_hw(tp, 1);
+ if (err)
+ goto out;
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ tg3_netif_start(tp);
+
+out:
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+#define TG3_PM_OPS (&tg3_pm_ops)
+
+#else
+
+#define TG3_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * tg3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
+ rtnl_lock();
+
+ if (!netif_running(netdev))
+ goto done;
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ del_timer_sync(&tp->timer);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ /* Want to make sure that the reset task doesn't run */
+ cancel_work_sync(&tp->reset_task);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ netif_device_detach(netdev);
+
+ /* Clean up software state, even if MMIO is blocked */
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+ tg3_full_unlock(tp);
+
+done:
+ if (state == pci_channel_io_perm_failure)
+ err = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ return err;
+}
+
+/**
+ * tg3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+ int err;
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ goto done;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
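+	/* Re-save config space right away, presumably so that a later
+	 * recovery pass still has valid state to restore.
+	 */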
+ pci_save_state(pdev);
+
+ if (!netif_running(netdev)) {
+ rc = PCI_ERS_RESULT_RECOVERED;
+ goto done;
+ }
+
+ err = tg3_power_up(tp);
+ if (err)
+ goto done;
+
+ rc = PCI_ERS_RESULT_RECOVERED;
+
+done:
+ rtnl_unlock();
+
+ return rc;
+}
+
+/**
+ * tg3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void tg3_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ int err;
+
+ rtnl_lock();
+
+ if (!netif_running(netdev))
+ goto done;
+
+ tg3_full_lock(tp, 0);
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err = tg3_restart_hw(tp, 1);
+ tg3_full_unlock(tp);
+ if (err) {
+ netdev_err(netdev, "Cannot restart hardware after reset.\n");
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ tg3_netif_start(tp);
+
+ tg3_phy_start(tp);
+
+done:
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers tg3_err_handler = {
+ .error_detected = tg3_io_error_detected,
+ .slot_reset = tg3_io_slot_reset,
+ .resume = tg3_io_resume
+};
+
+static struct pci_driver tg3_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = tg3_pci_tbl,
+ .probe = tg3_init_one,
+ .remove = __devexit_p(tg3_remove_one),
+ .err_handler = &tg3_err_handler,
+ .driver.pm = TG3_PM_OPS,
+};
+
+static int __init tg3_init(void)
+{
+ return pci_register_driver(&tg3_driver);
+}
+
+static void __exit tg3_cleanup(void)
+{
+ pci_unregister_driver(&tg3_driver);
+}
+
+module_init(tg3_init);
+module_exit(tg3_cleanup);
--- /dev/null
- if (i <= cmd->rule_cnt) {
- rule_locs[i] = comp->fs.location;
- i++;
- }
+/*
+ * drivers/net/gianfar_ethtool.c
+ *
+ * Gianfar Ethernet Driver
+ * Ethtool support for Gianfar Enet
+ * Based on e1000 ethtool support
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
+ *
+ * This software may be used and distributed according to
+ * the terms of the GNU Public License, Version 2, incorporated herein
+ * by reference.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <asm/types.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/sort.h>
+#include <linux/if_vlan.h>
+
+#include "gianfar.h"
+
+extern void gfar_start(struct net_device *dev);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+
+#define GFAR_MAX_COAL_USECS 0xffff
+#define GFAR_MAX_COAL_FRAMES 0xff
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 * buf);
+static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
+static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+
+static char stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-dropped-by-kernel",
+ "rx-large-frame-errors",
+ "rx-short-frame-errors",
+ "rx-non-octet-errors",
+ "rx-crc-errors",
+ "rx-overrun-errors",
+ "rx-busy-errors",
+ "rx-babbling-errors",
+ "rx-truncated-frames",
+ "ethernet-bus-error",
+ "tx-babbling-errors",
+ "tx-underrun-errors",
+ "rx-skb-missing-errors",
+ "tx-timeout-errors",
+ "tx-rx-64-frames",
+ "tx-rx-65-127-frames",
+ "tx-rx-128-255-frames",
+ "tx-rx-256-511-frames",
+ "tx-rx-512-1023-frames",
+ "tx-rx-1024-1518-frames",
+ "tx-rx-1519-1522-good-vlan",
+ "rx-bytes",
+ "rx-packets",
+ "rx-fcs-errors",
+ "receive-multicast-packet",
+ "receive-broadcast-packet",
+ "rx-control-frame-packets",
+ "rx-pause-frame-packets",
+ "rx-unknown-op-code",
+ "rx-alignment-error",
+ "rx-frame-length-error",
+ "rx-code-error",
+ "rx-carrier-sense-error",
+ "rx-undersize-packets",
+ "rx-oversize-packets",
+ "rx-fragmented-frames",
+ "rx-jabber-frames",
+ "rx-dropped-frames",
+ "tx-byte-counter",
+ "tx-packets",
+ "tx-multicast-packets",
+ "tx-broadcast-packets",
+ "tx-pause-control-frames",
+ "tx-deferral-packets",
+ "tx-excessive-deferral-packets",
+ "tx-single-collision-packets",
+ "tx-multiple-collision-packets",
+ "tx-late-collision-packets",
+ "tx-excessive-collision-packets",
+ "tx-total-collision",
+ "reserved",
+ "tx-dropped-frames",
+ "tx-jabber-frames",
+ "tx-fcs-errors",
+ "tx-control-frames",
+ "tx-oversize-frames",
+ "tx-undersize-frames",
+ "tx-fragmented-frames",
+};
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats */
+static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+ else
+ memcpy(buf, stat_gstrings,
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Fill in an array of 64-bit statistics from various sources.
+ * This array will be appended to the end of the ethtool_stats
+ * structure, and returned to user space
+ */
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u64 *extra = (u64 *) & priv->extra_stats;
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
+ struct gfar_stats *stats = (struct gfar_stats *) buf;
+
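+		/* The RMON counters are 32-bit registers; widen each one
+		 * into the 64-bit slots ethtool expects.
+		 */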
+ for (i = 0; i < GFAR_RMON_LEN; i++)
+ stats->rmon[i] = (u64) gfar_read(&rmon[i]);
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ stats->extra[i] = extra[i];
+ } else
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = extra[i];
+}
+
+static int gfar_sset_count(struct net_device *dev, int sset)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ return GFAR_STATS_LEN;
+ else
+ return GFAR_EXTRA_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Fills in the drvinfo structure with some basic info */
+static void gfar_gdrvinfo(struct net_device *dev, struct
+ ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+
+static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (NULL == phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+
+/* Return the current settings in the ethtool_cmd structure */
+static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+
+ if (NULL == phydev)
+ return -ENODEV;
+ tx_queue = priv->tx_queue[0];
+ rx_queue = priv->rx_queue[0];
+
+	/* etsec-1.7 and older versions have only one set of txic
+	 * and rxic registers, although they support multiple queues */
+ cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+ cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+/* Return the length of the register structure */
+static int gfar_reglen(struct net_device *dev)
+{
+ return sizeof (struct gfar);
+}
+
+/* Return a dump of the GFAR register space */
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
+ u32 *buf = (u32 *) regbuf;
+
+ for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
+ buf[i] = gfar_read(&theregs[i]);
+}
+
+/* Convert microseconds to ethernet clock ticks; the tick length
+ * depends on what speed the controller is running at */
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0
+ * if usecs > 0 */
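+	/* count is the tick period in nanoseconds at this link speed,
+	 * so this rounds usecs (converted to ns) up to whole ticks.
+	 */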
+ return (usecs * 1000 + count - 1) / count;
+}
+
+/* Convert ethernet clock ticks to microseconds */
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+	/* Convert ticks back to microseconds (integer division truncates) */
+ return (ticks * count) / 1000;
+}
+
+/* Get the coalescing parameters, and put them in the cvals
+ * structure. */
+static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ unsigned long rxtime;
+ unsigned long rxcount;
+ unsigned long txtime;
+ unsigned long txcount;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+ rx_queue = priv->rx_queue[0];
+ tx_queue = priv->tx_queue[0];
+
+ rxtime = get_ictt_value(rx_queue->rxic);
+ rxcount = get_icft_value(rx_queue->rxic);
+ txtime = get_ictt_value(tx_queue->txic);
+ txcount = get_icft_value(tx_queue->txic);
+ cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
+ cvals->rx_max_coalesced_frames = rxcount;
+
+ cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
+ cvals->tx_max_coalesced_frames = txcount;
+
+ cvals->use_adaptive_rx_coalesce = 0;
+ cvals->use_adaptive_tx_coalesce = 0;
+
+ cvals->pkt_rate_low = 0;
+ cvals->rx_coalesce_usecs_low = 0;
+ cvals->rx_max_coalesced_frames_low = 0;
+ cvals->tx_coalesce_usecs_low = 0;
+ cvals->tx_max_coalesced_frames_low = 0;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+	/* When the packet rate (measured in packets per second)
+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ cvals->pkt_rate_high = 0;
+ cvals->rx_coalesce_usecs_high = 0;
+ cvals->rx_max_coalesced_frames_high = 0;
+ cvals->tx_coalesce_usecs_high = 0;
+ cvals->tx_max_coalesced_frames_high = 0;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ cvals->rate_sample_interval = 0;
+
+ return 0;
+}
+
+/* Change the coalescing values.
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0
+ * in order for coalescing to be active
+ */
+static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i = 0;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ /* Set up rx coalescing */
+ /* As of now, we will enable/disable coalescing for all
+	 * queues together in the case of eTSEC2; this will be modified
+ * along with the ethtool interface */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 1;
+ }
+
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+ /* Check the bounds of the values */
+ if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ pr_info("Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ pr_info("Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rxic = mk_ic_value(
+ cvals->rx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+ }
+
+ /* Set up tx coalescing */
+ if ((cvals->tx_coalesce_usecs == 0) ||
+ (cvals->tx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 1;
+ }
+
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ pr_info("Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ pr_info("Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->txic = mk_ic_value(
+ cvals->tx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+ }
+
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
+ return 0;
+}
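+
+/* Example (assuming an interface named eth0): coalescing can be tuned
+ * with e.g. "ethtool -C eth0 rx-usecs 30 rx-frames 16"; a zero for
+ * either value disables coalescing on all queues, and values above
+ * GFAR_MAX_COAL_USECS/GFAR_MAX_COAL_FRAMES are rejected with -EINVAL
+ * as checked above. */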
+
+/* Fills in rvals with the current ring parameters. Currently,
+ * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
+ * jumbo are ignored by the driver */
+static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ tx_queue = priv->tx_queue[0];
+ rx_queue = priv->rx_queue[0];
+
+ rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ rvals->rx_pending = rx_queue->rx_ring_size;
+ rvals->rx_mini_pending = rx_queue->rx_ring_size;
+ rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+ rvals->tx_pending = tx_queue->tx_ring_size;
+}
+
+/* Change the current ring parameters, stopping the controller if
+ * necessary so that we don't mess things up while we're in
+ * motion. We wait for the ring to be clean before reallocating
+ * the rings. */
+static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0, i = 0;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->rx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->tx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+
+ if (dev->flags & IFF_UP) {
+ unsigned long flags;
+
+ /* Halt TX and RX, and process the frames which
+ * have already been received */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ gfar_halt(dev);
+
+ unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ gfar_clean_rx_ring(priv->rx_queue[i],
+ priv->rx_queue[i]->rx_ring_size);
+
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+ }
+
+ /* Change the size */
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+ priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+ priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+ }
+
+ /* Rebuild the rings with the new size */
+ if (dev->flags & IFF_UP) {
+ err = startup_gfar(dev);
+ netif_tx_wake_all_queues(dev);
+ }
+ return err;
+}
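+
+/* Example (assuming eth0): "ethtool -G eth0 rx 512 tx 512" resizes the
+ * rings; the sizes must be powers of two, capped at
+ * GFAR_RX_MAX_RING_SIZE/GFAR_TX_MAX_RING_SIZE, and a running interface
+ * is halted, drained and restarted around the change as above. */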
+
+int gfar_set_features(struct net_device *dev, u32 features)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err = 0, i = 0;
+ u32 changed = dev->features ^ features;
+
+ if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ gfar_vlan_mode(dev, features);
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ if (dev->flags & IFF_UP) {
+ /* Halt TX and RX, and process the frames which
+ * have already been received */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ gfar_halt(dev);
+
+ unlock_tx_qs(priv);
+ unlock_rx_qs(priv);
+ local_irq_restore(flags);
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ gfar_clean_rx_ring(priv->rx_queue[i],
+ priv->rx_queue[i]->rx_ring_size);
+
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+
+ dev->features = features;
+
+ err = startup_gfar(dev);
+ netif_tx_wake_all_queues(dev);
+ }
+ return err;
+}
+
+static uint32_t gfar_get_msglevel(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ priv->msg_enable = data;
+}
+
+#ifdef CONFIG_PM
+static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ wol->wolopts != 0)
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
+ spin_lock_irqsave(&priv->bflock, flags);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
+ spin_unlock_irqrestore(&priv->bflock, flags);
+
+ return 0;
+}
+#endif
+
+static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
+{
+ u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
+ if (ethflow & RXH_L2DA) {
+ fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+ fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_VLAN) {
+ fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_IP_SRC) {
+ fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & (RXH_IP_DST)) {
+ fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L3_PROTO) {
+ fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_0_1) {
+ fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_2_3) {
+ fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+}
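+
+/* Example (assuming eth0): "ethtool -N eth0 rx-flow-hash tcp4 sdfn"
+ * requests RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, so
+ * the function above emits one hash-table rule per selected field
+ * (the SIA, DIA, SPT and DPT property IDs). */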
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+{
+ unsigned int last_rule_idx = priv->cur_filer_idx;
+ unsigned int cmp_rqfpr;
+ unsigned int *local_rqfpr;
+ unsigned int *local_rqfcr;
+ int i = 0x0, k = 0x0;
+ int j = MAX_FILER_IDX, l = 0x0;
+ int ret = 1;
+
+ local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ if (!local_rqfpr || !local_rqfcr) {
+ pr_err("Out of memory\n");
+ ret = 0;
+ goto err;
+ }
+
+ switch (class) {
+ case TCP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
+ break;
+ case UDP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
+ break;
+ case TCP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
+ break;
+ case UDP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
+ break;
+ default:
+ pr_err("Right now this class is not supported\n");
+ ret = 0;
+ goto err;
+ }
+
+ for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+ local_rqfpr[j] = priv->ftp_rqfpr[i];
+ local_rqfcr[j] = priv->ftp_rqfcr[i];
+ j--;
+ if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+ RQFCR_CLE |RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ break;
+ }
+
+ if (i == MAX_FILER_IDX + 1) {
+ pr_err("No parse rule found, can't create hash rules\n");
+ ret = 0;
+ goto err;
+ }
+
+ /* If a match was found, it marks the start of a cluster rule;
+ * if it was already programmed, we need to overwrite these rules
+ */
+ for (l = i+1; l < MAX_FILER_IDX; l++) {
+ if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ priv->ftp_rqfpr[l] = FPR_FILER_MASK;
+ gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
+ priv->ftp_rqfpr[l]);
+ break;
+ }
+
+ if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ (priv->ftp_rqfcr[l] & RQFCR_AND))
+ continue;
+ else {
+ local_rqfpr[j] = priv->ftp_rqfpr[l];
+ local_rqfcr[j] = priv->ftp_rqfcr[l];
+ j--;
+ }
+ }
+
+ priv->cur_filer_idx = l - 1;
+ last_rule_idx = l;
+
+ /* hash rules */
+ ethflow_to_filer_rules(priv, ethflow);
+
+ /* Write back the popped out rules again */
+ for (k = j+1; k < MAX_FILER_IDX; k++) {
+ priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+ priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+ gfar_write_filer(priv, priv->cur_filer_idx,
+ local_rqfcr[k], local_rqfpr[k]);
+ if (!priv->cur_filer_idx)
+ break;
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+err:
+ kfree(local_rqfcr);
+ kfree(local_rqfpr);
+ return ret;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+ /* write the filer rules here */
+ if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gfar_check_filer_hardware(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = NULL;
+ u32 i;
+
+ regs = priv->gfargrp[0].regs;
+
+ /* Check if we are in FIFO mode */
+ i = gfar_read(&regs->ecntrl);
+ i &= ECNTRL_FIFM;
+ if (i == ECNTRL_FIFM) {
+ netdev_notice(priv->ndev, "Interface in FIFO mode\n");
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
+ if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ /* Or in standard mode */
+ else {
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK;
+ if (i == RCTRL_PRSDEP_MASK) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Sets the properties for the arbitrary filer rule
+ * to the first 4 Layer 4 Bytes */
+ gfar_write(&regs->rbifx, 0xC0C1C2C3);
+ return 0;
+}
+
+static int gfar_comp_asc(const void *a, const void *b)
+{
+ return memcmp(a, b, 4);
+}
+
+static int gfar_comp_desc(const void *a, const void *b)
+{
+ return -memcmp(a, b, 4);
+}
+
+static void gfar_swap(void *a, void *b, int size)
+{
+ u32 *_a = a;
+ u32 *_b = b;
+
+ swap(_a[0], _b[0]);
+ swap(_a[1], _b[1]);
+ swap(_a[2], _b[2]);
+ swap(_a[3], _b[3]);
+}
+
+/* Write a mask to filer cache */
+static void gfar_set_mask(u32 mask, struct filer_table *tab)
+{
+ tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ tab->fe[tab->index].prop = mask;
+ tab->index++;
+}
+
+/* Sets parse bits (e.g. IP or TCP) */
+static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
+ | RQFCR_AND;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+/*
+ * For setting a tuple of value and mask of type flag
+ * Example:
+ * IP-Src = 10.0.0.0/255.0.0.0
+ * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
+ *
+ * Ethtool gives us value=0 and mask=~0 for a don't-care tuple,
+ * and 0 for a don't-care mask.
+ *
+ * The don't-care check and the mask adjustment for mask=0 are done for
+ * VLAN and MAC fields on an upper level (due to missing information on
+ * this level). Those entries can be discarded when value=0 and mask=0.
+ *
+ * Furthermore, all masks are one-padded for better hardware efficiency.
+ */
+static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ switch (flag) {
+ /* 3bit */
+ case RQFCR_PID_PRI:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_PRI_MASK;
+ break;
+ /* 8bit */
+ case RQFCR_PID_L4P:
+ case RQFCR_PID_TOS:
+ if (!~(mask | RQFCR_PID_L4P_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_L4P_MASK;
+ break;
+ /* 12bit */
+ case RQFCR_PID_VID:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_VID_MASK;
+ break;
+ /* 16bit */
+ case RQFCR_PID_DPT:
+ case RQFCR_PID_SPT:
+ case RQFCR_PID_ETY:
+ if (!~(mask | RQFCR_PID_PORT_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_PORT_MASK;
+ break;
+ /* 24bit */
+ case RQFCR_PID_DAH:
+ case RQFCR_PID_DAL:
+ case RQFCR_PID_SAH:
+ case RQFCR_PID_SAL:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_MAC_MASK;
+ break;
+ /* for all real 32bit masks */
+ default:
+ if (!~mask)
+ return;
+ if (!mask)
+ mask = ~0;
+ break;
+ }
+ gfar_set_general_attribute(value, mask, flag, tab);
+}
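+
+/* Worked example: a 12-bit VLAN id with value=5 and mask=0 is not
+ * discarded (value | mask is non-zero); the mask is one-padded with
+ * RQFCR_PID_VID_MASK so that the bits outside the 12-bit field can
+ * never prevent a match in hardware. */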
+
+/* Translates value and mask for UDP, TCP or SCTP */
+static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
+ struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+{
+ gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+ gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
+ gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+}
+
+/* Translates value and mask for RAW-IP4 */
+static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
+ struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+{
+ gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+ gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+ gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
+ gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
+ tab);
+
+}
+
+/* Translates value and mask for ETHER spec */
+static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
+ struct filer_table *tab)
+{
+ u32 upper_temp_mask = 0;
+ u32 lower_temp_mask = 0;
+ /* Source address */
+ if (!is_broadcast_ether_addr(mask->h_source)) {
+
+ if (is_zero_ether_addr(mask->h_source)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_source[0] << 16
+ | mask->h_source[1] << 8
+ | mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16
+ | mask->h_source[4] << 8
+ | mask->h_source[5];
+ }
+ /* Upper 24bit */
+ gfar_set_attribute(
+ value->h_source[0] << 16 | value->h_source[1]
+ << 8 | value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(
+ value->h_source[3] << 16 | value->h_source[4]
+ << 8 | value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
+ }
+ /* Destination address */
+ if (!is_broadcast_ether_addr(mask->h_dest)) {
+
+ /* Special for destination is limited broadcast */
+ if ((is_broadcast_ether_addr(value->h_dest)
+ && is_zero_ether_addr(mask->h_dest))) {
+ gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
+ } else {
+
+ if (is_zero_ether_addr(mask->h_dest)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_dest[0] << 16
+ | mask->h_dest[1] << 8
+ | mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16
+ | mask->h_dest[4] << 8
+ | mask->h_dest[5];
+ }
+
+ /* Upper 24bit */
+ gfar_set_attribute(
+ value->h_dest[0] << 16
+ | value->h_dest[1] << 8
+ | value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(
+ value->h_dest[3] << 16
+ | value->h_dest[4] << 8
+ | value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
+ }
+ }
+
+ gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+
+}
+
+/* Convert a rule to binary filter format of gianfar */
+static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
+ struct filer_table *tab)
+{
+ u32 vlan = 0, vlan_mask = 0;
+ u32 id = 0, id_mask = 0;
+ u32 cfi = 0, cfi_mask = 0;
+ u32 prio = 0, prio_mask = 0;
+
+ u32 old_index = tab->index;
+
+ /* Check if vlan is wanted */
+ if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+ if (!rule->m_ext.vlan_tci)
+ rule->m_ext.vlan_tci = 0xFFFF;
+
+ vlan = RQFPR_VLN;
+ vlan_mask = RQFPR_VLN;
+
+ /* Separate the fields */
+ id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
+ id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
+ cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
+ cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
+ prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ vlan |= RQFPR_CFI;
+ vlan_mask |= RQFPR_CFI;
+ } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ vlan_mask |= RQFPR_CFI;
+ }
+ }
+
+ switch (rule->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
+ &rule->m_u.tcp_ip4_spec, tab);
+ break;
+ case UDP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
+ &rule->m_u.udp_ip4_spec, tab);
+ break;
+ case SCTP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
+ (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+ break;
+ case IP_USER_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
+ (struct ethtool_usrip4_spec *) &rule->m_u, tab);
+ break;
+ case ETHER_FLOW:
+ if (vlan)
+ gfar_set_parse_bits(vlan, vlan_mask, tab);
+ gfar_set_ether((struct ethhdr *) &rule->h_u,
+ (struct ethhdr *) &rule->m_u, tab);
+ break;
+ default:
+ return -1;
+ }
+
+ /* Set the vlan attributes in the end */
+ if (vlan) {
+ gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
+ gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
+ }
+
+ /* If there has been nothing written till now, it must be a default */
+ if (tab->index == old_index) {
+ gfar_set_mask(0xFFFFFFFF, tab);
+ tab->fe[tab->index].ctrl = 0x20;
+ tab->fe[tab->index].prop = 0x0;
+ tab->index++;
+ }
+
+ /* Remove last AND */
+ tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
+
+ /* Specify which queue to use or to drop */
+ if (rule->ring_cookie == RX_CLS_FLOW_DISC)
+ tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
+ else
+ tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
+
+ /* Only big enough entries can be clustered */
+ if (tab->index > (old_index + 2)) {
+ tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
+ tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
+ }
+
+ /* In rare cases the cache can be full while there is free space in hw */
+ if (tab->index > MAX_FILER_CACHE_IDX - 1)
+ return -EBUSY;
+
+ return 0;
+}
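+
+/* Example (assuming eth0): a rule such as
+ * "ethtool -U eth0 flow-type tcp4 src-ip 10.0.0.1 action 1" arrives
+ * here as an ethtool_rx_flow_spec; the TCP_V4_FLOW case emits the
+ * parse bits plus one attribute per specified field, and the
+ * ring_cookie (1) ends up in the queue bits of the last entry. */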
+
+/* Copy size filer entries */
+static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
+ struct gfar_filer_entry src[], s32 size)
+{
+ while (size > 0) {
+ size--;
+ dst[size].ctrl = src[size].ctrl;
+ dst[size].prop = src[size].prop;
+ }
+}
+
+/* Delete the contents of the filer-table between start and end
+ * and collapse them */
+static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
+{
+ int length;
+ if (end > MAX_FILER_CACHE_IDX || end < begin)
+ return -EINVAL;
+
+ end++;
+ length = end - begin;
+
+ /* Copy */
+ while (end < tab->index) {
+ tab->fe[begin].ctrl = tab->fe[end].ctrl;
+ tab->fe[begin++].prop = tab->fe[end++].prop;
+
+ }
+ /* Fill up with don't cares */
+ while (begin < tab->index) {
+ tab->fe[begin].ctrl = 0x60;
+ tab->fe[begin].prop = 0xFFFFFFFF;
+ begin++;
+ }
+
+ tab->index -= length;
+ return 0;
+}
+
+/* Make space on the wanted location */
+static int gfar_expand_filer_entries(u32 begin, u32 length,
+ struct filer_table *tab)
+{
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
+ > MAX_FILER_CACHE_IDX)
+ return -EINVAL;
+
+ gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
+ tab->index - length + 1);
+
+ tab->index += length;
+ return 0;
+}
+
+static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+ == (RQFCR_AND | RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
+static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+ == (RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
+/*
+ * Uses the hardware's clustering option to reduce
+ * the number of filer table entries
+ */
+static void gfar_cluster_filer(struct filer_table *tab)
+{
+ s32 i = -1, j, iend, jend;
+
+ while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
+ j = i;
+ while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
+ /*
+ * The cluster entry itself and the previous one
+ * (a mask) must be identical!
+ */
+ if (tab->fe[i].ctrl != tab->fe[j].ctrl)
+ break;
+ if (tab->fe[i].prop != tab->fe[j].prop)
+ break;
+ if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
+ break;
+ if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
+ break;
+ iend = gfar_get_next_cluster_end(i, tab);
+ jend = gfar_get_next_cluster_end(j, tab);
+ if (jend == -1 || iend == -1)
+ break;
+ /*
+ * First we make some free space, where our cluster
+ * element should be. Then we copy it there and finally
+ * delete it from its old location.
+ */
+
+ if (gfar_expand_filer_entries(iend, (jend - j), tab)
+ == -EINVAL)
+ break;
+
+ gfar_copy_filer_entries(&(tab->fe[iend + 1]),
+ &(tab->fe[jend + 1]), jend - j);
+
+ if (gfar_trim_filer_entries(jend - 1,
+ jend + (jend - j), tab) == -EINVAL)
+ return;
+
+ /* Mask out cluster bit */
+ tab->fe[iend].ctrl &= ~(RQFCR_CLE);
+ }
+ }
+}
+
+/* Swaps the masked bits of a1<>a2 and b1<>b2 */
+static void gfar_swap_bits(struct gfar_filer_entry *a1,
+ struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
+{
+ u32 temp[4];
+ temp[0] = a1->ctrl & mask;
+ temp[1] = a2->ctrl & mask;
+ temp[2] = b1->ctrl & mask;
+ temp[3] = b2->ctrl & mask;
+
+ a1->ctrl &= ~mask;
+ a2->ctrl &= ~mask;
+ b1->ctrl &= ~mask;
+ b2->ctrl &= ~mask;
+
+ a1->ctrl |= temp[1];
+ a2->ctrl |= temp[0];
+ b1->ctrl |= temp[3];
+ b2->ctrl |= temp[2];
+}
+
+/*
+ * Generate a list consisting of masks values with their start and
+ * end of validity and block as indicator for parts belonging
+ * together (glued by ANDs) in mask_table
+ */
+static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *tab)
+{
+ u32 i, and_index = 0, block_index = 1;
+
+ for (i = 0; i < tab->index; i++) {
+
+ /* LSByte of control = 0 sets a mask */
+ if (!(tab->fe[i].ctrl & 0xF)) {
+ mask_table[and_index].mask = tab->fe[i].prop;
+ mask_table[and_index].start = i;
+ mask_table[and_index].block = block_index;
+ if (and_index >= 1)
+ mask_table[and_index - 1].end = i - 1;
+ and_index++;
+ }
+ /* cluster starts and ends are kept separate because they must
+ * hold their position */
+ if (tab->fe[i].ctrl & RQFCR_CLE)
+ block_index++;
+ /* An unset AND indicates the end of a dependent block */
+ if (!(tab->fe[i].ctrl & RQFCR_AND))
+ block_index++;
+
+ }
+
+ mask_table[and_index - 1].end = i - 1;
+
+ return and_index;
+}
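+
+/* Rough example: for the entries [mask A, rule, mask B, rule&AND, rule]
+ * two list entries are generated - one for A, one for B - and the block
+ * number advances at every entry whose AND bit is clear (and at cluster
+ * boundaries), so the glued rules stay grouped together. */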
+
+/*
+ * Sorts the entries of mask_table by the values of the masks.
+ * Important: The 0xFF80 flags of the first and last entry of a
+ * block must hold their position (which queue, CLusterEnable, ReJEct,
+ * AND)
+ */
+static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *temp_table, u32 and_index)
+{
+ /* Pointer to compare function (_asc or _desc) */
+ int (*gfar_comp)(const void *, const void *);
+
+ u32 i, size = 0, start = 0, prev = 1;
+ u32 old_first, old_last, new_first, new_last;
+
+ gfar_comp = &gfar_comp_desc;
+
+ for (i = 0; i < and_index; i++) {
+
+ if (prev != mask_table[i].block) {
+ old_first = mask_table[start].start + 1;
+ old_last = mask_table[i - 1].end;
+ sort(mask_table + start, size,
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
+
+ /* Toggle the sort order for every block. This makes
+ * the result more efficient! */
+ if (gfar_comp == gfar_comp_desc)
+ gfar_comp = &gfar_comp_asc;
+ else
+ gfar_comp = &gfar_comp_desc;
+
+ new_first = mask_table[start].start + 1;
+ new_last = mask_table[i - 1].end;
+
+ gfar_swap_bits(&temp_table->fe[new_first],
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND
+ );
+
+ start = i;
+ size = 0;
+ }
+ size++;
+ prev = mask_table[i].block;
+ }
+
+}
+
+/*
+ * Reduces the number of masks needed in the filer table to save entries.
+ * This is done by sorting the masks of a dependent block. A dependent block
+ * is identified by gluing ANDs or CLE. The sorting order toggles after every
+ * block. Of course, entries in the scope of a mask must change their location
+ * with it.
+ */
+static int gfar_optimize_filer_masks(struct filer_table *tab)
+{
+ struct filer_table *temp_table;
+ struct gfar_mask_entry *mask_table;
+
+ u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
+ s32 ret = 0;
+
+ /* We need a copy of the filer table because
+ * we want to change its order */
+ temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+ if (temp_table == NULL)
+ return -ENOMEM;
+ memcpy(temp_table, tab, sizeof(*temp_table));
+
+ mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
+
+ if (mask_table == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ and_index = gfar_generate_mask_table(mask_table, tab);
+
+ gfar_sort_mask_table(mask_table, temp_table, and_index);
+
+ /* Now we can copy the data from our duplicated filer table to
+ * the real one in the order the mask table says */
+ for (i = 0; i < and_index; i++) {
+ size = mask_table[i].end - mask_table[i].start + 1;
+ gfar_copy_filer_entries(&(tab->fe[j]),
+ &(temp_table->fe[mask_table[i].start]), size);
+ j += size;
+ }
+
+ /* And finally we just have to check for duplicated masks and drop the
+ * second ones */
+ for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ previous_mask = i++;
+ break;
+ }
+ }
+ for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
+ /* Two identical ones found!
+ * So drop the second one! */
+ gfar_trim_filer_entries(i, i, tab);
+ } else
+ /* Not identical! */
+ previous_mask = i;
+ }
+ }
+
+ kfree(mask_table);
+end:
+ kfree(temp_table);
+ return ret;
+}
+
+/* Write the bit-pattern from software's buffer to hardware registers */
+static int gfar_write_filer_table(struct gfar_private *priv,
+ struct filer_table *tab)
+{
+ u32 i = 0;
+ if (tab->index > MAX_FILER_IDX - 1)
+ return -EBUSY;
+
+ /* Avoid an inconsistent filer table being processed */
+ lock_rx_qs(priv);
+
+ /* Fill regular entries */
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
+ gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
+ /* Fill the rest with fall-throughs */
+ for (; i < MAX_FILER_IDX - 1; i++)
+ gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
+ /* Last entry must be default accept
+ * because that's what people expect */
+ gfar_write_filer(priv, i, 0x20, 0x0);
+
+ unlock_rx_qs(priv);
+
+ return 0;
+}
+
+static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
+ struct gfar_private *priv)
+{
+
+ if (flow->flow_type & FLOW_EXT) {
+ if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
+ netdev_warn(priv->ndev,
+ "User-specific data not supported!\n");
+ if (~flow->m_ext.vlan_etype)
+ netdev_warn(priv->ndev,
+ "VLAN-etype not supported!\n");
+ }
+ if (flow->flow_type == IP_USER_FLOW)
+ if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ netdev_warn(priv->ndev,
+ "IP-Version differing from IPv4 not supported!\n");
+
+ return 0;
+}
+
+static int gfar_process_filer_changes(struct gfar_private *priv)
+{
+ struct ethtool_flow_spec_container *j;
+ struct filer_table *tab;
+ s32 i = 0;
+ s32 ret = 0;
+
+ /* So index is set to zero, too! */
+ tab = kzalloc(sizeof(*tab), GFP_KERNEL);
+ if (tab == NULL)
+ return -ENOMEM;
+
+ /* Now convert the existing filer data from flow_spec into
+ * the filer table's binary format */
+ list_for_each_entry(j, &priv->rx_list.list, list) {
+ ret = gfar_convert_to_filer(&j->fs, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+ if (ret == -1) {
+ netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+ goto end;
+ }
+ }
+
+ i = tab->index;
+
+ /* Optimizations to save entries */
+ gfar_cluster_filer(tab);
+ gfar_optimize_filer_masks(tab);
+
+ pr_debug("\n\tSummary:\n"
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, 100 - (100 * tab->index) / i);
+
+ /* Write everything to hardware */
+ ret = gfar_write_filer_table(priv, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+
+end:
+ kfree(tab);
+ return ret;
+}
+
+static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ u32 i = 0;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xFF;
+
+ flow->m_ext.vlan_etype ^= 0xFFFF;
+ flow->m_ext.vlan_tci ^= 0xFFFF;
+ flow->m_ext.data[0] ^= ~0;
+ flow->m_ext.data[1] ^= ~0;
+}
+
+static int gfar_add_cls(struct gfar_private *priv,
+ struct ethtool_rx_flow_spec *flow)
+{
+ struct ethtool_flow_spec_container *temp, *comp;
+ int ret = 0;
+
+ temp = kmalloc(sizeof(*temp), GFP_KERNEL);
+ if (temp == NULL)
+ return -ENOMEM;
+ memcpy(&temp->fs, flow, sizeof(temp->fs));
+
+ gfar_invert_masks(&temp->fs);
+ ret = gfar_check_capability(&temp->fs, priv);
+ if (ret)
+ goto clean_mem;
+ /* Link in the new element at the right @location */
+ if (list_empty(&priv->rx_list.list)) {
+ ret = gfar_check_filer_hardware(priv);
+ if (ret != 0)
+ goto clean_mem;
+ list_add(&temp->list, &priv->rx_list.list);
+ goto process;
+ } else {
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location > flow->location) {
+ list_add_tail(&temp->list, &comp->list);
+ goto process;
+ }
+ if (comp->fs.location == flow->location) {
+ netdev_err(priv->ndev,
+ "Rule not added: ID %d not free!\n",
+ flow->location);
+ ret = -EBUSY;
+ goto clean_mem;
+ }
+ }
+ list_add_tail(&temp->list, &priv->rx_list.list);
+ }
+
+process:
+ ret = gfar_process_filer_changes(priv);
+ if (ret)
+ goto clean_list;
+ priv->rx_list.count++;
+ return ret;
+
+clean_list:
+ list_del(&temp->list);
+clean_mem:
+ kfree(temp);
+ return ret;
+}
+
+static int gfar_del_cls(struct gfar_private *priv, u32 loc)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 ret = -EINVAL;
+
+ if (list_empty(&priv->rx_list.list))
+ return ret;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == loc) {
+ list_del(&comp->list);
+ kfree(comp);
+ priv->rx_list.count--;
+ gfar_process_filer_changes(priv);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+
+}
+
+static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 ret = -EINVAL;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
+ gfar_invert_masks(&cmd->fs);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls_all(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 i = 0;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
++ if (i == cmd->rule_cnt)
++ return -EMSGSIZE;
++ rule_locs[i] = comp->fs.location;
++ i++;
+ }
+
+ cmd->data = MAX_FILER_IDX;
+ cmd->rule_cnt = i;
+
+ return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = gfar_set_hash_opts(priv, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gfar_add_cls(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gfar_del_cls(priv, cmd->fs.location);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
+}
+
+static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->num_rx_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = priv->rx_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gfar_get_cls(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gfar_get_cls_all(priv, cmd, rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+const struct ethtool_ops gfar_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .set_settings = gfar_ssettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = gfar_gcoalesce,
+ .set_coalesce = gfar_scoalesce,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_strings = gfar_gstrings,
+ .get_sset_count = gfar_sset_count,
+ .get_ethtool_stats = gfar_fill_stats,
+ .get_msglevel = gfar_get_msglevel,
+ .set_msglevel = gfar_set_msglevel,
+#ifdef CONFIG_PM
+ .get_wol = gfar_get_wol,
+ .set_wol = gfar_set_wol,
+#endif
+ .set_rxnfc = gfar_set_nfc,
+ .get_rxnfc = gfar_get_nfc,
+};
--- /dev/null
- static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <asm/hvcall.h>
+#include <linux/atomic.h>
+#include <asm/vio.h>
+#include <asm/iommu.h>
+#include <asm/firmware.h>
+
+#include "ibmveth.h"
+
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
+
+static struct kobj_type ktype_veth_pool;
+
+
+static const char ibmveth_driver_name[] = "ibmveth";
+static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.04"
+
+MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ibmveth_driver_version);
+
+static unsigned int tx_copybreak __read_mostly = 128;
+module_param(tx_copybreak, uint, 0644);
+MODULE_PARM_DESC(tx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on transmit");
+
+static unsigned int rx_copybreak __read_mostly = 128;
+module_param(rx_copybreak, uint, 0644);
+MODULE_PARM_DESC(rx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static unsigned int rx_flush __read_mostly = 0;
+module_param(rx_flush, uint, 0644);
+MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+
+struct ibmveth_stat {
+ char name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
+#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
+
+struct ibmveth_stat ibmveth_stats[] = {
+ { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
+ { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
+ { "replenish_add_buff_failure",
+ IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+ { "replenish_add_buff_success",
+ IBMVETH_STAT_OFF(replenish_add_buff_success) },
+ { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
+ { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
+ { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
+ { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
+ { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
+ { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+};
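+
+/* These counters are exported through "ethtool -S" (e.g.
+ * "ethtool -S eth0" for an interface named eth0); each entry is read
+ * via IBMVETH_GET_STAT() using the byte offset recorded above. */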
+
+/* simple methods of getting data from the current rxq entry */
+static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
+{
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+}
+
+static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
+{
+ return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
+ IBMVETH_RXQ_TOGGLE_SHIFT;
+}
+
+static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
+}
+
+static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
+}
+
+static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
+}
+
+static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
+{
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+}
+
+static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
+}
+
+/* setup the initial settings for a buffer pool */
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
+ u32 pool_index, u32 pool_size,
+ u32 buff_size, u32 pool_active)
+{
+ pool->size = pool_size;
+ pool->index = pool_index;
+ pool->buff_size = buff_size;
+ pool->threshold = pool_size * 7 / 8;
+ pool->active = pool_active;
+}
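+
+/* Worked example: a pool of size 512 gets threshold = 512 * 7 / 8 = 448,
+ * so the replenish task refills the pool once fewer than 448 buffers
+ * remain available. */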
+
+/* allocate and set up a buffer pool - called during open */
+static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
+
+ if (!pool->free_map)
+ return -1;
+
+ pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
+ if (!pool->dma_addr) {
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
+
+ if (!pool->skbuff) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
+
+ for (i = 0; i < pool->size; ++i)
+ pool->free_map[i] = i;
+
+ atomic_set(&pool->available, 0);
+ pool->producer_index = 0;
+ pool->consumer_index = 0;
+
+ return 0;
+}
+
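+/* Flush the buffer from the data cache one cache block at a time;
+ * "dcbfl" is the Power data-cache-block-flush (local form)
+ * instruction, stepped through the buffer in SMP_CACHE_BYTES strides.
+ */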
+static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+{
+ unsigned long offset;
+
+ for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+ asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+}
+
+/* replenish the buffers for a pool. note that we don't need to
+ * skb_reserve these since they are used for incoming...
+ */
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
+{
+ u32 i;
+ u32 count = pool->size - atomic_read(&pool->available);
+ u32 buffers_added = 0;
+ struct sk_buff *skb;
+ unsigned int free_index, index;
+ u64 correlator;
+ unsigned long lpar_rc;
+ dma_addr_t dma_addr;
+
+ mb();
+
+ for (i = 0; i < count; ++i) {
+ union ibmveth_buf_desc desc;
+
+ skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+
+ if (!skb) {
+ netdev_dbg(adapter->netdev,
+ "replenish: unable to allocate skb\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
+
+ free_index = pool->consumer_index;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ index = pool->free_map[free_index];
+
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+ BUG_ON(pool->skbuff[index] != NULL);
+
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ pool->buff_size, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto failure;
+
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+
+ correlator = ((u64)pool->index << 32) | index;
+ *(u64 *)skb->data = correlator;
+
+ desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
+ desc.fields.address = dma_addr;
+
+ if (rx_flush) {
+ unsigned int len = min(pool->buff_size,
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
+ ibmveth_flush_buffer(skb->data, len);
+ }
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
+ desc.desc);
+
+ if (lpar_rc != H_SUCCESS) {
+ goto failure;
+ } else {
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
+ }
+ }
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
+ return;
+
+failure:
+ pool->free_map[free_index] = index;
+ pool->skbuff[index] = NULL;
+ if (pool->consumer_index == 0)
+ pool->consumer_index = pool->size - 1;
+ else
+ pool->consumer_index--;
+ if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
+}
+
+/* replenish routine */
+static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+{
+ int i;
+
+ adapter->replenish_task_cycles++;
+
+ for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
+ struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
+
+ if (pool->active &&
+ (atomic_read(&pool->available) < pool->threshold))
+ ibmveth_replenish_buffer_pool(adapter, pool);
+ }
+
+ adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+ 4096 - 8);
+}
+
+/* empty and free a buffer pool - also used to do cleanup in error paths */
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+
+ if (pool->skbuff && pool->dma_addr) {
+ for (i = 0; i < pool->size; ++i) {
+ struct sk_buff *skb = pool->skbuff[i];
+ if (skb) {
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[i],
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ pool->skbuff[i] = NULL;
+ }
+ }
+ }
+
+ if (pool->dma_addr) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+ }
+
+ if (pool->skbuff) {
+ kfree(pool->skbuff);
+ pool->skbuff = NULL;
+ }
+}
+
+/* remove a buffer from a pool */
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator)
+{
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ unsigned int free_index;
+ struct sk_buff *skb;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ skb = adapter->rx_buff_pool[pool].skbuff[index];
+
+ BUG_ON(skb == NULL);
+
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+
+ free_index = adapter->rx_buff_pool[pool].producer_index;
+ adapter->rx_buff_pool[pool].producer_index++;
+ if (adapter->rx_buff_pool[pool].producer_index >=
+ adapter->rx_buff_pool[pool].size)
+ adapter->rx_buff_pool[pool].producer_index = 0;
+ adapter->rx_buff_pool[pool].free_map[free_index] = index;
+
+ mb();
+
+ atomic_dec(&(adapter->rx_buff_pool[pool].available));
+}
+
+/* get the current buffer on the rx queue */
+static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
+{
+ u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ return adapter->rx_buff_pool[pool].skbuff[index];
+}
+
+/* recycle the current buffer on the rx queue */
- return;
++static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+{
+ u32 q_index = adapter->rx_queue.index;
+ u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ union ibmveth_buf_desc desc;
+ unsigned long lpar_rc;
++ int ret = 1;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ if (!adapter->rx_buff_pool[pool].active) {
+ ibmveth_rxq_harvest_buffer(adapter);
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
- long ret, ret6;
++ goto out;
+ }
+
+ desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_buff_pool[pool].buff_size;
+ desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
+
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
+ "during recycle rc=%ld", lpar_rc);
+ ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
++ ret = 0;
+ }
+
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
++
++out:
++ return ret;
+}
+
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
+{
+ ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
+}
+
+static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
+{
+ int i;
+ struct device *dev = &adapter->vdev->dev;
+
+ if (adapter->buffer_list_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->buffer_list_addr);
+ adapter->buffer_list_addr = NULL;
+ }
+
+ if (adapter->filter_list_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->filter_list_addr);
+ adapter->filter_list_addr = NULL;
+ }
+
+ if (adapter->rx_queue.queue_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+ dma_unmap_single(dev,
+ adapter->rx_queue.queue_dma,
+ adapter->rx_queue.queue_len,
+ DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->rx_queue.queue_addr);
+ adapter->rx_queue.queue_addr = NULL;
+ }
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+
+ if (adapter->bounce_buffer != NULL) {
+ if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->bounce_buffer);
+ adapter->bounce_buffer = NULL;
+ }
+}
+
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+ int rc, try_again = 1;
+
+ /*
+ * After a kexec the adapter will still be open, so our attempt to
+ * open it will fail. If we get a failure we free the adapter and
+ * try again, but only once.
+ */
+retry:
+ rc = h_register_logical_lan(adapter->vdev->unit_address,
+ adapter->buffer_list_dma, rxq_desc.desc,
+ adapter->filter_list_dma, mac_address);
+
+ if (rc != H_SUCCESS && try_again) {
+ do {
+ rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+ try_again = 0;
+ goto retry;
+ }
+
+ return rc;
+}
+
+static int ibmveth_open(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ u64 mac_address = 0;
+ int rxq_entries = 1;
+ unsigned long lpar_rc;
+ int rc;
+ union ibmveth_buf_desc rxq_desc;
+ int i;
+ struct device *dev;
+
+ netdev_dbg(netdev, "open starting\n");
+
+ napi_enable(&adapter->napi);
+
+ for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ rxq_entries += adapter->rx_buff_pool[i].size;
+
+ adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+
+ if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+ rxq_entries;
+ adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+ GFP_KERNEL);
+
+ if (!adapter->rx_queue.queue_addr) {
+ netdev_err(netdev, "unable to allocate rx queue pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ dev = &adapter->vdev->dev;
+
+ adapter->buffer_list_dma = dma_map_single(dev,
+ adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = dma_map_single(dev,
+ adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = dma_map_single(dev,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+ (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+ (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
+ netdev_err(netdev, "unable to map filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.num_slots = rxq_entries;
+ adapter->rx_queue.toggle = 1;
+
+ memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
+ mac_address = mac_address >> 16;
+
+ rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_queue.queue_len;
+ rxq_desc.fields.address = adapter->rx_queue.queue_dma;
+
+ netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
+ netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
+ netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+
+ h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+
+ lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
+ lpar_rc);
+ netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
+ "desc:0x%llx MAC:0x%llx\n",
+ adapter->buffer_list_dma,
+ adapter->filter_list_dma,
+ rxq_desc.desc,
+ mac_address);
+ rc = -ENONET;
+ goto err_out;
+ }
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+ netdev_err(netdev, "unable to alloc pool\n");
+ adapter->rx_buff_pool[i].active = 0;
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
+ rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
+ netdev);
+ if (rc != 0) {
+ netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
+ netdev->irq, rc);
+ do {
+ rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+ goto err_out;
+ }
+
+ adapter->bounce_buffer =
+ kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ netdev_err(netdev, "unable to allocate bounce buffer\n");
+ rc = -ENOMEM;
+ goto err_out_free_irq;
+ }
+ adapter->bounce_buffer_dma =
+ dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+ netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ netdev_err(netdev, "unable to map bounce buffer\n");
+ rc = -ENOMEM;
+ goto err_out_free_irq;
+ }
+
+ netdev_dbg(netdev, "initial replenish cycle\n");
+ ibmveth_interrupt(netdev->irq, netdev);
+
+ netif_start_queue(netdev);
+
+ netdev_dbg(netdev, "open complete\n");
+
+ return 0;
+
+err_out_free_irq:
+ free_irq(netdev->irq, netdev);
+err_out:
+ ibmveth_cleanup(adapter);
+ napi_disable(&adapter->napi);
+ return rc;
+}
+
+static int ibmveth_close(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ long lpar_rc;
+
+ netdev_dbg(netdev, "close starting\n");
+
+ napi_disable(&adapter->napi);
+
+ if (!adapter->pool_config)
+ netif_stop_queue(netdev);
+
+ h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+
+ do {
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_free_logical_lan failed with %lx, "
+ "continuing with close\n", lpar_rc);
+ }
+
+ free_irq(netdev->irq, netdev);
+
+ adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+ 4096 - 8);
+
+ ibmveth_cleanup(adapter);
+
+ netdev_dbg(netdev, "close complete\n");
+
+ return 0;
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 1;
+ return 0;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
+ strncpy(info->version, ibmveth_driver_version,
+ sizeof(info->version) - 1);
+}
+
+static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since the ibmveth firmware interface does not have the
+ * concept of separate tx/rx checksum offload enable, if rx
+ * checksum is disabled we also have to disable tx checksum
+ * offload. Once we disable rx checksum offload, we are no
+ * longer allowed to send tx buffers that are not properly
+ * checksummed.
+ */
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_ALL_CSUM;
+
+ return features;
+}
+
+static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ unsigned long set_attr, clr_attr, ret_attr;
+ unsigned long set_attr6, clr_attr6;
+ long ret, ret4, ret6;
+ int rc1 = 0, rc2 = 0;
+ int restart = 0;
+
+ if (netif_running(dev)) {
+ restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(dev);
+ adapter->pool_config = 0;
+ }
+
+ set_attr = 0;
+ clr_attr = 0;
+ set_attr6 = 0;
+ clr_attr6 = 0;
+
+ if (data) {
+ set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ } else {
+ clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ }
+
+ ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
+ !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+ (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
+ ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ set_attr, &ret_attr);
+
+ if (ret4 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv4 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret4);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ } else {
+ adapter->fw_ipv4_csum_support = data;
+ }
+
+ ret6 = h_illan_attributes(adapter->vdev->unit_address,
+ clr_attr6, set_attr6, &ret_attr);
+
+ if (ret6 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv6 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret6);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IPV6_CSUM;
+
+ } else
+ adapter->fw_ipv6_csum_support = data;
+
+ if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
+ adapter->rx_csum = data;
+ else
+ rc1 = -EIO;
+ } else {
+ rc1 = -EIO;
+ netdev_err(dev, "unable to change checksum offload settings."
+ " %d rc=%ld ret_attr=%lx\n", data, ret,
+ ret_attr);
+ }
+
+ if (restart)
+ rc2 = ibmveth_open(dev);
+
+ return rc1 ? rc1 : rc2;
+}
+
+static int ibmveth_set_features(struct net_device *dev, u32 features)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ int rx_csum = !!(features & NETIF_F_RXCSUM);
+ int rc;
+
+ if (rx_csum == adapter->rx_csum)
+ return 0;
+
+ rc = ibmveth_set_csum_offload(dev, rx_csum);
+ if (rc && !adapter->rx_csum)
+ dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+
+ return rc;
+}
+
+static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static int ibmveth_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ibmveth_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ibmveth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
+ data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ibmveth_get_strings,
+ .get_sset_count = ibmveth_get_sset_count,
+ .get_ethtool_stats = ibmveth_get_ethtool_stats,
+};
+
+static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
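+/* Byte offset of an address within a 4 KB page. */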
+#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
+
+static int ibmveth_send(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc *descs)
+{
+ unsigned long correlator;
+ unsigned int retry_count;
+ unsigned long ret;
+
+ /*
+ * The retry count sets a maximum for the number of broadcast and
+ * multicast destinations within the system.
+ */
+ retry_count = 1024;
+ correlator = 0;
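+ /* On H_BUSY the hypervisor appears to hand back an updated
+ * correlator so the send can resume where it left off.
+ */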
+ do {
+ ret = h_send_logical_lan(adapter->vdev->unit_address,
+ descs[0].desc, descs[1].desc,
+ descs[2].desc, descs[3].desc,
+ descs[4].desc, descs[5].desc,
+ correlator, &correlator);
+ } while ((ret == H_BUSY) && (retry_count--));
+
+ if (ret != H_SUCCESS && ret != H_DROPPED) {
+ netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
+ "with rc=%ld\n", ret);
+ return 1;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int desc_flags;
+ union ibmveth_buf_desc descs[6];
+ int last, i;
+ int force_bounce = 0;
+ dma_addr_t dma_addr;
+
+ /*
+ * veth handles a maximum of 6 segments including the header, so
+ * we have to linearize the skb if there are more than this.
+ */
+ if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ /* veth can't checksum offload UDP */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ((skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol != IPPROTO_TCP) ||
+ (skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
+ skb_checksum_help(skb)) {
+
+ netdev_err(netdev, "tx: failed to checksum packet\n");
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ desc_flags = IBMVETH_BUF_VALID;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ unsigned char *buf = skb_transport_header(skb) +
+ skb->csum_offset;
+
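+ /* Flag the frame as carrying no checksum but known good; the
+ * actual checksum field is zeroed below.
+ */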
+ desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
+
+ /* Need to zero out the checksum */
+ buf[0] = 0;
+ buf[1] = 0;
+ }
+
+retry_bounce:
+ memset(descs, 0, sizeof(descs));
+
+ /*
+ * If a linear packet is below the rx threshold then
+ * copy it into the static bounce buffer. This avoids the
+ * cost of a TCE insert and remove.
+ */
+ if (force_bounce || (!skb_is_nonlinear(skb) &&
+ (skb->len < tx_copybreak))) {
+ skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+ skb->len);
+
+ descs[0].fields.flags_len = desc_flags | skb->len;
+ descs[0].fields.address = adapter->bounce_buffer_dma;
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ goto out;
+ }
+
+ /* Map the header */
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed;
+
+ descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ descs[0].fields.address = dma_addr;
+
+ /* Map the frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed_frags;
+
+ descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.address = dma_addr;
+ }
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+out:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+map_failed_frags:
+ last = i+1;
+ for (i = 0; i < last; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+map_failed:
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ netdev_err(netdev, "tx: unable to map xmit buffer\n");
+ adapter->tx_map_failed++;
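+ /* Fall back to the bounce buffer, which was DMA-mapped at open
+ * time, so the retry cannot fail on another mapping error.
+ */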
+ skb_linearize(skb);
+ force_bounce = 1;
+ goto retry_bounce;
+}
+
+static int ibmveth_poll(struct napi_struct *napi, int budget)
+{
+ struct ibmveth_adapter *adapter =
+ container_of(napi, struct ibmveth_adapter, napi);
+ struct net_device *netdev = adapter->netdev;
+ int frames_processed = 0;
+ unsigned long lpar_rc;
+
+restart_poll:
+ do {
+ if (!ibmveth_rxq_pending_buffer(adapter))
+ break;
+
+ smp_rmb();
+ if (!ibmveth_rxq_buffer_valid(adapter)) {
+ wmb(); /* suggested by larson1 */
+ adapter->rx_invalid_buffer++;
+ netdev_dbg(netdev, "recycling invalid buffer\n");
+ ibmveth_rxq_recycle_buffer(adapter);
+ } else {
+ struct sk_buff *skb, *new_skb;
+ int length = ibmveth_rxq_frame_length(adapter);
+ int offset = ibmveth_rxq_frame_offset(adapter);
+ int csum_good = ibmveth_rxq_csum_good(adapter);
+
+ skb = ibmveth_rxq_get_buffer(adapter);
+
+ new_skb = NULL;
+ if (length < rx_copybreak)
+ new_skb = netdev_alloc_skb(netdev, length);
+
+ if (new_skb) {
+ skb_copy_to_linear_data(new_skb,
+ skb->data + offset,
+ length);
+ if (rx_flush)
+ ibmveth_flush_buffer(skb->data,
+ length + offset);
+ if (!ibmveth_rxq_recycle_buffer(adapter))
+ kfree_skb(skb);
+ skb = new_skb;
+ } else {
+ ibmveth_rxq_harvest_buffer(adapter);
+ skb_reserve(skb, offset);
+ }
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (csum_good)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb); /* send it up */
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+ frames_processed++;
+ }
+ } while (frames_processed < budget);
+
+ ibmveth_replenish_task(adapter);
+
+ if (frames_processed < budget) {
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+ */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_ENABLE);
+
+ BUG_ON(lpar_rc != H_SUCCESS);
+
+ napi_complete(napi);
+
+ if (ibmveth_rxq_pending_buffer(adapter) &&
+ napi_reschedule(napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ goto restart_poll;
+ }
+ }
+
+ return frames_processed;
+}
+
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *netdev = dev_instance;
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned long lpar_rc;
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ BUG_ON(lpar_rc != H_SUCCESS);
+ __napi_schedule(&adapter->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+static void ibmveth_set_multicast_list(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned long lpar_rc;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "entering promisc mode\n", lpar_rc);
+ }
+ } else {
+ struct netdev_hw_addr *ha;
+ /* clear the filter table & disable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering |
+ IbmVethMcastClearFilterTable,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "attempting to clear filter table\n",
+ lpar_rc);
+ }
+ /* add the addresses to the filter table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ /* add the multicast address to the filter table */
+ unsigned long mcast_addr = 0;
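+ /* right-justify the 6-byte address in the u64 hcall argument */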
+ memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastAddFilter,
+ mcast_addr);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld "
+ "when adding an entry to the filter "
+ "table\n", lpar_rc);
+ }
+ }
+
+ /* re-enable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableFiltering,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "enabling filtering\n", lpar_rc);
+ }
+ }
+}
+
+static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ struct vio_dev *viodev = adapter->vdev;
+ int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
+ int i, rc;
+ int need_restart = 0;
+
+ if (new_mtu < IBMVETH_MIN_MTU)
+ return -EINVAL;
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
+ break;
+
+ if (i == IBMVETH_NUM_BUFF_POOLS)
+ return -EINVAL;
+
+ /* Deactivate all the buffer pools so that the next loop can activate
+ only the buffer pools necessary to hold the new MTU */
+ if (netif_running(adapter->netdev)) {
+ need_restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(adapter->netdev);
+ adapter->pool_config = 0;
+ }
+
+ /* Look for an active buffer pool that can hold the new MTU */
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ adapter->rx_buff_pool[i].active = 1;
+
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+ dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
+ if (need_restart) {
+ return ibmveth_open(adapter->netdev);
+ }
+ return 0;
+ }
+ }
+
+ if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+ return rc;
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+ ibmveth_replenish_task(netdev_priv(dev));
+ ibmveth_interrupt(dev->irq, dev);
+}
+#endif
+
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+ struct ibmveth_adapter *adapter;
+ unsigned long ret;
+ int i;
+ int rxqentries = 1;
+
+ /* netdev inits at probe time along with the structures we need below*/
+ if (netdev == NULL)
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+ adapter = netdev_priv(netdev);
+
+ ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ /* add the size of the active receive buffers */
+ if (adapter->rx_buff_pool[i].active)
+ ret +=
+ adapter->rx_buff_pool[i].size *
+ IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+ buff_size);
+ rxqentries += adapter->rx_buff_pool[i].size;
+ }
+ /* add the size of the receive queue entries */
+ ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+ return ret;
+}
+
+static const struct net_device_ops ibmveth_netdev_ops = {
+ .ndo_open = ibmveth_open,
+ .ndo_stop = ibmveth_close,
+ .ndo_start_xmit = ibmveth_start_xmit,
+ .ndo_set_rx_mode = ibmveth_set_multicast_list,
+ .ndo_do_ioctl = ibmveth_ioctl,
+ .ndo_change_mtu = ibmveth_change_mtu,
+ .ndo_fix_features = ibmveth_fix_features,
+ .ndo_set_features = ibmveth_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ibmveth_poll_controller,
+#endif
+};
+
+static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+{
+ int rc, i;
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+ unsigned int *mcastFilterSize_p;
+
+ dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+ NULL);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
+ }
+
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE, NULL);
+ if (!mcastFilterSize_p) {
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+ "attribute\n");
+ return -EINVAL;
+ }
+
+ netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
+
+ if (!netdev)
+ return -ENOMEM;
+
+ adapter = netdev_priv(netdev);
+ dev_set_drvdata(&dev->dev, netdev);
+
+ adapter->vdev = dev;
+ adapter->netdev = netdev;
+ adapter->mcastFilterSize = *mcastFilterSize_p;
+ adapter->pool_config = 0;
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+
+ /*
+ * Some older boxes running PHYP non-natively have an OF that returns
+ * an 8-byte local-mac-address field (and the first 2 bytes have to be
+ * ignored) while newer boxes' OF return a 6-byte field. Note that
+ * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+ * The RPA doc specifies that the first byte must be 10b, so we'll
+ * just look for it to solve this 8 vs. 6 byte field issue
+ */
+ if ((*mac_addr_p & 0x3) != 0x02)
+ mac_addr_p += 2;
+
+ adapter->mac_addr = 0;
+ memcpy(&adapter->mac_addr, mac_addr_p, 6);
+
+ netdev->irq = dev->irq;
+ netdev->netdev_ops = &ibmveth_netdev_ops;
+ netdev->ethtool_ops = &netdev_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &dev->dev);
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features |= netdev->hw_features;
+
+ memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+ int error;
+
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+ error = kobject_init_and_add(kobj, &ktype_veth_pool,
+ &dev->dev.kobj, "pool%d", i);
+ if (!error)
+ kobject_uevent(kobj, KOBJ_ADD);
+ }
+
+ netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
+
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+
+ netdev_dbg(netdev, "registering netdev...\n");
+
+ ibmveth_set_features(netdev, netdev->features);
+
+ rc = register_netdev(netdev);
+
+ if (rc) {
+ netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
+ free_netdev(netdev);
+ return rc;
+ }
+
+ netdev_dbg(netdev, "registered\n");
+
+ return 0;
+}
+
+static int __devexit ibmveth_remove(struct vio_dev *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&dev->dev);
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ kobject_put(&adapter->rx_buff_pool[i].kobj);
+
+ unregister_netdev(netdev);
+
+ free_netdev(netdev);
+ dev_set_drvdata(&dev->dev, NULL);
+
+ return 0;
+}
+
+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+
+ if (attr == &veth_active_attr)
+ return sprintf(buf, "%d\n", pool->active);
+ else if (attr == &veth_num_attr)
+ return sprintf(buf, "%d\n", pool->size);
+ else if (attr == &veth_size_attr)
+ return sprintf(buf, "%d\n", pool->buff_size);
+ return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+ struct net_device *netdev = dev_get_drvdata(
+ container_of(kobj->parent, struct device, kobj));
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ long value = simple_strtol(buf, NULL, 10);
+ long rc;
+
+ if (attr == &veth_active_attr) {
+ if (value && !pool->active) {
+ if (netif_running(netdev)) {
+ if (ibmveth_alloc_buffer_pool(pool)) {
+ netdev_err(netdev,
+ "unable to alloc pool\n");
+ return -ENOMEM;
+ }
+ pool->active = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->active = 1;
+ }
+ } else if (!value && pool->active) {
+ int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+ int i;
+ /* Make sure there is a buffer pool with buffers that
+ can hold a packet of the size of the MTU */
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (pool == &adapter->rx_buff_pool[i])
+ continue;
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (mtu <= adapter->rx_buff_pool[i].buff_size)
+ break;
+ }
+
+ if (i == IBMVETH_NUM_BUFF_POOLS) {
+ netdev_err(netdev, "no active pool >= MTU\n");
+ return -EPERM;
+ }
+
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ pool->active = 0;
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ }
+ pool->active = 0;
+ }
+ } else if (attr == &veth_num_attr) {
+ if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
+ return -EINVAL;
+ } else {
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->size = value;
+ }
+ }
+ } else if (attr == &veth_size_attr) {
+ if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
+ return -EINVAL;
+ } else {
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->buff_size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->buff_size = value;
+ }
+ }
+ }
+
+ /* kick the interrupt handler to allocate/deallocate pools */
+ ibmveth_interrupt(netdev->irq, netdev);
+ return count;
+}
+
+
+#define ATTR(_name, _mode) \
+ struct attribute veth_##_name##_attr = { \
+ .name = __stringify(_name), .mode = _mode, \
+ };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute *veth_pool_attrs[] = {
+ &veth_active_attr,
+ &veth_num_attr,
+ &veth_size_attr,
+ NULL,
+};
+
+static const struct sysfs_ops veth_pool_ops = {
+ .show = veth_pool_show,
+ .store = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+ .release = NULL,
+ .sysfs_ops = &veth_pool_ops,
+ .default_attrs = veth_pool_attrs,
+};
+
+static int ibmveth_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
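+ /* Kick the interrupt handler to restart NAPI processing on resume. */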
+ ibmveth_interrupt(netdev->irq, netdev);
+ return 0;
+}
+
+static struct vio_device_id ibmveth_device_table[] __devinitdata = {
+ { "network", "IBM,l-lan"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+
+static struct dev_pm_ops ibmveth_pm_ops = {
+ .resume = ibmveth_resume
+};
+
+static struct vio_driver ibmveth_driver = {
+ .id_table = ibmveth_device_table,
+ .probe = ibmveth_probe,
+ .remove = ibmveth_remove,
+ .get_desired_dma = ibmveth_get_desired_dma,
+ .driver = {
+ .name = ibmveth_driver_name,
+ .owner = THIS_MODULE,
+ .pm = &ibmveth_pm_ops,
+ }
+};
+
+static int __init ibmveth_module_init(void)
+{
+ printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
+ ibmveth_driver_string, ibmveth_driver_version);
+
+ return vio_register_driver(&ibmveth_driver);
+}
+
+static void __exit ibmveth_module_exit(void)
+{
+ vio_unregister_driver(&ibmveth_driver);
+}
+
+module_init(ibmveth_module_init);
+module_exit(ibmveth_module_exit);
--- /dev/null
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+ */
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+#include "e1000.h"
+
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity);
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+static void e1000_clear_vfta(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+ bool link_up);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static void e1000_init_rx_addrs(struct e1000_hw *hw);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+
+/* IGP cable length table, indexed by AGC register value (lengths in meters) */
+static const
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+ 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+ 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+ 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100,
+ 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+ 110, 110,
+ 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120,
+ 120, 120
+};
+
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
+/**
+ * e1000_set_phy_type - Set the phy type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_set_phy_type(struct e1000_hw *hw)
+{
+ e_dbg("e1000_set_phy_type");
+
+ if (hw->mac_type == e1000_undefined)
+ return -E1000_ERR_PHY_TYPE;
+
+ switch (hw->phy_id) {
+ case M88E1000_E_PHY_ID:
+ case M88E1000_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1118_E_PHY_ID:
+ hw->phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID:
+ if (hw->mac_type == e1000_82541 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82547_rev_2)
+ hw->phy_type = e1000_phy_igp;
+ break;
+ case RTL8211B_PHY_ID:
+ hw->phy_type = e1000_phy_8211;
+ break;
+ case RTL8201N_PHY_ID:
+ hw->phy_type = e1000_phy_8201;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ hw->phy_type = e1000_phy_undefined;
+ return -E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+ u32 ret_val;
+ u16 phy_saved_data;
+
+ e_dbg("e1000_phy_init_script");
+
+ if (hw->phy_init_script) {
+ msleep(20);
+
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ /* Disable the PHY transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+ msleep(20);
+
+ e1000_write_phy_reg(hw, 0x0000, 0x0140);
+ msleep(5);
+
+ switch (hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+ e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+ e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+ e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+ e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+ e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+ e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+ e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+ e1000_write_phy_reg(hw, 0x2010, 0x0008);
+ break;
+
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
+
+ e1000_write_phy_reg(hw, 0x0000, 0x3300);
+ msleep(20);
+
+ /* Now enable the transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (hw->mac_type == e1000_82547) {
+ u16 fused, fine, coarse;
+
+ /* Move to analog registers page */
+ e1000_read_phy_reg(hw,
+ IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+ &fused);
+
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ e1000_read_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_STATUS,
+ &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse =
+ fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if (coarse >
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -=
+ IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if (coarse ==
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused =
+ (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse &
+ IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ e1000_write_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_CONTROL,
+ fused);
+ e1000_write_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+ }
+}
+
+/**
+ * e1000_set_mac_type - Set the mac type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ e_dbg("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ switch (hw->revision_id) {
+ case E1000_82542_2_0_REV_ID:
+ hw->mac_type = e1000_82542_rev2_0;
+ break;
+ case E1000_82542_2_1_REV_ID:
+ hw->mac_type = e1000_82542_rev2_1;
+ break;
+ default:
+ /* Invalid 82542 revision ID */
+ return -E1000_ERR_MAC_TYPE;
+ }
+ break;
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ hw->mac_type = e1000_82543;
+ break;
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ hw->mac_type = e1000_82544;
+ break;
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ hw->mac_type = e1000_82540;
+ break;
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ hw->mac_type = e1000_82545;
+ break;
+ case E1000_DEV_ID_82545GM_COPPER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82545GM_SERDES:
+ hw->mac_type = e1000_82545_rev_3;
+ break;
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ hw->mac_type = e1000_82546;
+ break;
+ case E1000_DEV_ID_82546GB_COPPER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ hw->mac_type = e1000_82546_rev_3;
+ break;
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
+ hw->mac_type = e1000_82541;
+ break;
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ hw->mac_type = e1000_82541_rev_2;
+ break;
+ case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
+ hw->mac_type = e1000_82547;
+ break;
+ case E1000_DEV_ID_82547GI:
+ hw->mac_type = e1000_82547_rev_2;
+ break;
+ case E1000_DEV_ID_INTEL_CE4100_GBE:
+ hw->mac_type = e1000_ce4100;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ return -E1000_ERR_MAC_TYPE;
+ }
+
+ switch (hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->asf_firmware_present = true;
+ break;
+ default:
+ break;
+ }
+
+ /* The 82543 chip does not count tx_carrier_errors properly in
+ * FD mode
+ */
+ if (hw->mac_type == e1000_82543)
+ hw->bad_tx_carr_stats_fd = true;
+
+ if (hw->mac_type > e1000_82544)
+ hw->has_smbus = true;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_media_type - Set media type and TBI compatibility.
+ * @hw: Struct containing variables accessed by shared code
+ */
+void e1000_set_media_type(struct e1000_hw *hw)
+{
+ u32 status;
+
+ e_dbg("e1000_set_media_type");
+
+ if (hw->mac_type != e1000_82543) {
+ /* tbi_compatibility is only valid on 82543 */
+ hw->tbi_compatibility_en = false;
+ }
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ hw->media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->media_type = e1000_media_type_fiber;
+ break;
+ case e1000_ce4100:
+ hw->media_type = e1000_media_type_copper;
+ break;
+ default:
+ status = er32(STATUS);
+ if (status & E1000_STATUS_TBIMODE) {
+ hw->media_type = e1000_media_type_fiber;
+ /* tbi_compatibility not valid on fiber */
+ hw->tbi_compatibility_en = false;
+ } else {
+ hw->media_type = e1000_media_type_copper;
+ }
+ break;
+ }
+ }
+}
+
+/**
+ * e1000_reset_hw - reset the hardware completely
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ */
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 icr;
+ u32 manc;
+ u32 led_ctrl;
+ s32 ret_val;
+
+ e_dbg("e1000_reset_hw");
+
+ /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH();
+
+ /* The tbi_compatibility_on flag must be cleared when RCTL is cleared. */
+ hw->tbi_compatibility_on = false;
+
+ /* Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msleep(10);
+
+ ctrl = er32(CTRL);
+
+ /* Must reset the PHY before resetting the MAC */
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ E1000_WRITE_FLUSH();
+ msleep(5);
+ }
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+ * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ e_dbg("Issuing a global reset to MAC\n");
+
+ switch (hw->mac_type) {
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /* These controllers can't ack the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround to issue the reset */
+ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ /* Reset is performed on a shadow of the control register */
+ ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_ce4100:
+ default:
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ }
+
+ /* After MAC reset, force reload of EEPROM to restore power-on settings to
+ * device. Later controllers reload the EEPROM automatically, so just wait
+ * for reload to complete.
+ */
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* Wait for reset to complete */
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ /* Wait for EEPROM reload */
+ msleep(2);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* Wait for EEPROM reload */
+ msleep(20);
+ break;
+ default:
+ /* Auto read done will delay 5ms or poll based on mac type */
+ ret_val = e1000_get_auto_rd_done(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ if (hw->mac_type >= e1000_82540) {
+ manc = er32(MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ ew32(MANC, manc);
+ }
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ e1000_phy_init_script(hw);
+
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr = er32(ICR);
+
+ /* If MWI was previously enabled, reenable it. */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw - Performs basic configuration of the adapter.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ */
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+ s32 ret_val;
+ u32 mta_size;
+ u32 ctrl_ext;
+
+ e_dbg("e1000_init_hw");
+
+ /* Initialize Identification LED */
+ ret_val = e1000_id_led_init(hw);
+ if (ret_val) {
+ e_dbg("Error Initializing Identification LED\n");
+ return ret_val;
+ }
+
+ /* Set the media type and TBI compatibility */
+ e1000_set_media_type(hw);
+
+ /* Disabling VLAN filtering. */
+ e_dbg("Initializing the IEEE VLAN\n");
+ if (hw->mac_type < e1000_82545_rev_3)
+ ew32(VET, 0);
+ e1000_clear_vfta(hw);
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ ew32(RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH();
+ msleep(5);
+ }
+
+ /* Setup the receive address. This involves initializing all of the Receive
+ * Address Registers (RARs 0 - 15).
+ */
+ e1000_init_rx_addrs(hw);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ ew32(RCTL, 0);
+ E1000_WRITE_FLUSH();
+ msleep(1);
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ mta_size = E1000_MC_TBL_SIZE;
+ for (i = 0; i < mta_size; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occurring when accessing our register space */
+ E1000_WRITE_FLUSH();
+ }
+
+ /* Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives. Valid only on
+ * 82542 and 82543 silicon.
+ */
+ if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ if (hw->bus_type == e1000_bus_type_pcix
+ && e1000_pcix_get_mmrbc(hw) > 2048)
+ e1000_pcix_set_mmrbc(hw, 2048);
+ break;
+ }
+
+ /* Call a subroutine to configure the link and setup flow control. */
+ ret_val = e1000_setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ if (hw->mac_type > e1000_82544) {
+ ctrl = er32(TXDCTL);
+ ctrl =
+ (ctrl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ ew32(TXDCTL, ctrl);
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs(hw);
+
+ if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+ hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+ ctrl_ext = er32(CTRL_EXT);
+ /* Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot. */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting.
+ * @hw: Struct containing variables accessed by shared code.
+ */
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+ u16 eeprom_data;
+ s32 ret_val;
+
+ e_dbg("e1000_adjust_serdes_amplitude");
+
+ if (hw->media_type != e1000_media_type_internal_serdes)
+ return E1000_SUCCESS;
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1,
+ &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if (eeprom_data != EEPROM_RESERVED_WORD) {
+ /* Adjust SERDES output amplitude only. */
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_link - Configures flow control and link settings.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ */
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+ u16 eeprom_data;
+
+ e_dbg("e1000_setup_link");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->fc == E1000_FC_DEFAULT) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc = E1000_FC_NONE;
+ else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+ EEPROM_WORD0F_ASM_DIR)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else
+ hw->fc = E1000_FC_FULL;
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ hw->fc &= (~E1000_FC_TX_PAUSE);
+
+ if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+ hw->fc &= (~E1000_FC_RX_PAUSE);
+
+ hw->original_fc = hw->fc;
+
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+ /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before e1000_setup_pcs_link()
+ * or e1000_phy_setup() is called.
+ */
+ if (hw->mac_type == e1000_82543) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+ SWDPIO__EXT_SHIFT);
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = (hw->media_type == e1000_media_type_copper) ?
+ e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
+
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ ew32(FCTTV, hw->fc_pause_time);
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+ ew32(FCRTL, 0);
+ ew32(FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water marks
+ * as well as (optionally) enabling the transmission of XON frames.
+ */
+ if (hw->fc_send_xon) {
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
+ } else {
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
+ }
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ */
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 status;
+ u32 txcw = 0;
+ u32 i;
+ u32 signal = 0;
+ s32 ret_val;
+
+ e_dbg("e1000_setup_fiber_serdes_link");
+
+ /* On adapters with a MAC newer than 82544, SWDP 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to value
+ * set in the EEPROM.
+ */
+ ctrl = er32(CTRL);
+ if (hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then software
+ * will have to set the "PAUSE" bits to the correct value in the Transmit
+ * Config Word Register (TXCW) and re-start auto-negotiation. However, if
+ * auto-negotiation is disabled, then software will have to manually
+ * configure the two flow control enable bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ /* Flow control is completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case E1000_FC_RX_PAUSE:
+ /* RX Flow control is enabled and TX Flow control is disabled by a
+ * software over-ride. Since there really isn't a way to advertise
+ * that we are capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case E1000_FC_TX_PAUSE:
+ /* TX Flow control is enabled, and RX Flow control is disabled, by a
+ * software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case E1000_FC_FULL:
+ /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the link
+ * will be in reset, because we previously reset the chip). This will
+ * restart auto-negotiation. If auto-negotiation is successful then the
+ * link-up status bit will be set and the flow control enable bits (RFCE
+ * and TFCE) will be set according to their negotiated value.
+ */
+ e_dbg("Auto-negotiation enabled\n");
+
+ ew32(TXCW, txcw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ hw->txcw = txcw;
+ msleep(1);
+
+ /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+ * indication in the Device Status Register. Time-out if a link isn't
+ * seen in 500 milliseconds (Auto-negotiation should complete in
+ * less than 500 milliseconds even if the other end is doing it in SW).
+ * For internal serdes, we just assume a signal is present, then poll.
+ */
+ if (hw->media_type == e1000_media_type_internal_serdes ||
+ (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+ e_dbg("Looking for Link\n");
+ for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+ msleep(10);
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == (LINK_UP_TIMEOUT / 10)) {
+ e_dbg("Never got a valid link from auto-neg!!!\n");
+ hw->autoneg_failed = 1;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * e1000_check_for_link. This routine will force the link up if
+ * we detect a signal. This will allow us to communicate with
+ * non-autonegotiating link partners.
+ */
+ ret_val = e1000_check_for_link(hw);
+ if (ret_val) {
+ e_dbg("Error while checking for link\n");
+ return ret_val;
+ }
+ hw->autoneg_failed = 0;
+ } else {
+ hw->autoneg_failed = 0;
+ e_dbg("Valid Link Found\n");
+ }
+ } else {
+ e_dbg("No Signal Detected\n");
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* SW reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_aux;
+
+ switch (hw->phy_type) {
+ case e1000_phy_8211:
+ ret_val = e1000_copper_link_rtl_setup(hw);
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ case e1000_phy_8201:
+ /* Set RMII mode */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= E1000_CTL_AUX_RMII;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+
+ /* Disable the J/K bits required for receive */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= 0x4;
+ ctrl_aux &= ~0x2;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+ ret_val = e1000_copper_link_rtl_setup(hw);
+
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ default:
+ e_dbg("Error Resetting the PHY\n");
+ return E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_preconfig - early configuration for copper
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Make sure we have a valid PHY and change PHY mode before link setup.
+ */
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_preconfig");
+
+ ctrl = er32(CTRL);
+ /* With 82543, we need to force speed and duplex on the MAC equal to what
+ * the PHY speed and duplex configuration is. In addition, we need to
+ * perform a hardware reset on the PHY to take it out of reset.
+ */
+ if (hw->mac_type > e1000_82543) {
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+ } else {
+ ctrl |=
+ (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+ ew32(CTRL, ctrl);
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Make sure we have a valid PHY */
+ ret_val = e1000_detect_gig_phy(hw);
+ if (ret_val) {
+ e_dbg("Error, did not detect valid phy.\n");
+ return ret_val;
+ }
+ e_dbg("Phy ID = %x\n", hw->phy_id);
+
+ /* Set PHY to class A mode (if necessary) */
+ ret_val = e1000_set_phy_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82545_rev_3) ||
+ (hw->mac_type == e1000_82546_rev_3)) {
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data |= 0x00000008;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->mac_type <= e1000_82543 ||
+ hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2)
+ hw->phy_reset_disable = false;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+ u32 led_ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_igp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Wait 15ms for MAC to configure PHY from eeprom settings */
+ msleep(15);
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+
+ /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
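+ /* Note: LPLU (Low Power Link Up) drops the link to the lowest
+ * negotiable speed to save power; it is disabled below for IGP
+ * PHYs during driver init.
+ */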
+ if (hw->phy_type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = e1000_set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ e_dbg("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure mdi-mdix settings */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ hw->dsp_config_state = e1000_dsp_config_disabled;
+ /* Force MDI for earlier revs of the IGP PHY */
+ phy_data &=
+ ~(IGP01E1000_PSCR_AUTO_MDIX |
+ IGP01E1000_PSCR_FORCE_MDI_MDIX);
+ hw->mdix = 1;
+
+ } else {
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ }
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->autoneg) {
+ e1000_ms_type phy_ms_setting = hw->master_slave;
+
+ if (hw->ffe_config_state == e1000_ffe_config_active)
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+
+ if (hw->dsp_config_state == e1000_dsp_config_activated)
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+
+ /* When the autonegotiation advertisement is 1000 Mbps only, we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as the hardware default.
+ */
+ if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Set auto Master/Slave resolution process */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ ret_val =
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
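+ /* Decoded: if CR_1000T_MS_ENABLE is set the resolution was forced,
+ * with CR_1000T_MS_VALUE selecting master (1) or slave (0);
+ * otherwise the PHY auto-resolves.
+ */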
+ hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) : e1000_ms_auto;
+
+ switch (phy_ms_setting) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ break;
+ default:
+ break;
+ }
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_mgp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (hw->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_revision < M88E1011_I_REV_4) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((hw->phy_revision == E1000_REVISION_2) &&
+ (hw->phy_id == M88E1111_I_PHY_ID)) {
+ /* Vidalia Phy, set the downshift counter to 5x */
+ phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_autoneg - setup auto-neg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Setup auto-negotiation and flow control advertisements,
+ * and then perform auto-negotiation.
+ */
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the hw->autoneg_advertised
+ * parameter. If this variable is zero, then set it to the default.
+ */
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* If autoneg_advertised is zero after masking, the caller did not
+ * set it, so advertise full capability by default.
+ */
+ if (hw->autoneg_advertised == 0)
+ hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* IFE/RTL8201N PHY only supports 10/100 */
+ if (hw->phy_type == e1000_phy_8201)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ e_dbg("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (hw->wait_autoneg_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->get_link_status = true;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_postconfig - post link setup
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Config the MAC and the PHY after link is up.
+ * 1) Set up the MAC to the current PHY speed/duplex if we are
+ * on 82543. On newer silicon, we only need to configure the
+ * collision distance in the Transmit Control Register.
+ * 2) Set up flow control on the MAC to that established with
+ * the link partner.
+ * 3) Config DSP to improve Gigabit link quality for some PHY revisions.
+ */
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ e_dbg("e1000_copper_link_postconfig");
+
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
+ e1000_config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ e_dbg("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error Configuring Flow Control\n");
+ return ret_val;
+ }
+
+ /* Config DSP to improve Giga link quality */
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_config_dsp_after_link_change(hw, true);
+ if (ret_val) {
+ e_dbg("Error Configuring DSP after link up\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_copper_link - phy/speed/duplex setting
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Detects which PHY is present and sets up the speed and duplex
+ */
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ e_dbg("e1000_setup_copper_link");
+
+ /* Check if it is a valid PHY and set PHY mode if necessary. */
+ ret_val = e1000_copper_link_preconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_copper_link_igp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_copper_link_mgp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ ret_val = gbe_dhg_phy_setup(hw);
+ if (ret_val) {
+ e_dbg("gbe_dhg_phy_setup failed!\n");
+ return ret_val;
+ }
+ }
+
+ if (hw->autoneg) {
+ /* Setup autoneg and flow control advertisement
+ * and perform autonegotiation */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H, or 100F
+ * depending on the value of forced_speed_duplex.
+ */
+ e_dbg("Forcing speed and duplex\n");
+ ret_val = e1000_phy_force_speed_duplex(hw);
+ if (ret_val) {
+ e_dbg("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ for (i = 0; i < 10; i++) {
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ /* Config the MAC and PHY after link is up */
+ ret_val = e1000_copper_link_postconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("Valid link established!!!\n");
+ return E1000_SUCCESS;
+ }
+ udelay(10);
+ }
+
+ e_dbg("Unable to establish link!!!\n");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_setup_autoneg - phy settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures PHY autoneg and flow control advertisement settings
+ */
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ e_dbg("e1000_phy_setup_autoneg");
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_type == e1000_phy_8201)
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+ e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+ e_dbg("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+ e_dbg("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+ e_dbg("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+ e_dbg("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+ e_dbg("Advertise 1000mb Half duplex requested, request denied!\n");
+ }
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+ e_dbg("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
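+ /* In terms of the 802.3 advertisement bits set below: FC_NONE clears
+ * both PAUSE and ASM_DIR, FC_RX_PAUSE and FC_FULL set both, and
+ * FC_TX_PAUSE sets ASM_DIR only.
+ */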
+ switch (hw->fc) {
+ case E1000_FC_NONE: /* 0 */
+ /* Flow control (RX & TX) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_RX_PAUSE: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is
+ * disabled, by a software over-ride.
+ */
+ /* Since there really isn't a way to advertise that we are
+ * capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_TX_PAUSE: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case E1000_FC_FULL: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (hw->phy_type == e1000_phy_8201) {
+ mii_1000t_ctrl_reg = 0;
+ } else {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex - force link settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Force PHY speed and duplex settings to hw->forced_speed_duplex
+ */
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 mii_ctrl_reg;
+ u16 mii_status_reg;
+ u16 phy_data;
+ u16 i;
+
+ e_dbg("e1000_phy_force_speed_duplex");
+
+ /* Turn off Flow control if we are forcing speed and duplex. */
+ hw->fc = E1000_FC_NONE;
+
+ e_dbg("hw->fc = %d\n", hw->fc);
+
+ /* Read the Device Control Register. */
+ ctrl = er32(CTRL);
+
+ /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(DEVICE_SPEED_MASK);
+
+ /* Clear the Auto Speed Detect Enable bit. */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Read the MII Control Register. */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* We need to disable autoneg in order to force link and duplex. */
+
+ mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Are we forcing Full or Half Duplex? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_10_full) {
+ /* We want to force full duplex so we SET the full duplex bits in the
+ * Device and MII Control Registers.
+ */
+ ctrl |= E1000_CTRL_FD;
+ mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+ e_dbg("Full Duplex\n");
+ } else {
+ /* We want to force half duplex so we CLEAR the full duplex bits in
+ * the Device and MII Control Registers.
+ */
+ ctrl &= ~E1000_CTRL_FD;
+ mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+ e_dbg("Half Duplex\n");
+ }
+
+ /* Are we forcing 100Mbps? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_100_half) {
+ /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+ ctrl |= E1000_CTRL_SPD_100;
+ mii_ctrl_reg |= MII_CR_SPEED_100;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ e_dbg("Forcing 100mb ");
+ } else {
+ /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ mii_ctrl_reg |= MII_CR_SPEED_10;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ e_dbg("Forcing 10mb ");
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+
+ if (hw->phy_type == e1000_phy_m88) {
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("M88E1000 PSCR: %x\n", phy_data);
+
+ /* Need to reset the PHY or these changes will be ignored */
+ mii_ctrl_reg |= MII_CR_RESET;
+
+ /* Disable MDI-X support for 10/100 */
+ } else {
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Write back the modified PHY MII control register. */
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ /* The wait_autoneg_complete flag may be a little misleading here.
+ * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+ * But we do want to delay for a period while forcing only so we
+ * don't generate false No Link messages. So we will wait here
+ * only if the user has set wait_autoneg_complete to 1, which is
+ * the default.
+ */
+ if (hw->wait_autoneg_complete) {
+ /* We will wait for autoneg to complete. */
+ e_dbg("Waiting for forced speed/duplex link.\n");
+ mii_status_reg = 0;
+
+ /* Wait for the link to come up, or for the forced-link timeout
+ * (PHY_FORCE_TIME iterations of 100 ms) to expire.
+ */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for the Link Status
+ * bit to be set. Read twice because the bit is latched.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ msleep(100);
+ }
+ if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
+ /* We didn't get link. Reset the DSP and wait again for link. */
+ ret_val = e1000_phy_reset_dsp(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting PHY DSP\n");
+ return ret_val;
+ }
+ }
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ msleep(100);
+ /* Read the MII Status Register and wait for the Link Status
+ * bit to be set. Read twice because the bit is latched.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This value
+ * defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, because of the s/w reset above, we need to enable CRS on
+ * TX. This must be set for both full and half duplex operation.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82544 ||
+ hw->mac_type == e1000_82543) && !hw->autoneg &&
+ (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_collision_dist - set collision distance register
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets the collision distance in the Transmit Control register.
+ * Link should have been established previously. Reads the speed and duplex
+ * information from the Device Status register.
+ */
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ u32 tctl, coll_dist;
+
+ e_dbg("e1000_config_collision_dist");
+
+ if (hw->mac_type < e1000_82543)
+ coll_dist = E1000_COLLISION_DISTANCE_82542;
+ else
+ coll_dist = E1000_COLLISION_DISTANCE;
+
+ tctl = er32(TCTL);
+
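+ /* COLD is a field inside TCTL, so clear it before ORing in the
+ * new collision distance to avoid mixing in stale bits.
+ */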
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= coll_dist << E1000_COLD_SHIFT;
+
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
+}
+
+/**
+ * e1000_config_mac_to_phy - sync phy and mac settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets the MAC speed and duplex settings to reflect those of the PHY.
+ * The PHY is queried directly for the needed information.
+ */
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_config_mac_to_phy");
+
+ /* 82544 or newer MAC, Auto Speed Detection takes care of
+ * MAC speed/duplex configuration.
+ */
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
+ return E1000_SUCCESS;
+
+ /* Read the Device Control Register and set the bits to Force Speed
+ * and Duplex.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ switch (hw->phy_type) {
+ case e1000_phy_8201:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & RTL_PHY_CTRL_FD)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ if (phy_data & RTL_PHY_CTRL_SPD_100)
+ ctrl |= E1000_CTRL_SPD_100;
+ else
+ ctrl |= E1000_CTRL_SPD_10;
+
+ e1000_config_collision_dist(hw);
+ break;
+ default:
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) ==
+ M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+ }
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_force_mac_fc - force flow control settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Forces the MAC's flow control settings.
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ e_dbg("e1000_force_mac_fc");
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+
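+ /* RFCE makes the MAC honor received PAUSE frames; TFCE lets it
+ * transmit them. The cases below set the two bits accordingly.
+ */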
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case E1000_FC_RX_PAUSE:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case E1000_FC_TX_PAUSE:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case E1000_FC_FULL:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 mii_nway_adv_reg;
+ u16 mii_nway_lp_ability_reg;
+ u16 speed;
+ u16 duplex;
+
+ e_dbg("e1000_config_fc_after_link_up");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (((hw->media_type == e1000_media_type_fiber) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_copper) &&
+ (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement Register
+ * (Address 4) and the Auto_Negotiation Base Page Ability
+ * Register (Address 5) to determine how flow control was
+ * negotiated.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | E1000_FC_NONE
+ * 0 | 1 | 0 | DC | E1000_FC_NONE
+ * 0 | 1 | 1 | 0 | E1000_FC_NONE
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ * 1 | 0 | 0 | DC | E1000_FC_NONE
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ * 1 | 1 | 0 | 0 | E1000_FC_NONE
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
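+ /* The cases below walk this table in order: symmetric PAUSE,
+ * Tx-only, Rx-only, then the fallback for link partners that
+ * advertised nothing.
+ */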
+ /* Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected Rx-only
+ * pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->original_fc == E1000_FC_FULL) {
+ hw->fc = E1000_FC_FULL;
+ e_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ e_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ *
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = E1000_FC_TX_PAUSE;
+ e_dbg("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = E1000_FC_RX_PAUSE;
+ e_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more times than not we will
+ * be asked to delay transmission of packets than asking
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->original_fc == E1000_FC_NONE ||
+ hw->original_fc == E1000_FC_TX_PAUSE) ||
+ hw->fc_strict_ieee) {
+ hw->fc = E1000_FC_NONE;
+ e_dbg("Flow Control = NONE.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ e_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val =
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc = E1000_FC_NONE;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ } else {
+ e_dbg("Copper PHY and Auto Neg has not completed.\n");
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ */
+static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val = E1000_SUCCESS;
+
+ e_dbg("e1000_check_for_serdes_link_generic");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+ if (hw->autoneg_failed == 0) {
+ hw->autoneg_failed = 1;
+ goto out;
+ }
+ e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ goto out;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, hw->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ hw->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+ /*
+ * If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ e_dbg("SERDES: Link up - forced.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & er32(TXCW)) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ e_dbg("SERDES: Link up - autoneg "
+ "completed successfully.\n");
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - invalid"
+ "codewords detected in autoneg.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link - check the link status of the adapter
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks to see if the link status of the hardware has changed.
+ * Called by any function that needs to check the link status of the adapter.
+ */
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ u32 rxcw = 0;
+ u32 ctrl;
+ u32 status;
+ u32 rctl;
+ u32 icr;
+ u32 signal = 0;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_for_link");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+
+ /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ rxcw = er32(RXCW);
+
+ if (hw->media_type == e1000_media_type_fiber) {
+ signal =
+ (hw->mac_type >
+ e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+ if (status & E1000_STATUS_LU)
+ hw->get_link_status = false;
+ }
+ }
+
+ /* If we have a copper PHY then we only want to go out to the PHY
+ * registers to see if Auto-Neg has completed and/or if our link
+ * status has changed. The get_link_status flag will be set if we
+ * receive a Link Status Change interrupt or we have Rx Sequence
+ * Errors.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ * Read the register twice since the link bit is sticky.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ hw->get_link_status = false;
+ /* Check if there was DownShift; this must be done
+ * immediately after link-up.
+ */
+ e1000_check_downshift(hw);
+
+ /* If we are on 82544 or 82543 silicon and speed/duplex
+ * are forced to 10H or 10F, then we will implement the polarity
+ * reversal workaround. We disable interrupts first, and upon
+ * returning, restore the device's interrupt state to its previous
+ * value except for the link status change interrupt which will
+ * happen due to the execution of this workaround.
+ */
+
+ if ((hw->mac_type == e1000_82544 ||
+ hw->mac_type == e1000_82543) && !hw->autoneg &&
+ (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ ew32(IMC, 0xffffffff);
+ ret_val =
+ e1000_polarity_reversal_workaround(hw);
+ icr = er32(ICR);
+ ew32(ICS, (icr & ~E1000_ICS_LSC));
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+
+ } else {
+ /* No link detected */
+ e1000_config_dsp_after_link_change(hw, false);
+ return 0;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!hw->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* optimize the dsp settings for the igp phy */
+ e1000_config_dsp_after_link_change(hw, true);
+
+ /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_ce4100))
+ e1000_config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ e_dbg("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure Flow Control now that Auto-Neg has completed. First, we
+ * need to restore the desired flow control settings because we may
+ * have had to re-autoneg with a different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
+
+ /* At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if (hw->tbi_compatibility_en) {
+ u16 speed, duplex;
+ ret_val =
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /* If link speed is not set to gigabit speed, we do not need
+ * to enable TBI compatibility.
+ */
+ if (hw->tbi_compatibility_on) {
+ /* If we previously were in the mode, turn it off. */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ hw->tbi_compatibility_on = false;
+ }
+ } else {
+ /* If TBI compatibility was previously off, turn it on. For
+ * compatibility with a TBI link partner, we will store bad
+ * packets. Some frames have an additional byte on the end and
+ * will look like CRC errors to the hardware.
+ */
+ if (!hw->tbi_compatibility_on) {
+ hw->tbi_compatibility_on = true;
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ }
+ }
+ }
+ }
+
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes))
+ e1000_check_for_serdes_link_generic(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex - get current speed and duplex
+ * @hw: Struct containing variables accessed by shared code
+ * @speed: Speed of the connection
+ * @duplex: Duplex setting of the connection
+ *
+ * Detects the current speed and duplex settings of the hardware.
+ */
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ u32 status;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_get_speed_and_duplex");
+
+ if (hw->mac_type >= e1000_82543) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ e_dbg("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ e_dbg("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ e_dbg("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ e_dbg("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ e_dbg(" Half Duplex\n");
+ }
+ } else {
+ e_dbg("1000 Mbs, Full Duplex\n");
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+ }
+
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+ * if it is operating at half duplex. Here we set the duplex settings to
+ * match the duplex in the link partner's capabilities.
+ */
+ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ *duplex = HALF_DUPLEX;
+ else {
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if ((*speed == SPEED_100 &&
+ !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+ (*speed == SPEED_10 &&
+ !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_wait_autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Blocks until autoneg completes or times out (~4.5 seconds)
+ */
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ e_dbg("e1000_wait_autoneg");
+ e_dbg("Waiting for Auto-Neg to complete.\n");
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if (phy_data & MII_SR_AUTONEG_COMPLETE)
+ return E1000_SUCCESS;
+ msleep(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_raise_mdi_clk - Raises the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Raise the clock input to the Management Data Clock (by setting the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/**
+ * e1000_lower_mdi_clk - Lowers the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Lower the clock input to the Management Data Clock (by clearing the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/**
+ * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY
+ * @hw: Struct containing variables accessed by shared code
+ * @data: Data to send out to the PHY
+ * @count: Number of bits to shift out
+ *
+ * Bits are shifted out in MSB to LSB order.
+ */
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
+{
+ u32 ctrl;
+ u32 mask;
+
+ /* We need to shift "count" number of bits out to the PHY. So, the value
+ * in the "data" parameter will be shifted out to the PHY one bit at a
+ * time. In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
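+ /* e.g. for count == 14 the mask starts at bit 13, so bits are
+ * clocked out MSB first.
+ */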
+
+ ctrl = er32(CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while (mask) {
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+ * then raising and lowering the Management Data Clock. A "0" is
+ * shifted out to the PHY by setting the MDIO bit to "0" and then
+ * raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ udelay(10);
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ mask = mask >> 1;
+ }
+}
+
+/**
+ * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Bits are shifted in MSB-to-LSB order.
+ */
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u16 data = 0;
+ u8 i;
+
+ /* In order to read a register from the PHY, we need to shift in a total
+ * of 18 bits from the PHY. The first two bit (turnaround) times are used
+ * to avoid contention on the MDIO pin when a read operation is performed.
+ * These two bits are ignored by us and thrown away. Bits are "shifted in"
+ * by raising the input to the Management Data Clock (setting the MDC bit),
+ * and then reading the value of the MDIO bit.
+ */
+ ctrl = er32(CTRL);
+
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ /* Raise and Lower the clock before reading in the data. This accounts for
+ * the turnaround bits. The first clock occurred when we clocked out the
+ * last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ for (data = 0, i = 0; i < 16; i++) {
+ data = data << 1;
+ e1000_raise_mdi_clk(hw, &ctrl);
+ ctrl = er32(CTRL);
+ /* Check to see if we shifted in a "1". */
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
+ e1000_lower_mdi_clk(hw, &ctrl);
+ }
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ return data;
+}
+
+/**
+ * e1000_read_phy_reg - read a phy register
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to read
+ * @phy_data: pointer to the value read from the PHY register
+ *
+ * Reads the value from a PHY register; if the register is on a specific
+ * non-zero page, sets the page first.
+ */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
+{
+ u32 ret_val;
+
+ e_dbg("e1000_read_phy_reg");
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16) reg_addr);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
+}
+
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 *phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
+
+ e_dbg("e1000_read_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, and register address in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_READ) |
+ (INTEL_CE_GBE_MDIC_GO));
+
+ writel(mdic, E1000_MDIO_CMD);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+
+ mdic = readl(E1000_MDIO_STS);
+ if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+ e_dbg("MDI Read Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ } else {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ }
+ } else {
+ /* We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. A MII read
+ * frame consists of
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+ * and the preamble was already sent above, so we shift out the
+ * remaining 14 bits here. This is followed by a shift in of 18 bits.
+ * The first two bits shifted in are TurnAround bits used to avoid
+ * contention on the MDIO pin when a READ operation is performed.
+ * These two bits are thrown away, followed by a shift in of 16 bits
+ * which contains the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
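+ /* Worked example (assuming the usual clause-22 encodings
+ * PHY_SOF = 0x1 and PHY_OP_READ = 0x2): reading register 0 on
+ * PHY address 1 gives mdic = (1 << 12) | (2 << 10) | (1 << 5),
+ * i.e. 0x1820, clocked out MSB first as 01 10 00001 00000.
+ */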
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+ /* Now that we've shifted out the read command to the MII, we need
+ * to "shift in" the 16-bit value (18 bits total, including the two
+ * turnaround bits) of the requested PHY register.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - write a phy register
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to write
+ * @phy_data: data to write to the PHY
+ *
+ * Writes a value to a PHY register
+ */
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ u32 ret_val;
+
+ e_dbg("e1000_write_phy_reg");
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16) reg_addr);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
+}
+
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
+
+ e_dbg("e1000_write_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data
+ * intended for the PHY register in the MDI Control register.
+ * The MAC will take care of interfacing with the PHY to send
+ * the desired data.
+ */
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_WRITE) |
+ (INTEL_CE_GBE_MDIC_GO));
+
+ writel(mdic, E1000_MDIO_CMD);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32) phy_data;
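+ /* The assembled 32-bit frame is <01><01><phy addr><reg addr><10><data>:
+ * start-of-frame, WRITE opcode, the two 5-bit addresses, the turnaround
+ * pattern, then the 16 data bits (IEEE 802.3 clause 22 framing).
+ */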
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - reset the phy, hardware style
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Returns the PHY to the power-on reset state
+ */
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext;
+ u32 led_ctrl;
+
+ e_dbg("e1000_phy_hw_reset");
+
+ e_dbg("Resetting Phy...\n");
+
+ if (hw->mac_type > e1000_82543) {
+ /* Read the device control register and assert the E1000_CTRL_PHY_RST
+ * bit. Then, take it out of reset.
+ * For e1000 hardware, we delay for 10ms between the assert
+ * and deassert.
+ */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
+
+ msleep(10);
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ } else {
+ /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+ * bit to put the PHY into reset. Then, take it out of reset.
+ */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ udelay(150);
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Wait for FW to finish PHY configuration. */
+ return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ * e1000_phy_reset - reset the phy to commit settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY
+ * Sets bit 15 of the MII Control register
+ */
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_phy_reset");
+
+ switch (hw->phy_type) {
+ case e1000_phy_igp:
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp)
+ e1000_phy_init_script(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_detect_gig_phy - check the phy type
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Probes the expected PHY address for known PHY IDs
+ */
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ s32 phy_init_status, ret_val;
+ u16 phy_id_high, phy_id_low;
+ bool match = false;
+
+ e_dbg("e1000_detect_gig_phy");
+
+ if (hw->phy_id != 0)
+ return E1000_SUCCESS;
+
+ /* Read the PHY ID Registers to identify which PHY is onboard. */
+ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id = (u32) (phy_id_high << 16);
+ udelay(20);
+ ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
+ hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->phy_id == M88E1000_E_PHY_ID)
+ match = true;
+ break;
+ case e1000_82544:
+ if (hw->phy_id == M88E1000_I_PHY_ID)
+ match = true;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (hw->phy_id == M88E1011_I_PHY_ID)
+ match = true;
+ break;
+ case e1000_ce4100:
+ if ((hw->phy_id == RTL8211B_PHY_ID) ||
+ (hw->phy_id == RTL8201N_PHY_ID) ||
+ (hw->phy_id == M88E1118_E_PHY_ID))
+ match = true;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (hw->phy_id == IGP01E1000_I_PHY_ID)
+ match = true;
+ break;
+ default:
+ e_dbg("Invalid MAC type %d\n", hw->mac_type);
+ return -E1000_ERR_CONFIG;
+ }
+ phy_init_status = e1000_set_phy_type(hw);
+
+ if ((match) && (phy_init_status == E1000_SUCCESS)) {
+ e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
+ return E1000_SUCCESS;
+ }
+ e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000_phy_reset_dsp - reset DSP
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY's DSP
+ */
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ e_dbg("e1000_phy_reset_dsp");
+
+ do {
+ ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+ if (ret_val)
+ break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+ if (ret_val)
+ break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+ if (ret_val)
+ break;
+ ret_val = E1000_SUCCESS;
+ } while (0);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_igp_get_info - get igp specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers for igp PHY only.
+ */
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data, min_length, max_length, average;
+ e1000_rev_polarity polarity;
+
+ e_dbg("e1000_phy_igp_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+ /* IGP01E1000 does not need extended 10BASE-T distance support. */
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ /* IGP01E1000 always corrects polarity reversal */
+ phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
+ IGP01E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ /* Get cable length */
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ /* Translate to old method */
+ average = (max_length + min_length) / 2;
+
+ if (average <= e1000_igp_cable_length_50)
+ phy_info->cable_length = e1000_cable_length_50;
+ else if (average <= e1000_igp_cable_length_80)
+ phy_info->cable_length = e1000_cable_length_50_80;
+ else if (average <= e1000_igp_cable_length_110)
+ phy_info->cable_length = e1000_cable_length_80_110;
+ else if (average <= e1000_igp_cable_length_140)
+ phy_info->cable_length = e1000_cable_length_110_140;
+ else
+ phy_info->cable_length = e1000_cable_length_140;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_m88_get_info - get m88 specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers for m88 PHY only.
+ */
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e1000_rev_polarity polarity;
+
+ e_dbg("e1000_phy_m88_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->extended_10bt_distance =
+ ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+ M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ e1000_10bt_ext_dist_enable_lower :
+ e1000_10bt_ext_dist_enable_normal;
+
+ phy_info->polarity_correction =
+ ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+ M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
+ M88E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ /* Cable Length Estimation and Local/Remote Receiver Information
+ * are only valid at 1000 Mbps.
+ */
+ phy_info->cable_length =
+ (e1000_cable_length) ((phy_data &
+ M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_get_info - request phy info
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers
+ */
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_phy_get_info");
+
+ phy_info->cable_length = e1000_cable_length_undefined;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+ phy_info->cable_polarity = e1000_rev_polarity_undefined;
+ phy_info->downshift = e1000_downshift_undefined;
+ phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+ phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+ phy_info->local_rx = e1000_1000t_rx_status_undefined;
+ phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+ if (hw->media_type != e1000_media_type_copper) {
+ e_dbg("PHY info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
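+ /* The link status bit is latched low, so the register is read twice:
+ * the second read returns the current link state.
+ */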
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+ e_dbg("PHY info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (hw->phy_type == e1000_phy_igp)
+ return e1000_phy_igp_get_info(hw, phy_info);
+ else if ((hw->phy_type == e1000_phy_8211) ||
+ (hw->phy_type == e1000_phy_8201))
+ return E1000_SUCCESS;
+ else
+ return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ e_dbg("e1000_validate_mdi_settings");
+
+ if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+ e_dbg("Invalid MDI setting detected\n");
+ hw->mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_eeprom_params - initialize sw eeprom vars
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets up eeprom variables in the hw struct. Must be called after mac_type
+ * is configured.
+ */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd = er32(EECD);
+ s32 ret_val = E1000_SUCCESS;
+ u16 eeprom_size;
+
+ e_dbg("e1000_init_eeprom_params");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->word_size = 64;
+ eeprom->opcode_bits = 3;
+ eeprom->address_bits = 6;
+ eeprom->delay_usec = 50;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_SIZE) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (eecd & E1000_EECD_TYPE) {
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ } else {
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (eeprom->type == e1000_eeprom_spi) {
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+ * 32KB (incremented by powers of 2).
+ */
+ /* Set to default value for initial eeprom read. */
+ eeprom->word_size = 64;
+ ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+ if (ret_val)
+ return ret_val;
+ eeprom_size =
+ (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ /* 256B eeprom size was not supported in earlier hardware, so we
+ * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+ * is never the result used in the shifting logic below. */
+ if (eeprom_size)
+ eeprom_size++;
+
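+ /* A worked example, assuming the conventional EEPROM_WORD_SIZE_SHIFT
+ * of 6: an encoded size of 2 gives 1 << (2 + 6) = 256 words, a
+ * 512-byte part; the 0..8 range spans 128B to 32KB as noted above.
+ */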
+ eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_raise_ee_clk - Raises the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/**
+ * e1000_lower_ee_clk - Lowers the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/**
+ * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ */
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u32 mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01 << (count - 1);
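+ /* e.g. for an 8-bit SPI opcode the mask starts at 0x80, so the loop
+ * below shifts the most significant bit out first.
+ */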
+ eecd = er32(EECD);
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~E1000_EECD_DO;
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_DO;
+ }
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(eeprom->delay_usec);
+
+ e1000_raise_ee_clk(hw, &eecd);
+ e1000_lower_ee_clk(hw, &eecd);
+
+ mask = mask >> 1;
+
+ } while (mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
+ * @count: number of bits to shift in
+ */
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ /* In order to read a register from the EEPROM, we need to shift 'count'
+ * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+ * input to the EEPROM (setting the SK bit), and then reading the value of
+ * the "DO" bit. During this "shifting in" process the "DI" bit should
+ * always be clear.
+ */
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ e1000_raise_ee_clk(hw, &eecd);
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DI);
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_ee_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * e1000_acquire_eeprom - Prepares EEPROM for access
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ */
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd, i = 0;
+
+ e_dbg("e1000_acquire_eeprom");
+
+ eecd = er32(EECD);
+
+ /* Request EEPROM Access */
+ if (hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ eecd = er32(EECD);
+ while ((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = er32(EECD);
+ }
+ if (!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ e_dbg("Could not acquire EEPROM grant\n");
+ return -E1000_ERR_EEPROM;
+ }
+ }
+
+ /* Setup EEPROM for Read/Write */
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ ew32(EECD, eecd);
+
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_standby_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+
+ eecd = er32(EECD);
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock high */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock low */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ }
+}
+
+/**
+ * e1000_release_eeprom - drop chip select
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Terminates a command by inverting the EEPROM's chip select pin
+ */
+static void e1000_release_eeprom(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ e_dbg("e1000_release_eeprom");
+
+ eecd = er32(EECD);
+
+ if (hw->eeprom.type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_CS; /* Pull CS high */
+ eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(hw->eeprom.delay_usec);
+ } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+ /* cleanup eeprom */
+
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+ ew32(EECD, eecd);
+
+ /* Rising edge of clock */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+
+ /* Falling edge of clock */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+ }
+
+ /* Stop requesting EEPROM access */
+ if (hw->mac_type > e1000_82544) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ }
+}
+
+/**
+ * e1000_spi_eeprom_ready - Waits for the SPI EEPROM to become ready.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u8 spi_stat_reg;
+
+ e_dbg("e1000_spi_eeprom_ready");
+
+ /* Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ retry_count = 0;
+ do {
+ e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+ hw->eeprom.opcode_bits);
+ spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
+ if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ retry_count += 5;
+
+ e1000_standby_eeprom(hw);
+ } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+ /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+ * only 0-5mSec on 5V devices)
+ */
+ if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+ e_dbg("SPI EEPROM Status error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_eeprom - Reads 16 bit words from the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: words read from the EEPROM
+ */
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_read_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 i = 0;
+
+ e_dbg("e1000_read_eeprom");
+
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size)
+ || (words > eeprom->word_size - offset) || (words == 0)) {
+ e_dbg("\"words\" parameter out of bounds. Words = %d,"
+ "size = %d\n", offset, eeprom->word_size);
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
+ * directly. In this case, we need to acquire the EEPROM so that
+ * FW or other port software does not interrupt.
+ */
+ /* Prepare the EEPROM for bit-bang reading */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
+ * acquired the EEPROM at this point, so any returns should release it */
+ if (eeprom->type == e1000_eeprom_spi) {
+ u16 word_in;
+ u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) {
+ e1000_release_eeprom(hw);
+ return -E1000_ERR_EEPROM;
+ }
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ read_opcode |= EEPROM_A8_OPCODE_SPI;
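+ /* e.g. word offset 0x80 is byte address 0x100, whose A8 bit is
+ * carried inside the opcode instead of the address field.
+ */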
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
+ eeprom->address_bits);
+
+ /* Read the data. The address of the eeprom internally increments with
+ * each byte (spi) being read, saving on the overhead of eeprom setup
+ * and tear-down. The address counter will roll over if reading beyond
+ * the size of the eeprom, thus allowing the entire memory to be read
+ * starting from any offset. */
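+ /* Each 16-bit word arrives from the shift register high byte first
+ * and is swapped below into the little-endian byte order of the
+ * EEPROM image.
+ */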
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_ee_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+ } else if (eeprom->type == e1000_eeprom_microwire) {
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw,
+ EEPROM_READ_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset + i),
+ eeprom->address_bits);
+
+ /* Read the data. For microwire, each word requires the overhead
+ * of eeprom setup and tear-down. */
+ data[i] = e1000_shift_in_ee_bits(hw, 16);
+ e1000_standby_eeprom(hw);
+ }
+ }
+
+ /* End this read operation */
+ e1000_release_eeprom(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ */
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ e_dbg("e1000_validate_eeprom_checksum");
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+
+#ifdef CONFIG_PARISC
+ /* This is a signature and not a checksum on HP c8000 */
+ if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+ return E1000_SUCCESS;
+
+#endif
+ if (checksum == (u16) EEPROM_SUM)
+ return E1000_SUCCESS;
+ else {
+ e_dbg("EEPROM Checksum Invalid\n");
+ return -E1000_ERR_EEPROM;
+ }
+}
+
+/**
+ * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ */
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ e_dbg("e1000_update_eeprom_checksum");
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+ checksum = (u16) EEPROM_SUM - checksum;
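+ /* e.g. if words 0x00-0x3E sum to 0x1234, then 0xBABA - 0x1234 = 0xA886
+ * is written to word 0x3F, so a later sum over all 64 words is 0xBABA.
+ */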
+ if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ e_dbg("EEPROM Write Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_eeprom - write words to the different EEPROM types.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to the 16 bit words to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ */
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_write_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ s32 status = 0;
+
+ e_dbg("e1000_write_eeprom");
+
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size)
+ || (words > eeprom->word_size - offset) || (words == 0)) {
+ e_dbg("\"words\" parameter out of bounds\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Prepare the EEPROM for writing */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ status = e1000_write_eeprom_microwire(hw, offset, words, data);
+ } else {
+ status = e1000_write_eeprom_spi(hw, offset, words, data);
+ msleep(10);
+ }
+
+ /* Done with writing */
+ e1000_release_eeprom(hw);
+
+ return status;
+}
+
+/**
+ * e1000_write_eeprom_spi - Writes 16 bit words to a given offset in an SPI EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u16 widx = 0;
+
+ e_dbg("e1000_write_eeprom_spi");
+
+ while (widx < words) {
+ u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw))
+ return -E1000_ERR_EEPROM;
+
+ e1000_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+ eeprom->opcode_bits);
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2),
+ eeprom->address_bits);
+
+ /* Send the data */
+
+ /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_ee_bits(hw, word_out, 16);
+ widx++;
+
+ /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+ * operation, while the smaller eeproms are capable of an 8-byte
+ * PAGE WRITE operation. Break the inner loop to pass new address
+ */
+ if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
+ e1000_standby_eeprom(hw);
+ break;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_eeprom_microwire - Writes 16 bit words to a given offset in a Microwire EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 i = 0;
+
+ e_dbg("e1000_write_eeprom_microwire");
+
+ /* Send the write enable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 11). It's less work to include
+ * the 11 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This puts the
+ * EEPROM into write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+ (u16) (eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+ /* Prepare the EEPROM */
+ e1000_standby_eeprom(hw);
+
+ while (words_written < words) {
+ /* Send the Write command (3-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
+ eeprom->address_bits);
+
+ /* Send the data */
+ e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+ /* Toggle the CS line. This in effect tells the EEPROM to execute
+ * the previous command.
+ */
+ e1000_standby_eeprom(hw);
+
+ /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for (i = 0; i < 200; i++) {
+ eecd = er32(EECD);
+ if (eecd & E1000_EECD_DO)
+ break;
+ udelay(50);
+ }
+ if (i == 200) {
+ e_dbg("EEPROM Write did not complete\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Recover from write */
+ e1000_standby_eeprom(hw);
+
+ words_written++;
+ }
+
+ /* Send the write disable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 10). It's less work to include
+ * the 10 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This takes the
+ * EEPROM out of write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+ (u16) (eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr - read the adapters MAC from eeprom
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ */
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ u16 offset;
+ u16 eeprom_data, i;
+
+ e_dbg("e1000_read_mac_addr");
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+ offset = i >> 1;
+ if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
+ hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8);
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
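+ /* These dual-port parts share one EEPROM; the second function
+ * flips the low bit of the last octet, e.g. ...:34:56 on port 0
+ * becomes ...:34:57 on port 1.
+ */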
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ hw->perm_mac_addr[5] ^= 0x01;
+ break;
+ }
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_rx_addrs - Initializes receive address filters.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ */
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+ u32 i;
+ u32 rar_num;
+
+ e_dbg("e1000_init_rx_addrs");
+
+ /* Setup the receive address. */
+ e_dbg("Programming MAC Address into RAR[0]\n");
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ rar_num = E1000_RAR_ENTRIES;
+
+ /* Zero out the other 15 receive addresses. */
+ e_dbg("Clearing RAR[1-15]\n");
+ for (i = 1; i < rar_num; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/**
+ * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table
+ * @hw: Struct containing variables accessed by shared code
+ * @mc_addr: the multicast address to hash
+ */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value = 0;
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ */
+ case 0:
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+ break;
+ case 1:
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+ break;
+ case 2:
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+ break;
+ case 3:
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+ break;
+ }
+
+ hash_value &= 0xFFF;
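+ /* Callers use the 12-bit result to pick one bit of the 4096-bit
+ * multicast table array: register index = hash_value >> 5, bit
+ * within that register = hash_value & 0x1F.
+ */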
+ return hash_value;
+}
+
+/**
+ * e1000_rar_set - Puts an ethernet address into a receive address register.
+ * @hw: Struct containing variables accessed by shared code
+ * @addr: Address to put into receive address register
+ * @index: Receive address register to write
+ */
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
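+ /* e.g. 00:AA:00:12:34:56 yields rar_low = 0x1200AA00 and
+ * rar_high = 0x00005634 before the valid bit is ORed in.
+ */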
+
+ /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+ * unit hang.
+ *
+ * Description:
+ * If there are any Rx frames queued up or otherwise present in the HW
+ * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+ * hang. To work around this issue, we have to disable receives and
+ * flush out all Rx frames before we enable RSS. To do so, we redirect
+ * all Rx traffic to manageability and then reset the HW.
+ * This flushes away Rx frames, and (since the redirections to
+ * manageability persist across resets) keeps new ones from coming in
+ * while we work. Then, we clear the Address Valid AV bit for all MAC
+ * addresses and undo the re-direction to manageability.
+ * Now, frames are coming in again, but the MAC won't accept them, so
+ * far so good. We now proceed to initialize RSS (if necessary) and
+ * configure the Rx unit. Last, we re-enable the AV bits and continue
+ * on our merry way.
+ */
+ switch (hw->mac_type) {
+ default:
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+ break;
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH();
+}
+
+/**
+ * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: Offset in VLAN filter table to write
+ * @value: Value to write into VLAN filter table
+ */
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ u32 temp;
+
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/**
+ * e1000_clear_vfta - Clears the VLAN filter table
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of the
+ * manageability unit */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
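+ /* With autonegotiation disabled the PHY cannot resolve the crossover
+ * automatically, so an automatic mdix selection (0 or 3) is invalid
+ * here and the setting falls back to 1.
+ */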
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 eeprom_data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ e_dbg("e1000_id_led_init");
+
+ if (hw->mac_type < e1000_82540) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+
+ ledctl = er32(LEDCTL);
+ hw->ledctl_default = ledctl;
+ hw->ledctl_mode1 = hw->ledctl_default;
+ hw->ledctl_mode2 = hw->ledctl_default;
+
+ if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ if ((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) {
+ eeprom_data = ID_LED_DEFAULT;
+ }
+
+ for (i = 0; i < 4; i++) {
+ temp = (eeprom_data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_led - prepare the SW controllable LED for use
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Prepares the SW controllable LED for use and saves its current state.
+ */
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ s32 ret_val = E1000_SUCCESS;
+
+ e_dbg("e1000_setup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No setup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn off PHY Smart Power Down (if enabled) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ &hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16) (hw->phy_spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ /* Save current LEDCTL settings */
+ hw->ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->media_type == e1000_media_type_copper)
+ ew32(LEDCTL, hw->ledctl_mode1);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led - Restores the saved state of the SW controllable LED.
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ e_dbg("e1000_cleanup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No cleanup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn on PHY Smart Power Down (if previously enabled) */
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ /* Restore LEDCTL settings */
+ ew32(LEDCTL, hw->ledctl_default);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turns on the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ e_dbg("e1000_led_on");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode2);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turns off the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ e_dbg("e1000_led_off");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode1);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+ volatile u32 temp;
+
+ temp = er32(CRCERRS);
+ temp = er32(SYMERRS);
+ temp = er32(MPC);
+ temp = er32(SCC);
+ temp = er32(ECOL);
+ temp = er32(MCC);
+ temp = er32(LATECOL);
+ temp = er32(COLC);
+ temp = er32(DC);
+ temp = er32(SEC);
+ temp = er32(RLEC);
+ temp = er32(XONRXC);
+ temp = er32(XONTXC);
+ temp = er32(XOFFRXC);
+ temp = er32(XOFFTXC);
+ temp = er32(FCRUC);
+
+ temp = er32(PRC64);
+ temp = er32(PRC127);
+ temp = er32(PRC255);
+ temp = er32(PRC511);
+ temp = er32(PRC1023);
+ temp = er32(PRC1522);
+
+ temp = er32(GPRC);
+ temp = er32(BPRC);
+ temp = er32(MPRC);
+ temp = er32(GPTC);
+ temp = er32(GORCL);
+ temp = er32(GORCH);
+ temp = er32(GOTCL);
+ temp = er32(GOTCH);
+ temp = er32(RNBC);
+ temp = er32(RUC);
+ temp = er32(RFC);
+ temp = er32(ROC);
+ temp = er32(RJC);
+ temp = er32(TORL);
+ temp = er32(TORH);
+ temp = er32(TOTL);
+ temp = er32(TOTH);
+ temp = er32(TPR);
+ temp = er32(TPT);
+
+ temp = er32(PTC64);
+ temp = er32(PTC127);
+ temp = er32(PTC255);
+ temp = er32(PTC511);
+ temp = er32(PTC1023);
+ temp = er32(PTC1522);
+
+ temp = er32(MPTC);
+ temp = er32(BPTC);
+
+ if (hw->mac_type < e1000_82543)
+ return;
+
+ temp = er32(ALGNERRC);
+ temp = er32(RXERRC);
+ temp = er32(TNCRS);
+ temp = er32(CEXTERR);
+ temp = er32(TSCTC);
+ temp = er32(TSCTFC);
+
+ if (hw->mac_type <= e1000_82544)
+ return;
+
+ temp = er32(MGTPRC);
+ temp = er32(MGTPDC);
+ temp = er32(MGTPTC);
+}
+
+/**
+ * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to true. However, you must initialize
+ * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ */
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ e_dbg("e1000_reset_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if (!hw->ifs_params_forced) {
+ hw->current_ifs_val = 0;
+ hw->ifs_min_val = IFS_MIN;
+ hw->ifs_max_val = IFS_MAX;
+ hw->ifs_step_size = IFS_STEP;
+ hw->ifs_ratio = IFS_RATIO;
+ }
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ } else {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/**
+ * e1000_update_adaptive - update adaptive IFS
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Called during the callback/watchdog routine to update the IFS value based
+ * on the ratio of transmits to collisions; the deltas are tracked in
+ * hw->tx_packet_delta and hw->collision_delta.
+ */
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ e_dbg("e1000_update_adaptive");
+
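+ /* Back off when collisions dominate: the test below fires once there
+ * is more than one collision per ifs_ratio transmitted packets,
+ * growing the inter-frame spacing written to AIT; once collisions
+ * subside, the IFS value returns to zero.
+ */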
+ if (hw->adaptive_ifs) {
+ if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
+ if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+ hw->in_ifs_mode = true;
+ if (hw->current_ifs_val < hw->ifs_max_val) {
+ if (hw->current_ifs_val == 0)
+ hw->current_ifs_val =
+ hw->ifs_min_val;
+ else
+ hw->current_ifs_val +=
+ hw->ifs_step_size;
+ ew32(AIT, hw->current_ifs_val);
+ }
+ }
+ } else {
+ if (hw->in_ifs_mode
+ && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ hw->current_ifs_val = 0;
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+ } else {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/**
+ * e1000_tbi_adjust_stats - adjust stats when TBI accepts a frame
+ * @hw: Struct containing variables accessed by shared code
+ * @stats: Statistics counters to adjust
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr)
+{
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
+
+/**
+ * e1000_get_bus_info - get the current bus type, speed, and width
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Gets the current PCI bus type, speed, and width of the hardware
+ */
+void e1000_get_bus_info(struct e1000_hw *hw)
+{
+ u32 status;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->bus_type = e1000_bus_type_pci;
+ hw->bus_speed = e1000_bus_speed_unknown;
+ hw->bus_width = e1000_bus_width_unknown;
+ break;
+ default:
+ status = er32(STATUS);
+ hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+ e1000_bus_type_pcix : e1000_bus_type_pci;
+
+ if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+ e1000_bus_speed_66 : e1000_bus_speed_120;
+ } else if (hw->bus_type == e1000_bus_type_pci) {
+ hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+ e1000_bus_speed_66 : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ hw->bus_speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ hw->bus_speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ hw->bus_speed = e1000_bus_speed_133;
+ break;
+ default:
+ hw->bus_speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+ hw->bus_width = (status & E1000_STATUS_BUS64) ?
+ e1000_bus_width_64 : e1000_bus_width_32;
+ break;
+ }
+}
+
+/**
+ * e1000_write_reg_io - write to a device register using port I/O
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset to write to
+ * @value: value to write
+ *
+ * Writes a value to one of the device's registers using port I/O (as opposed to
+ * memory mapped I/O). Only 82544 and newer devices support port I/O.
+ */
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
+
+ e1000_io_write(hw, io_addr, offset);
+ e1000_io_write(hw, io_data, value);
+}
+
+/**
+ * e1000_get_cable_length - Estimates the cable length.
+ * @hw: Struct containing variables accessed by shared code
+ * @min_length: The estimated minimum length
+ * @max_length: The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs, this function maps the single value returned from the
+ * register onto a minimum and maximum range.
+ * For IGP PHYs, the range is calculated from the AGC registers.
+ */
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length)
+{
+ s32 ret_val;
+ u16 agc_value = 0;
+ u16 i, phy_data;
+ u16 cable_length;
+
+ e_dbg("e1000_get_cable_length");
+
+ *min_length = *max_length = 0;
+
+ /* Use the old method for PHYs older than IGP */
+ if (hw->phy_type == e1000_phy_m88) {
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+ case e1000_cable_length_50:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_50;
+ break;
+ case e1000_cable_length_50_80:
+ *min_length = e1000_igp_cable_length_50;
+ *max_length = e1000_igp_cable_length_80;
+ break;
+ case e1000_cable_length_80_110:
+ *min_length = e1000_igp_cable_length_80;
+ *max_length = e1000_igp_cable_length_110;
+ break;
+ case e1000_cable_length_110_140:
+ *min_length = e1000_igp_cable_length_110;
+ *max_length = e1000_igp_cable_length_140;
+ break;
+ case e1000_cable_length_140:
+ *min_length = e1000_igp_cable_length_140;
+ *max_length = e1000_igp_cable_length_170;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+ } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ u16 cur_agc_value;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D
+ };
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+ ret_val =
+ e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Value bound check. */
+ if ((cur_agc_value >=
+ IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
+ || (cur_agc_value == 0))
+ return -E1000_ERR_PHY;
+
+ agc_value += cur_agc_value;
+
+ /* Update minimal AGC value. */
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value <
+ IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
+
+ /* Get the average length of the remaining 3 channels */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Get the average length of all the 4 channels. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ /* Set the range of the calculated length. */
+ *min_length = ((e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) > 0) ?
+ (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) : 0;
+ *max_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_polarity - Check the cable polarity
+ * @hw: Struct containing variables accessed by shared code
+ * @polarity: output parameter : 0 - Polarity is not reversed
+ * 1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function simply reads the polarity bit in the
+ * PHY Status register. For IGP PHYs, this bit is valid only if the link speed
+ * is 10 Mbps. At 100 Mbps polarity is not applicable, so this bit is always 0.
+ * If the link speed is 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ */
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_polarity");
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* return the Polarity bit in the Status register. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+ M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+ } else if (hw->phy_type == e1000_phy_igp) {
+ /* Read the Status register to check the speed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+ * find the polarity status */
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+ /* Read the GIG initialization PCS register (0x00B4) */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check the polarity bits */
+ *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ } else {
+ /* For 10 Mbps, read the polarity bit in the status register. (for
+ * 100 Mbps this bit is always 0) */
+ *polarity =
+ (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_downshift - Check if Downshift occurred
+ * @hw: Struct containing variables accessed by shared code
+ * @downshift: output parameter : 0 - No Downshift occurred.
+ * 1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function reads the Downshift bit in the PHY
+ * Specific Status register. For IGP PHYs, it reads the Downgrade bit in the
+ * Link Health register. In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ */
+static s32 e1000_check_downshift(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_downshift");
+
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded =
+ (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+ M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_dsp_after_link_change - configure the DSP on a link change
+ * @hw: Struct containing variables accessed by shared code
+ * @link_up: was link up at the time this was called
+ *
+ * returns: - E1000_ERR_PHY if reading or writing the PHY fails
+ *            E1000_SUCCESS otherwise.
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ */
+
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
+{
+ s32 ret_val;
+ u16 phy_data, phy_saved_data, speed, duplex, i;
+ static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D
+ };
+ u16 min_length, max_length;
+
+ e_dbg("e1000_config_dsp_after_link_change");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ if (link_up) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (speed == SPEED_1000) {
+
+ ret_val =
+ e1000_get_cable_length(hw, &min_length,
+ &max_length);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->dsp_config_state == e1000_dsp_config_enabled)
+ && min_length >= e1000_igp_cable_length_50) {
+
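+				/* Cable is at least 50 m: clear the EDAC MU
+				 * index on all four DSP channels to improve
+				 * gigabit link quality.
+				 */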
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val =
+ e1000_read_phy_reg(hw,
+ dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &=
+ ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val =
+ e1000_write_phy_reg(hw,
+ dsp_reg_array
+ [i], phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ hw->dsp_config_state =
+ e1000_dsp_config_activated;
+ }
+
+ if ((hw->ffe_config_state == e1000_ffe_config_enabled)
+ && (min_length < e1000_igp_cable_length_50)) {
+
+ u16 ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u32 idle_errs = 0;
+
+ /* clear previous idle error counts */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
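+				/* Poll the 1000T status register; if the
+				 * accumulated idle-error count exceeds the
+				 * threshold, switch the FFE to the CM/CP
+				 * setting used for short cables.
+				 */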
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ udelay(1000);
+ ret_val =
+ e1000_read_phy_reg(hw,
+ PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ idle_errs +=
+ (phy_data &
+ SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs >
+ SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT)
+ {
+ hw->ffe_config_state =
+ e1000_ffe_config_active;
+
+ ret_val =
+ e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ }
+ }
+ } else {
+ if (hw->dsp_config_state == e1000_dsp_config_activated) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routines. */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val =
+ e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val =
+ e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ }
+
+ if (hw->ffe_config_state == e1000_ffe_config_active) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routines. */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_phy_mode - Set PHY to class A mode
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Assumes the following operations will follow to enable the new class mode.
+ * 1. Do a PHY soft reset
+ * 2. Restart auto-negotiation or force link.
+ */
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 eeprom_data;
+
+ e_dbg("e1000_set_phy_mode");
+
+ if ((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ ret_val =
+ e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
+ &eeprom_data);
+		if (ret_val)
+			return ret_val;
+
+ if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x000B);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL,
+ 0x8104);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_reset_disable = false;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state - set d3 link power state
+ * @hw: Struct containing variables accessed by shared code
+ * @active: true to enable lplu false to disable lplu.
+ *
+ * This function sets the LPLU state according to the active flag. When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation
+ * advertisement is 10, 10/100, or 10/100/1000 Mbps at all duplexes.
+ *
+ * returns: - E1000_ERR_PHY if reading or writing the PHY fails
+ *            E1000_SUCCESS otherwise.
+ */
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e_dbg("e1000_set_d3_lplu_state");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+	/* LPLU should not be used during normal driver activity, or the device
+	 * will negotiate link starting from the lowest speed (10 Mbps). The
+	 * capability is intended for Dx power-state transitions.
+	 */
+ if (hw->mac_type == e1000_82541_rev_2
+ || hw->mac_type == e1000_82547_rev_2) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (!active) {
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+		 * during Dx states where power conservation is most important.
+		 * During driver activity we should enable SmartSpeed, so
+		 * performance is maintained.
+		 */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT)
+ || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL)
+ || (hw->autoneg_advertised ==
+ AUTONEG_ADVERTISE_10_100_ALL)) {
+
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_vco_speed - change the VCO speed register
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ */
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
+
+ e_dbg("e1000_set_vco_speed");
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+
+/**
+ * e1000_enable_mng_pass_thru - check for bmc pass through
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Verifies whether the hardware should allow ARPs to be processed by the host.
+ * returns: true/false
+ */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ if (hw->asf_firmware_present) {
+ manc = er32(MANC);
+
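+		/* Pass-through requires TCO receive and MAC address
+		 * filtering to be enabled, and applies only when the BMC
+		 * sits on SMBus with ASF disabled.
+		 */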
+ if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+ !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ return false;
+ if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+ return true;
+ }
+ return false;
+}
+
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 i;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+	/* This loop will early-out once the no-link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+		if (!(mii_status_reg & MII_SR_LINK_STATUS))
+ break;
+ mdelay(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ mdelay(1000);
+
+	/* Now we will re-enable the transmitter on the PHY */
+
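+	/* The transmitter disable bits written above (0xFFFF) are cleared in
+	 * stages (0xFFF0, 0xFF00, then 0x0000), with 50 ms of settle time
+	 * between the writes.
+	 */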
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ mdelay(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_auto_rd_done - wait for EEPROM auto-read completion
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Waits for the EEPROM auto-read to complete.
+ * returns: E1000_SUCCESS (the wait is a fixed 5 ms delay and cannot fail).
+ */
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+ e_dbg("e1000_get_auto_rd_done");
+ msleep(5);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_cfg_done - wait for PHY configuration completion
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Waits for the PHY configuration to complete.
+ * returns: E1000_SUCCESS (the wait is a fixed 10 ms delay and cannot fail).
+ */
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+ e_dbg("e1000_get_phy_cfg_done");
+ mdelay(10);
+ return E1000_SUCCESS;
+}
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <scsi/fc/fc_fcoe.h>
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
+
+char ixgbe_driver_name[] = "ixgbe";
+static const char ixgbe_driver_string[] =
+ "Intel(R) 10 Gigabit PCI Express Network Driver";
+#define MAJ 3
+#define MIN 4
+#define BUILD 8
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+ __stringify(BUILD) "-k"
+const char ixgbe_driver_version[] = DRV_VERSION;
+static const char ixgbe_copyright[] =
+ "Copyright (c) 1999-2011 Intel Corporation.";
+
+static const struct ixgbe_info *ixgbe_info_tbl[] = {
+ [board_82598] = &ixgbe_82598_info,
+ [board_82599] = &ixgbe_82599_info,
+ [board_X540] = &ixgbe_X540_info,
+};
+
+/* ixgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
+
+#ifdef CONFIG_IXGBE_DCA
+static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
+ void *p);
+static struct notifier_block dca_notifier = {
+ .notifier_call = ixgbe_notify_dca,
+ .next = NULL,
+ .priority = 0
+};
+#endif
+
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs,
+ "Maximum number of virtual functions to allocate per physical function");
+#endif /* CONFIG_PCI_IOV */
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
+static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
+ schedule_work(&adapter->service_task);
+}
+
+static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
+{
+ BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+}
+
+struct ixgbe_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+ /* General Registers */
+ {IXGBE_CTRL, "CTRL"},
+ {IXGBE_STATUS, "STATUS"},
+ {IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {IXGBE_EICR, "EICR"},
+
+ /* RX Registers */
+ {IXGBE_SRRCTL(0), "SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+ {IXGBE_RDLEN(0), "RDLEN"},
+ {IXGBE_RDH(0), "RDH"},
+ {IXGBE_RDT(0), "RDT"},
+ {IXGBE_RXDCTL(0), "RXDCTL"},
+ {IXGBE_RDBAL(0), "RDBAL"},
+ {IXGBE_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IXGBE_TDBAL(0), "TDBAL"},
+ {IXGBE_TDBAH(0), "TDBAH"},
+ {IXGBE_TDLEN(0), "TDLEN"},
+ {IXGBE_TDH(0), "TDH"},
+ {IXGBE_TDT(0), "TDT"},
+ {IXGBE_TXDCTL(0), "TXDCTL"},
+
+ /* List Terminator */
+ {}
+};
+
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+ int i = 0, j = 0;
+ char rname[16];
+ u32 regs[64];
+
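+	/* Per-queue ring registers are dumped for all 64 queue instances;
+	 * anything else is printed as a single register read.
+	 */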
+ switch (reginfo->ofs) {
+ case IXGBE_SRRCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ break;
+ case IXGBE_DCA_RXCTRL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ break;
+ case IXGBE_RDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ break;
+ case IXGBE_RDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ break;
+ case IXGBE_RDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ break;
+ case IXGBE_RXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ break;
+ case IXGBE_RDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ break;
+ case IXGBE_RDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ break;
+ case IXGBE_TDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ break;
+ case IXGBE_TDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ break;
+ case IXGBE_TDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ break;
+ case IXGBE_TDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ break;
+ case IXGBE_TDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ break;
+ case IXGBE_TXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ break;
+ default:
+ pr_info("%-15s %08x\n", reginfo->name,
+ IXGBE_READ_REG(hw, reginfo->ofs));
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+ pr_err("%-15s", rname);
+ for (j = 0; j < 8; j++)
+ pr_cont(" %08x", regs[i*8+j]);
+ pr_cont("\n");
+ }
+
+}
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_reg_info *reginfo;
+ int n = 0;
+ struct ixgbe_ring *tx_ring;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct ixgbe_ring *rx_ring;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ pr_info("Device Name state "
+ "trans_start last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ pr_info(" Register Name Value\n");
+ for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ ixgbe_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ tx_buffer_info =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] "
+ "[PlPOIdStDDt Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
+ " %04X %p %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp,
+ tx_buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ pr_cont(" NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ pr_cont(" NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ pr_cont(" NTC\n");
+ else
+ pr_cont("\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(tx_buffer_info->dma),
+ tx_buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info("%5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 16 15 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & IXGBE_RXD_STAT_DD) {
+ /* Descriptor Done */
+ pr_info("RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ rx_buffer_info->skb);
+ } else {
+ pr_info("R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)rx_buffer_info->dma,
+ rx_buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(rx_buffer_info->dma),
+ rx_ring->rx_buf_len, true);
+
+ if (rx_ring->rx_buf_len
+ < IXGBE_RXBUFFER_2K)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(
+ rx_buffer_info->page_dma +
+ rx_buffer_info->page_offset
+ ),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ pr_cont(" NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ pr_cont(" NTC\n");
+ else
+ pr_cont("\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+ struct ixgbe_hw *hw = &adapter->hw;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ if (direction == -1)
+ direction = 0;
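+		/* 82598: 8-bit IVAR entries, four per 32-bit register; Rx
+		 * queues occupy entries 0-63 and Tx queues entries 64-127.
+		 */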
+ index = (((direction * 64) + queue) >> 2) & 0x1F;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+ ivar &= ~(0xFF << (8 * (queue & 0x3)));
+ ivar |= (msix_vector << (8 * (queue & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((queue & 1) * 8);
+ ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
+ break;
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
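+			/* 82599/X540: each IVAR register covers one queue
+			 * pair; byte lanes are Rx-even, Tx-even, Rx-odd,
+			 * Tx-odd.
+			 */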
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+ mask = (qmask >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
+ struct ixgbe_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->dma) {
+ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->dma = 0;
+}
+
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_buffer_info)
+{
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info->skb)
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u32 data = 0;
+ u32 xoff[8] = {0};
+ int i;
+
+ if ((hw->fc.current_mode == ixgbe_fc_full) ||
+ (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ break;
+ default:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ }
+ hwstats->lxoffrxc += data;
+
+ /* refill credits (no tx hang) if we received xoff */
+ if (!data)
+ return;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->tx_ring[i]->state);
+ return;
+ } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+ return;
+
+ /* update stats for each tc, only valid with PFC enabled */
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ break;
+ default:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ }
+ hwstats->pxoffrxc[i] += xoff[i];
+ }
+
+ /* disarm tx queues that have received xoff frames */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ u8 tc = tx_ring->dcb_tc;
+
+ if (xoff[tc])
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+}
+
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
+{
+ return ring->tx_stats.completed;
+}
+
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
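+	/* Descriptors between head (hardware fetch position) and tail
+	 * (software write position) are still pending, modulo ring wrap.
+	 */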
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
+}
+
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+ u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+ u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /*
+ * Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * pfc clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_done_old == tx_done) && tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and continue */
+ tx_ring->tx_stats.tx_done_old = tx_done;
+ /* reset the countdown */
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
+ * @adapter: driver private struct
+ **/
+static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
+{
+
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ u16 i = tx_ring->next_to_clean;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+
+ for (; budget; budget--) {
+ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ /* count the packet as being completed */
+ tx_ring->tx_stats.completed++;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* prevent any other reads prior to eop_desc being verified */
+ rmb();
+
+ do {
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
+ tx_desc->wb.status = 0;
+ if (likely(tx_desc == eop_desc)) {
+ eop_desc = NULL;
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+ }
+
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ }
+
+ } while (eop_desc);
+ }
+
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ struct ixgbe_hw *hw = &adapter->hw;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, i,
+ tx_ring->tx_buffer_info[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ e_info(probe,
+ "tx hang %d detected on queue %d, resetting adapter\n",
+ adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+ /* schedule immediate reset if we believe we hung */
+ ixgbe_tx_timeout_reset(adapter);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBE_DOWN, &adapter->state)) {
+ netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return !!budget;
+}
+
+#ifdef CONFIG_IXGBE_DCA
+static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ int cpu)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rxctrl;
+ u8 reg_idx = rx_ring->reg_idx;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+ rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+ rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ break;
+ default:
+ break;
+ }
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
+}
+
+static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring,
+ int cpu)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txctrl;
+ u8 reg_idx = tx_ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+ txctrl |= dca3_get_tag(tx_ring->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+ txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *ring;
+ int cpu = get_cpu();
+
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_update_tx_dca(adapter, ring, cpu);
+
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_update_rx_dca(adapter, ring, cpu);
+
+ q_vector->cpu = cpu;
+out_no_update:
+ put_cpu();
+}
+
+static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
+{
+ int num_q_vectors;
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ return;
+
+ /* always use CB2 mode, difference is masked in the CB driver */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (i = 0; i < num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ ixgbe_update_dca(adapter->q_vector[i]);
+ }
+}
+
+static int __ixgbe_notify_dca(struct device *dev, void *data)
+{
+ struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long event = *(unsigned long *)data;
+
+ if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
+ return 0;
+
+ switch (event) {
+ case DCA_PROVIDER_ADD:
+ /* if we're already enabled, don't do it again */
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ break;
+ if (dca_add_requester(dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ ixgbe_setup_dca(adapter);
+ break;
+ }
+ /* Fall Through since DCA is disabled. */
+ case DCA_PROVIDER_REMOVE:
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+ dca_remove_requester(dev);
+ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ }
+ break;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_IXGBE_DCA */
+
+static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+}
+
+/**
+ * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
+ * @adapter: address of board private structure
+ * @rx_desc: advanced rx descriptor
+ *
+ * Returns : true if it is FCoE pkt
+ */
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
+ (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
+ IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
+}
+
+/**
+ * ixgbe_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @ring: rx descriptor ring (for a specific queue)
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct napi_struct *napi = &q_vector->napi;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ if (is_vlan && (tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, tag);
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ napi_gro_receive(napi, skb);
+ else
+ netif_rx(skb);
+}
+
+/**
+ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @rx_desc: advanced rx descriptor
+ * @skb: skb currently being received and modified
+ * @status_err: status error value of last descriptor in packet
+ **/
+static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u32 status_err)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ /*
+ * 82599 errata, UDP frames with a 0 checksum can be marked as
+ * checksum errors.
+ */
+ if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
+ (adapter->hw.mac.type == ixgbe_mac_82599EB))
+ return;
+
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ struct sk_buff *skb;
+ u16 i = rx_ring->next_to_use;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(rx_ring->netdev);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ ixgbe_release_rx_desc(rx_ring, i);
+ }
+}
+
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
+{
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR_SIZE)
+ hlen = IXGBE_RX_HDR_SIZE;
+ return hlen;
+}
+
+/**
+ * ixgbe_transform_rsc_queue - change rsc queue into a full packet
+ * @skb: pointer to the last skb in the rsc queue
+ *
+ * This function changes a queue full of hw rsc buffers into a completed
+ * packet. It uses the ->prev pointers to find the first packet and then
+ * turns it into the frag list owner.
+ **/
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+{
+ unsigned int frag_list_size = 0;
+ unsigned int skb_cnt = 1;
+
+ while (skb->prev) {
+ struct sk_buff *prev = skb->prev;
+ frag_list_size += skb->len;
+ skb->prev = NULL;
+ skb = prev;
+ skb_cnt++;
+ }
+
+ skb_shinfo(skb)->frag_list = skb->next;
+ skb->next = NULL;
+ skb->len += frag_list_size;
+ skb->data_len += frag_list_size;
+ skb->truesize += frag_list_size;
+ IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
+ return skb;
+}
+
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK);
+}
+
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ int budget)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ const int current_node = numa_node_id();
+#ifdef IXGBE_FCOE
+ int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
+ u32 staterr;
+ u16 i;
+ u16 cleaned_count = 0;
+ bool pkt_is_rsc = false;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
+ prefetch(skb->data);
+
+ if (ring_is_rsc_enabled(rx_ring))
+ pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+
+		/* A linear skb is the first buffer of the packet and still
+		 * holds the header; once page fragments are attached the skb
+		 * becomes nonlinear.
+		 */
+		if (!skb_is_nonlinear(skb)) {
+ u16 hlen;
+ if (pkt_is_rsc &&
+ !(staterr & IXGBE_RXD_STAT_EOP) &&
+ !skb->prev) {
+ /*
+ * When HWRSC is enabled, delay unmapping
+ * of the first packet. It carries the
+ * header information, HW may still
+ * access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ IXGBE_RSC_CB(skb)->delay_unmap = true;
+ IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
+ } else {
+ dma_unmap_single(rx_ring->dev,
+ rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ }
+ rx_buffer_info->dma = 0;
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ hlen = ixgbe_get_hlen(rx_desc);
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ hlen = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ skb_put(skb, hlen);
+ } else {
+ /* assume packet split since header is unmapped */
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ if (upper_len) {
+ dma_unmap_page(rx_ring->dev,
+ rx_buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((page_count(rx_buffer_info->page) == 1) &&
+ (page_to_nid(rx_buffer_info->page) == current_node))
+ get_page(rx_buffer_info->page);
+ else
+ rx_buffer_info->page = NULL;
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ if (pkt_is_rsc) {
+ u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT;
+ next_buffer = &rx_ring->rx_buffer_info[nextp];
+ } else {
+ next_buffer = &rx_ring->rx_buffer_info[i];
+ }
+
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ if (skb->prev) {
+ skb = ixgbe_transform_rsc_queue(skb);
+ /* if we got here without RSC the packet is invalid */
+ if (!pkt_is_rsc) {
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ if (ring_is_rsc_enabled(rx_ring)) {
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(rx_ring->dev,
+ IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ }
+ if (pkt_is_rsc) {
+ if (ring_is_ps_enabled(rx_ring))
+ rx_ring->rx_stats.rsc_count +=
+ skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rx_stats.rsc_count +=
+ IXGBE_RSC_CB(skb)->skb_cnt;
+ rx_ring->rx_stats.rsc_flush++;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ ixgbe_rx_hash(rx_desc, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+#ifdef IXGBE_FCOE
+ /* if ddp, not passing to ULD unless for FCP_RSP or error */
+ if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+ ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
+ staterr);
+ if (!ddp_bytes) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+ }
+#endif /* IXGBE_FCOE */
+ ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+
+ budget--;
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ if (!budget)
+ break;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = ixgbe_desc_unused(rx_ring);
+
+ if (cleaned_count)
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifdef IXGBE_FCOE
+ /* include DDPed FCoE data */
+ if (ddp_bytes > 0) {
+ unsigned int mss;
+
+ mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
+ sizeof(struct fc_frame_header) -
+ sizeof(struct fcoe_crc_eof);
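+		/*
+		 * Round mss down to a 512-byte multiple (e.g. 2112 -> 2048,
+		 * illustrative value) so the DDPed byte count is split into
+		 * reasonably sized pseudo-packets below.
+		 */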
+ if (mss > 512)
+ mss &= ~511;
+ total_rx_bytes += ddp_bytes;
+ total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+ }
+#endif /* IXGBE_FCOE */
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return !!budget;
+}
+
+/**
+ * ixgbe_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_q_vector *q_vector;
+ int q_vectors, v_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /* Populate MSIX to EITR Select */
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ struct ixgbe_ring *ring;
+ q_vector = adapter->q_vector[v_idx];
+
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+ if (q_vector->tx.ring && !q_vector->rx.ring)
+ /* tx only */
+ q_vector->eitr = adapter->tx_eitr_param;
+ else if (q_vector->rx.ring)
+ /* rx or mixed */
+ q_vector->eitr = adapter->rx_eitr_param;
+
+ ixgbe_write_eitr(q_vector);
+ }
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ v_idx);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_set_ivar(adapter, -1, 1, v_idx);
+ break;
+
+ default:
+ break;
+ }
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ if (adapter->num_vfs)
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+ else
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbe_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see ixgbe_param.c).
+ **/
+static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring_container *ring_container)
+{
+ u64 bytes_perint;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
+ u32 timepassed_us;
+ u8 itr_setting = ring_container->itr;
+
+ if (packets == 0)
+ return;
+
+ /* simple throttlerate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+ timepassed_us = 1000000/q_vector->eitr;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
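+	/*
+	 * Worked example with hypothetical numbers: at eitr = 8000 ints/s
+	 * the timeslice is 125 us; 50000 bytes in that window gives
+	 * bytes_perint = 400 bytes/usec (~400 MB/s), which a typical
+	 * eitr_high threshold would classify as bulk traffic.
+	 */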
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ itr_setting = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ itr_setting = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ itr_setting = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ itr_setting = low_latency;
+ break;
+ }
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
+}
+
+/**
+ * ixgbe_write_eitr - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
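+	/*
+	 * Assuming the usual scaling of 1000000000 / (rate * 256), a rate
+	 * of 20000 ints/s maps to an interval count of roughly 195.
+	 */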
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ /* must write high and low 16 bits to reset counter */
+ itr_reg |= (itr_reg << 16);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ /*
+		 * 82599 and X540 can support a value of zero, so allow it for
+		 * max interrupt rate, but there is an erratum where it cannot
+		 * be zero when RSC is enabled
+ */
+ if (itr_reg == 8 &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ itr_reg = 0;
+
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
+}
+
+static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
+{
+ u32 new_itr = q_vector->eitr;
+ u8 current_itr;
+
+ ixgbe_update_itr(q_vector, &q_vector->tx);
+ ixgbe_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 100000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 8000;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != q_vector->eitr) {
+ /* do an exponential smoothing */
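+		/* e.g. old 8000 and target 20000 -> (8000 * 9 + 20000) / 10 = 9200 */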
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
+
+ /* save the algorithm value here */
+ q_vector->eitr = new_itr;
+
+ ixgbe_write_eitr(q_vector);
+ }
+}
+
+/**
+ * ixgbe_check_overtemp_subtask - check for over-temperature events
+ * @adapter: pointer to adapter
+ **/
+static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr = adapter->interrupt_event;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+		/*
+		 * The warning interrupt is shared by both ports, so we
+		 * cannot tell whether it was raised for this port. We may
+		 * also have missed the interrupt entirely, so always check
+		 * whether we got an LSC as well.
+		 */
+ if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
+ !(eicr & IXGBE_EICR_LSC))
+ return;
+
+ if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
+ u32 autoneg;
+ bool link_up = false;
+
+ hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (link_up)
+ return;
+ }
+
+ /* Check if this is not due to overtemp */
+ if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
+ return;
+
+ break;
+ default:
+ if (!(eicr & IXGBE_EICR_GPI_SDP0))
+ return;
+ break;
+ }
+ e_crit(drv,
+ "Network adapter has been stopped because it has over heated. "
+ "Restart the computer. If the problem persists, "
+ "power off the system and replace the adapter\n");
+
+ adapter->interrupt_event = 0;
+}
+
+static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
+ (eicr & IXGBE_EICR_GPI_SDP1)) {
+ e_crit(probe, "Fan has stopped, replace the adapter\n");
+ /* write to clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ }
+}
+
+static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (eicr & IXGBE_EICR_GPI_SDP2) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+
+ if (eicr & IXGBE_EICR_GPI_SDP1) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+}
+
+static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->lsc_int++;
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ /* skip the flush */
+}
+
+static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ /* skip the flush */
+}
+
+/**
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
+ bool flush)
+{
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+ /* don't reenable LSC while waiting for link */
+ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+ mask &= ~IXGBE_EIMS_LSC;
+
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ mask |= IXGBE_EIMS_GPI_SDP0;
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mask |= IXGBE_EIMS_ECC;
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP2;
+ mask |= IXGBE_EIMS_MAILBOX;
+ break;
+ default:
+ break;
+ }
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+ mask |= IXGBE_EIMS_FLOW_DIR;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ if (queues)
+ ixgbe_irq_enable_queues(adapter, ~0);
+ if (flush)
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+}
+
+static irqreturn_t ixgbe_msix_other(int irq, void *data)
+{
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+
+ /*
+	 * Workaround for silicon errata: use clear-by-write instead
+	 * of clear-by-read. Reading with EICS returns the interrupt
+	 * causes without clearing them, which is done later by the
+	 * write to EICR.
+ */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
+
+ if (eicr & IXGBE_EICR_LSC)
+ ixgbe_check_lsc(adapter);
+
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (eicr & IXGBE_EICR_ECC)
+ e_info(link, "Received unrecoverable ECC Err, please "
+ "reboot\n");
+ /* Handle Flow Director Full threshold interrupt */
+ if (eicr & IXGBE_EICR_FLOW_DIR) {
+ int reinit_count = 0;
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &ring->state))
+ reinit_count++;
+ }
+ if (reinit_count) {
+ /* no more flow director interrupts until after init */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
+ adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+ ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_check_fan_failure(adapter, eicr);
+
+ /* re-enable the original interrupt state, no lsc, no queues */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, false, false);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
+{
+ struct ixgbe_q_vector *q_vector = data;
+
+ /* EIAM disabled interrupts (on this vector) for us */
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+}
+
+static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.work_limit = a->tx_work_limit;
+}
+
+/**
+ * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
+ int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
+ int v_start = 0;
+
+ /* only one q_vector if MSI-X is disabled. */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ /*
+ * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+ *
+ * Re-adjusting *qpv takes care of the remainder.
+ */
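+	/*
+	 * E.g. 10 Rx rings over 4 vectors distribute as 3, 3, 2, 2
+	 * through the DIV_ROUND_UP() below (hypothetical counts).
+	 */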
+ for (; v_start < q_vectors && rxr_remaining; v_start++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
+ for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+ }
+
+ /*
+	 * If there are not enough q_vectors for each ring to have its own
+	 * vector, then we must pair up Rx/Tx rings on each vector
+ */
+ if ((v_start + txr_remaining) > q_vectors)
+ v_start = 0;
+
+ for (; v_start < q_vectors && txr_remaining; v_start++) {
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
+ for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ }
+}
+
+/**
+ * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int vector, err;
+ int ri = 0, ti = 0;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
+
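+		/*
+		 * Name each vector after its role; e.g. a combined vector on
+		 * an interface named eth0 is requested as "eth0-TxRx-0"
+		 * (interface name illustrative).
+		 */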
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+			e_err(probe, "request_irq failed for MSIX interrupt, "
+			      "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ /* If Flow Director is enabled, set interrupt affinity */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(entry->vector,
+ q_vector->affinity_mask);
+ }
+ }
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ ixgbe_msix_other, 0, netdev->name, adapter);
+ if (err) {
+ e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector].vector,
+ adapter->q_vector[vector]);
+ }
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+/**
+ * ixgbe_intr - legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t ixgbe_intr(int irq, void *data)
+{
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
+ u32 eicr;
+
+ /*
+ * Workaround for silicon errata on 82598. Mask the interrupts
+ * before the read of EICR.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+	/* For NAPI we use EIAM to auto-mask the tx/rx interrupt bits on
+	 * read, therefore no explicit interrupt disable is necessary */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ if (!eicr) {
+ /*
+		 * Shared interrupt alert!
+		 * Make sure interrupts are enabled, because the read will
+		 * have disabled them due to EIAM. Finish the workaround of
+		 * silicon errata on 82598 by unmasking the interrupt that we
+		 * masked before the EICR read.
+ */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, true, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+
+ if (eicr & IXGBE_EICR_LSC)
+ ixgbe_check_lsc(adapter);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_check_fan_failure(adapter, eicr);
+
+ if (napi_schedule_prep(&(q_vector->napi))) {
+ /* would disable interrupts here but EIAM disabled it */
+ __napi_schedule(&(q_vector->napi));
+ }
+
+ /*
+	 * re-enable link (maybe) and non-queue interrupts, no flush.
+ * ixgbe_poll will re-enable the queue interrupts
+ */
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, false, false);
+
+ return IRQ_HANDLED;
+}
+
+static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int i;
+
+ /* legacy and MSI only use one vector */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i]->q_vector = NULL;
+ adapter->rx_ring[i]->next = NULL;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i]->q_vector = NULL;
+ adapter->tx_ring[i]->next = NULL;
+ }
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+ memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
+ memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
+ }
+}
+
+/**
+ * ixgbe_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ /* map all of the rings to the q_vectors */
+ ixgbe_map_rings_to_vectors(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ err = ixgbe_request_msix_irqs(adapter);
+ else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
+ netdev->name, adapter);
+ else
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
+ netdev->name, adapter);
+
+ if (err) {
+ e_err(probe, "request_irq failed, Error %d\n", err);
+
+ /* place q_vectors and rings back into a known good state */
+ ixgbe_reset_q_vectors(adapter);
+ }
+
+ return err;
+}
+
+static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
+{
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+ i = q_vectors - 1;
+ free_irq(adapter->msix_entries[i].vector, adapter);
+ i--;
+
+ for (; i >= 0; i--) {
+ /* free only the irqs that were actually requested */
+ if (!adapter->q_vector[i]->rx.ring &&
+ !adapter->q_vector[i]->tx.ring)
+ continue;
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(adapter->msix_entries[i].vector,
+ NULL);
+
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+ } else {
+ free_irq(adapter->pdev->irq, adapter);
+ }
+
+ /* clear q_vector state information */
+ ixgbe_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int i;
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
+ **/
+static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
+ EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
+
+ ixgbe_set_ivar(adapter, 0, 0, 0);
+ ixgbe_set_ivar(adapter, 1, 0, 0);
+
+ e_info(hw, "Legacy interrupt IVAR setup done\n");
+}
+
+/**
+ * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl = IXGBE_TXDCTL_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
+
+ /*
+	 * Set WTHRESH to encourage burst writeback; it should not be set
+	 * higher than 1 when ITR is 0, as that could cause false TX hangs.
+ *
+ * In order to avoid issues WTHRESH + PTHRESH should always be equal
+ * to or less than the number of on chip descriptors, which is
+ * currently 40.
+ */
+ if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
+ txdctl |= (1 << 16); /* WTHRESH = 1 */
+ else
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
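+	/*
+	 * With the fields above and a non-zero ITR, txdctl ends up as
+	 * ENABLE | (8 << 16) | (1 << 8) | 32: WTHRESH, HTHRESH and
+	 * PTHRESH packed at bits 16, 8 and 0 respectively.
+	 */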
+
+ /* reinitialize flowdirector state */
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ adapter->atr_sample_rate) {
+ ring->atr_sample_rate = adapter->atr_sample_rate;
+ ring->atr_count = 0;
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
+
+ /* enable queue */
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
+
+ /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* poll to verify queue is enabled */
+ do {
+ usleep_range(1000, 2000);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rttdcs;
+ u32 reg;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ /* disable the arbiter while setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ /* set transmit pool layout */
+ switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+ default:
+ if (!tcs)
+ reg = IXGBE_MTQC_64Q_1PB;
+ else if (tcs <= 4)
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Enable Security TX Buffer IFG for multiple pb */
+ if (tcs) {
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+ }
+ break;
+ }
+
+ /* re-enable the arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+}
+
+/**
+ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 dmatxctl;
+ u32 i;
+
+ ixgbe_setup_mtqc(adapter);
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ /* DMATXCTL.EN must be before Tx queues are enabled */
+ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ dmatxctl |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+ }
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
+{
+ u32 srrctl;
+ u8 reg_idx = rx_ring->reg_idx;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB: {
+ struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ const int mask = feature[RING_F_RSS].mask;
+ reg_idx = reg_idx & mask;
+ }
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ break;
+ }
+
+ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+
+ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ if (adapter->num_vfs)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+ srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK;
+
+ if (ring_is_ps_enabled(rx_ring)) {
+#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
+ srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+ srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ } else {
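+		/*
+		 * One-buffer mode: round the buffer length up to the 1 KB
+		 * granularity of the BSIZEPKT field, e.g. a 2 KB buffer
+		 * encodes as 2 (assuming the usual 10-bit shift).
+		 */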
+ srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ }
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+}
+
+static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+ 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+ 0x6A3E67EA, 0x14364D17, 0x3BED200D};
+ u32 mrqc = 0, reta = 0;
+ u32 rxcsum;
+ int i, j;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+ int maxq = adapter->ring_feature[RING_F_RSS].indices;
+
+ if (tcs)
+ maxq = min(maxq, adapter->num_tx_queues / tcs);
+
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
+
+ /* Fill out redirection table */
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == maxq)
+ j = 0;
+ /* reta = 4-byte sliding window of
+ * 0x00..(indices-1)(indices-1)00..etc. */
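+		/*
+		 * E.g. with maxq = 4 each register written below holds four
+		 * entries such as 0x00112233 (each byte is j * 0x11).
+		 */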
+ reta = (reta << 8) | (j * 0x11);
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ }
+
+	/* Disable indicating checksum in descriptor; this enables the RSS hash */
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
+ (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ mrqc = IXGBE_MRQC_RSSEN;
+ } else {
+ int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+ | IXGBE_FLAG_SRIOV_ENABLED);
+
+ switch (mask) {
+ case (IXGBE_FLAG_RSS_ENABLED):
+ if (!tcs)
+ mrqc = IXGBE_MRQC_RSSEN;
+ else if (tcs <= 4)
+ mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ else
+ mrqc = IXGBE_MRQC_RTRSS8TCEN;
+ break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Perform hash on these packet types */
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
+
+/**
+ * ixgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rscctrl;
+ int rx_buf_len;
+ u8 reg_idx = ring->reg_idx;
+
+ if (!ring_is_rsc_enabled(ring))
+ return;
+
+ rx_buf_len = ring->rx_buf_len;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ * we must limit the number of descriptors so that the
+ * total size of max desc * buf_len is not greater
+ * than 65535
+ */
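+	/*
+	 * E.g. 16 descriptors of 4 KB buffers would total 65536 bytes,
+	 * one over the limit, hence the drop to 8 descriptors once
+	 * rx_buf_len reaches IXGBE_RXBUFFER_4K below.
+	 */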
+ if (ring_is_ps_enabled(ring)) {
+#if (MAX_SKB_FRAGS > 16)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+ } else {
+ if (rx_buf_len < IXGBE_RXBUFFER_4K)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ else if (rx_buf_len < IXGBE_RXBUFFER_8K)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+ else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
+/**
+ * ixgbe_set_uta - Set unicast filter table address
+ * @adapter: board private structure
+ *
+ * The unicast table address is a register array of 32-bit registers.
+ * The table is meant to be used in a way similar to how the MTA is used
+ * however due to certain limitations in the hardware it is necessary to
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ * enable bit to allow vlan tag stripping when promiscuous mode is enabled
+ **/
+static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
+ "the polling period\n", reg_idx);
+ }
+}
+
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+ "the polling period\n", reg_idx);
+ }
+}
+
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ ixgbe_disable_rx_queue(adapter, ring);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
+
+ ixgbe_configure_srrctl(adapter, ring);
+ ixgbe_configure_rscctl(adapter, ring);
+
+ /* If operating in IOV mode set RLPML for X540 */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+ hw->mac.type == ixgbe_mac_X540) {
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
+ ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
+ }
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /*
+ * enable cache line friendly hardware writes:
+ * PTHRESH=32 descriptors (half the internal cache),
+ * this also removes ugly rx_no_buffer_count increment
+ * HTHRESH=4 descriptors (to minimize latency on fetch)
+ * WTHRESH=8 burst writeback up to two cache lines
+ */
+ rxdctl &= ~0x3FFFFF;
+ rxdctl |= 0x080420;
+ }
+
+ /* enable receive descriptor ring */
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ ixgbe_rx_desc_queue_enable(adapter, ring);
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
+}
+
+static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int p;
+
+	/* PSRTYPE must be initialized in non-82598 adapters */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_L2HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
+ psrtype |= (adapter->num_rx_queues_per_pool << 29);
+
+ for (p = 0; p < adapter->num_rx_pools; p++)
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+ psrtype);
+}
+
+static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr_ext;
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
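+	/*
+	 * E.g. with 40 VFs (hypothetical count) vf_shift = 8 and
+	 * reg_offset = 1, so the PF pool enable lands in bit 8 of
+	 * VFRE(1)/VFTE(1) below.
+	 */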
+
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+ /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /*
+ * Set up VF register offsets for selected VT Mode,
+ * i.e. 32 or 64 VFs for SR-IOV
+ */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+
+ /* enable Tx loopback for VF/PF communication */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ /* Enable MAC Anti-Spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw,
+ (adapter->antispoofing_enabled =
+ (adapter->num_vfs != 0)),
+ adapter->num_vfs);
+}
+
+static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int rx_buf_len;
+ struct ixgbe_ring *rx_ring;
+ int i;
+ u32 mhadd, hlreg0;
+
+ /* Decide whether to use packet split mode or not */
+ /* On by default */
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (adapter->num_vfs)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+ /* Disable packet split due to 82599 erratum #45 */
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+#ifdef IXGBE_FCOE
+ /* adjust max frame to be able to do baby jumbo for FCoE */
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+ max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
+ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
+ mhadd &= ~IXGBE_MHADD_MFS_MASK;
+ mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+ }
+
+ /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
+ max_frame += VLAN_HLEN;
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = IXGBE_RX_HDR_SIZE;
+ } else {
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (netdev->mtu <= ETH_DATA_LEN))
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ /*
+ * Make best use of allocation by using all but 1K of a
+ * power of 2 allocation that will be used for skb->head.
+ */
+ else if (max_frame <= IXGBE_RXBUFFER_3K)
+ rx_buf_len = IXGBE_RXBUFFER_3K;
+ else if (max_frame <= IXGBE_RXBUFFER_7K)
+ rx_buf_len = IXGBE_RXBUFFER_7K;
+ else if (max_frame <= IXGBE_RXBUFFER_15K)
+ rx_buf_len = IXGBE_RXBUFFER_15K;
+ else
+ rx_buf_len = IXGBE_MAX_RXBUFFER;
+ }
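+	/*
+	 * E.g. an MTU of 4000 (illustrative) gives max_frame = 4022
+	 * including the VLAN allowance, which lands in the
+	 * IXGBE_RXBUFFER_7K bucket above.
+	 */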
+
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring = adapter->rx_ring[i];
+ rx_ring->rx_buf_len = rx_buf_len;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
+ set_ring_ps_enabled(rx_ring);
+ else
+ clear_ring_ps_enabled(rx_ring);
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_ring_rsc_enabled(rx_ring);
+ else
+ clear_ring_rsc_enabled(rx_ring);
+
+#ifdef IXGBE_FCOE
+ if (netdev->features & NETIF_F_FCOE_MTU) {
+ struct ixgbe_ring_feature *f;
+ f = &adapter->ring_feature[RING_F_FCOE];
+ if ((i >= f->mask) && (i < f->mask + f->indices)) {
+ clear_ring_ps_enabled(rx_ring);
+ if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ } else if (!ring_is_rsc_enabled(rx_ring) &&
+ !ring_is_ps_enabled(rx_ring)) {
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
+ }
+#endif /* IXGBE_FCOE */
+ }
+}
+
+static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ /*
+ * For VMDq support of different descriptor types or
+ * buffer sizes through the use of multiple SRRCTL
+ * registers, RDRXCTL.MVMEN must be set to 1
+ *
+ * also, the manual doesn't mention it clearly but DCA hints
+ * will only use queue 0's tags unless this bit is set. Side
+ * effects of setting this bit are only that SRRCTL must be
+ * fully programmed [0..15]
+ */
+ rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ /* Disable RSC for ACK packets */
+ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+ /* hardware requires some bits to be set by default */
+ rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ break;
+ default:
+ /* We should do nothing since we don't know this hardware */
+ return;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+}
+
+/**
+ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ u32 rxctrl;
+
+ /* disable receives while setting up the descriptors */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ ixgbe_setup_psrtype(adapter);
+ ixgbe_setup_rdrxctl(adapter);
+
+ /* Program registers for the distribution of queues */
+ ixgbe_setup_mrqc(adapter);
+
+ ixgbe_set_uta(adapter);
+
+ /* set_rx_buffer_len must be called before ring initialization */
+ ixgbe_set_rx_buffer_len(adapter);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
+
+ /* disable drop enable for 82598 parts */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
+
+ /* enable all receives */
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ hw->mac.ops.enable_rx_dma(hw, rxctrl);
+}
+
+static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
+
+ /* add VID to filter table */
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
+
+ /* remove VID from filter table */
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
+ clear_bit(vid, adapter->active_vlans);
+}
+
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+}
+
+/**
+ * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
+{
+ u16 vid;
+
+ ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+/**
+ * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int vfn = adapter->num_vfs;
+ unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+ /* return error if we do not support writing to RAR table */
+ if (!hw->mac.ops.set_rar)
+ return -ENOMEM;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
+ vfn, IXGBE_RAH_AV);
+ count++;
+ }
+ }
+ /* write the addresses in reverse order to avoid write combining */
+ for (; rar_entries > 0 ; rar_entries--)
+ hw->mac.ops.clear_rar(hw, rar_entries);
+
+ return count;
+}
+
+/**
+ * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
+ **/
+void ixgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* set all bits that we expect to always be set */
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
+ fctrl |= IXGBE_FCTRL_PMCF;
+
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
+ if (netdev->flags & IFF_PROMISC) {
+ hw->addr_ctrl.user_set_promisc = true;
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ /* don't hardware filter vlans in promisc mode */
+ ixgbe_vlan_filter_disable(adapter);
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
+ ixgbe_vlan_filter_enable(adapter);
+ hw->addr_ctrl.user_set_promisc = false;
+ /*
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = ixgbe_write_uc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_UPE;
+ vmolr |= IXGBE_VMOLR_ROPE;
+ }
+ }
+
+ if (adapter->num_vfs) {
+ ixgbe_restore_vf_multicasts(adapter);
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ ixgbe_vlan_strip_enable(adapter);
+ else
+ ixgbe_vlan_strip_disable(adapter);
+}
+
+static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbe_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /* legacy and MSI only use one vector */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_enable(&q_vector->napi);
+ }
+}
+
+static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbe_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /* legacy and MSI only use one vector */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+
+#ifdef CONFIG_IXGBE_DCB
+/*
+ * ixgbe_configure_dcb - Configure DCB hardware
+ * @adapter: ixgbe adapter struct
+ *
+ * This is called by the driver on open to configure the DCB hardware.
+ * This is also called by the gennetlink interface when reconfiguring
+ * the DCB state.
+ */
+static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 65536);
+ return;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 32768);
+
+
+ /* Enable VLAN tag insert/strip */
+ adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
+
+ hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+ /* reconfigure the hardware */
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
+#ifdef IXGBE_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+ DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+ DCB_RX_CONFIG);
+ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
+ } else {
+ struct net_device *dev = adapter->netdev;
+
+ if (adapter->ixgbe_ieee_ets)
+ dev->dcbnl_ops->ieee_setets(dev,
+ adapter->ixgbe_ieee_ets);
+ if (adapter->ixgbe_ieee_pfc)
+ dev->dcbnl_ops->ieee_setpfc(dev,
+ adapter->ixgbe_ieee_pfc);
+ }
+
+ /* Enable RSS Hash per TC */
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ int i;
+ u32 reg = 0;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ u8 msb = 0;
+ u8 cnt = adapter->netdev->tc_to_txq[i].count;
+
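+ /*
+ * msb ends up as floor(log2(cnt)); e.g. a TC backed by
+ * 8 queues yields msb == 3, the queue count encoded as
+ * a power-of-two exponent for the RQTC field.
+ */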
+ while (cnt >>= 1)
+ msb++;
+
+ reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+ }
+}
+
+#endif
+
+static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int hdrm;
+ u8 tc = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ hdrm = 32 << adapter->fdir_pballoc;
+ else
+ hdrm = 0;
+
+ hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
+static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (!hlist_empty(&adapter->fdir_filter_list))
+ ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ ixgbe_fdir_write_perfect_filter_82599(hw,
+ &filter->filter,
+ filter->sw_idx,
+ (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[filter->action]->reg_idx);
+ }
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+static void ixgbe_configure(struct ixgbe_adapter *adapter)
+{
+ ixgbe_configure_pb(adapter);
+#ifdef CONFIG_IXGBE_DCB
+ ixgbe_configure_dcb(adapter);
+#endif
+
+ ixgbe_set_rx_mode(adapter->netdev);
+ ixgbe_restore_vlan(adapter);
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ ixgbe_init_fdir_signature_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ ixgbe_init_fdir_perfect_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ ixgbe_fdir_filter_restore(adapter);
+ }
+
+ ixgbe_configure_virtualization(adapter);
+
+ ixgbe_configure_tx(adapter);
+ ixgbe_configure_rx(adapter);
+}
+
+static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_sfp_ftl_active:
+ return true;
+ case ixgbe_phy_nl:
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return true;
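+ /* fall through */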
+ default:
+ return false;
+ }
+}
+
+/**
+ * ixgbe_sfp_link_config - set up SFP+ link
+ * @adapter: pointer to private adapter struct
+ **/
+static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
+{
+ /*
+ * We are assuming the worst case scenario here, namely that
+ * an SFP was inserted/removed after the reset but before SFP
+ * detection was enabled. As such, the best solution is simply
+ * to start searching as soon as we start up.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+}
+
+/**
+ * ixgbe_non_sfp_link_config - set up non-SFP+ link
+ * @hw: pointer to private hardware struct
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
+{
+ u32 autoneg;
+ bool negotiation, link_up = false;
+ u32 ret = IXGBE_ERR_LINK_SETUP;
+
+ if (hw->mac.ops.check_link)
+ ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (ret)
+ goto link_cfg_out;
+
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+ ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+ &negotiation);
+ if (ret)
+ goto link_cfg_out;
+
+ if (hw->mac.ops.setup_link)
+ ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
+link_cfg_out:
+ return ret;
+}
+
+static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gpie = 0;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD;
+ gpie |= IXGBE_GPIE_EIAME;
+ /*
+ * use EIAM to auto-mask when an MSI-X interrupt is asserted;
+ * this saves a register write for every interrupt
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ break;
+ }
+ } else {
+ /* legacy interrupts, use EIAM to auto-mask when reading EICR,
+ * specifically only auto-mask Tx and Rx interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ }
+
+ /* XXX: to interrupt immediately for EICS writes, enable this */
+ /* gpie |= IXGBE_GPIE_EIMEN; */
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
+
+ /* Enable thermal overheat sensor interrupt */
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ gpie |= IXGBE_SDP0_GPIEN;
+
+ /* Enable fan failure interrupt */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+ gpie |= IXGBE_SDP1_GPIEN;
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ gpie |= IXGBE_SDP1_GPIEN;
+ gpie |= IXGBE_SDP2_GPIEN;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+}
+
+static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+ u32 ctrl_ext;
+
+ ixgbe_get_hw_control(adapter);
+ ixgbe_setup_gpie(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ ixgbe_configure_msix(adapter);
+ else
+ ixgbe_configure_msi_and_legacy(adapter);
+
+ /* enable the optics for both multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.enable_tx_laser(hw);
+
+ clear_bit(__IXGBE_DOWN, &adapter->state);
+ ixgbe_napi_enable_all(adapter);
+
+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+ ixgbe_irq_enable(adapter, true, true);
+
+ /*
+ * If this adapter has a fan, check to see if we had a failure
+ * before we enabled the interrupt.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ e_crit(drv, "Fan has stopped, replace the adapter\n");
+ }
+
+ /* enable transmits */
+ netif_tx_start_all_queues(adapter->netdev);
+
+ /* bring the link up in the watchdog; this could race with our first
+ * link-up interrupt, but that shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->service_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+}
+
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ /* put off any impending netdev watchdog timeout */
+ adapter->netdev->trans_start = jiffies;
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ ixgbe_down(adapter);
+ /*
+ * If SR-IOV is enabled, wait a bit before bringing the adapter
+ * back up to give the VFs time to respond to the reset. The
+ * two-second wait is based upon the watchdog timer cycle in
+ * the VF driver.
+ */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ msleep(2000);
+ ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
+void ixgbe_up(struct ixgbe_adapter *adapter)
+{
+ /* hardware has been reset, we need to reload some things */
+ ixgbe_configure(adapter);
+
+ ixgbe_up_complete(adapter);
+}
+
+void ixgbe_reset(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* lock SFP init bit to prevent race conditions with the watchdog */
+ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* clear all SFP and link config related flags while holding SFP_INIT */
+ adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
+ IXGBE_FLAG2_SFP_NEEDS_RESET);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
+ err = hw->mac.ops.init_hw(hw);
+ switch (err) {
+ case 0:
+ case IXGBE_ERR_SFP_NOT_PRESENT:
+ case IXGBE_ERR_SFP_NOT_SUPPORTED:
+ break;
+ case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+ e_dev_err("master disable timed out\n");
+ break;
+ case IXGBE_ERR_EEPROM_VERSION:
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issuesassociated with "
+ "your hardware. If you are experiencing problems "
+ "please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
+ break;
+ default:
+ e_dev_err("Hardware Error: %d\n", err);
+ }
+
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
+}
+
+/**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
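+ /*
+ * RSC may have chained several received buffers into
+ * one packet via skb->prev; walk the chain and free
+ * every segment.
+ */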
+ do {
+ struct sk_buff *this = skb;
+ if (IXGBE_RSC_CB(this)->delay_unmap) {
+ dma_unmap_single(dev,
+ IXGBE_RSC_CB(this)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(this)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ if (rx_buffer_info->page_dma) {
+ dma_unmap_page(dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ rx_buffer_info->page_dma = 0;
+ }
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * ixgbe_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
+{
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbe_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ adapter->fdir_filter_count = 0;
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+void ixgbe_down(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rxctrl;
+ int i;
+
+ /* signal to the interrupt handler that we are going down */
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ /* disable receives */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
+ usleep_range(10000, 20000);
+
+ netif_tx_stop_all_queues(netdev);
+
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
+ IXGBE_FLAG2_RESET_REQUESTED);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ del_timer_sync(&adapter->service_timer);
+
+ if (adapter->num_vfs) {
+ /* Clear EITR Select mapping */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+ }
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* Disable the Tx DMA engine on 82599 and X540 */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
+ break;
+ default:
+ break;
+ }
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbe_reset(adapter);
+
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.disable_tx_laser(hw);
+
+ ixgbe_clean_all_tx_rings(adapter);
+ ixgbe_clean_all_rx_rings(adapter);
+
+#ifdef CONFIG_IXGBE_DCA
+ /* since we reset the hardware, DCA settings were cleared */
+ ixgbe_setup_dca(adapter);
+#endif
+}
+
+/**
+ * ixgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function cleans all Tx and Rx rings attached to this q_vector
+ **/
+static int ixgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct ixgbe_q_vector *q_vector =
+ container_of(napi, struct ixgbe_q_vector, napi);
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
+
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
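+ /*
+ * e.g. a budget of 64 spread across 3 Rx rings gives each
+ * ring max(64/3, 1) == 21 packets to clean per poll.
+ */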
+
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+
+ return 0;
+}
+
+/**
+ * ixgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbe_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ ixgbe_tx_timeout_reset(adapter);
+}
+
+/**
+ * ixgbe_set_rss_queues: Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ f->mask = 0xF;
+ adapter->num_rx_queues = f->indices;
+ adapter->num_tx_queues = f->indices;
+ ret = true;
+ } else {
+ ret = false;
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_set_fdir_queues: Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session. This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
+ f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
+ f_fdir->mask = 0;
+
+ /* Flow Director must have RSS enabled */
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
+ adapter->num_tx_queues = f_fdir->indices;
+ adapter->num_rx_queues = f_fdir->indices;
+ ret = true;
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ }
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 Rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * Rx queues out of the max number of Rx queues; instead, it is used as the
+ * index of the first Rx queue used by FCoE.
+ *
+ **/
+static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ f->indices = min((int)num_online_cpus(), f->indices);
+
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ e_info(probe, "FCoE enabled with RSS\n");
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_set_fdir_queues(adapter);
+ else
+ ixgbe_set_rss_queues(adapter);
+ }
+
+ /* adding FCoE rx rings to the end */
+ f->mask = adapter->num_rx_queues;
+ adapter->num_rx_queues += f->indices;
+ adapter->num_tx_queues += f->indices;
+
+ return true;
+}
+#endif /* IXGBE_FCOE */
+
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ int per_tc_q, q, i, offset = 0;
+ struct net_device *dev = adapter->netdev;
+ int tcs = netdev_get_num_tc(dev);
+
+ if (!tcs)
+ return false;
+
+ /* Map queue offset and counts onto allocated tx queues */
+ per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
+ q = min((int)num_online_cpus(), per_tc_q);
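+ /*
+ * e.g. 8 TCs on a 16-CPU system with 64 Tx queues:
+ * per_tc_q = min(64 / 8, 8) = 8 and q = min(16, 8) = 8,
+ * so TC0 owns queues 0-7, TC1 owns 8-15, and so on.
+ */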
+
+ for (i = 0; i < tcs; i++) {
+ netdev_set_prio_tc_map(dev, i, i);
+ netdev_set_tc_queue(dev, i, q, offset);
+ offset += q;
+ }
+
+ adapter->num_tx_queues = q * tcs;
+ adapter->num_rx_queues = q * tcs;
+
+#ifdef IXGBE_FCOE
+ /* FCoE-enabled queues require special configuration, indexed
+ * by feature-specific indices and mask. Here we map FCoE
+ * indices onto the DCB queue pairs, allowing FCoE to own
+ * configuration later.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ int tc;
+ struct ixgbe_ring_feature *f =
+ &adapter->ring_feature[RING_F_FCOE];
+
+ tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+ f->indices = dev->tc_to_txq[tc].count;
+ f->mask = dev->tc_to_txq[tc].offset;
+ }
+#endif
+
+ return true;
+}
+#endif
+
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
+/**
+ * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ goto done;
+
+#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_set_dcb_queues(adapter))
+ goto done;
+
+#endif
+#ifdef IXGBE_FCOE
+ if (ixgbe_set_fcoe_queues(adapter))
+ goto done;
+
+#endif /* IXGBE_FCOE */
+ if (ixgbe_set_fdir_queues(adapter))
+ goto done;
+
+ if (ixgbe_set_rss_queues(adapter))
+ goto done;
+
+ /* fallback to base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+
+done:
+ /* Notify the stack of the (possibly) reduced queue counts. */
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ return netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+}
+
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ * 4) TCP Timer (optional)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting IRQs.
+ */
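+ /*
+ * Example: requesting 16 vectors on a system that can only
+ * grant 10 makes pci_enable_msix() return 10; the loop then
+ * retries with 10 entries and succeeds.
+ */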
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI-X interrupts\n");
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+ /*
+ * Adjust for only the vectors we'll use, which is the
+ * minimum of max_msix_q_vectors + NON_Q_VECTORS and the
+ * number of vectors we were allocated.
+ */
+ adapter->num_msix_vectors = min(vectors,
+ adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ }
+}
+
+/**
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return false;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
+}
+
+#ifdef CONFIG_IXGBE_DCB
+
+/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
+static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+ unsigned int *tx, unsigned int *rx)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ *tx = 0;
+ *rx = 0;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
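+ /*
+ * 82598 splits its queues evenly across the 8 TCs:
+ * 32 Tx queues give 4 per TC (tc << 2) and 64 Rx
+ * queues give 8 per TC (tc << 3).
+ */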
+ *tx = tc << 2;
+ *rx = tc << 3;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (num_tcs > 4) {
+ if (tc < 3) {
+ *tx = tc << 5;
+ *rx = tc << 4;
+ } else if (tc < 5) {
+ *tx = ((tc + 2) << 4);
+ *rx = tc << 4;
+ } else if (tc < num_tcs) {
+ *tx = ((tc + 8) << 3);
+ *rx = tc << 4;
+ }
+ } else {
+ *rx = tc << 5;
+ switch (tc) {
+ case 0:
+ *tx = 0;
+ break;
+ case 1:
+ *tx = 64;
+ break;
+ case 2:
+ *tx = 96;
+ break;
+ case 3:
+ *tx = 112;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ int i, j, k;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ if (!num_tcs)
+ return false;
+
+ for (i = 0, k = 0; i < num_tcs; i++) {
+ unsigned int tx_s, rx_s;
+ u16 count = dev->tc_to_txq[i].count;
+
+ ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
+ for (j = 0; j < count; j++, k++) {
+ adapter->tx_ring[k]->reg_idx = tx_s + j;
+ adapter->rx_ring[k]->reg_idx = rx_s + j;
+ adapter->tx_ring[k]->dcb_tc = i;
+ adapter->rx_ring[k]->dcb_tc = i;
+ }
+ }
+
+ return true;
+}
+#endif
+
+/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+ int i;
+ bool ret = false;
+
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+ ret = true;
+ }
+
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
+ *
+ */
+static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ int i;
+ u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);
+
+ fcoe_rx_i = f->mask;
+ fcoe_tx_i = f->mask;
+ }
+ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ }
+ return true;
+}
+
+#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
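+ /*
+ * In IOV mode each pool owns a queue pair, so the PF's first
+ * queue pair sits right after the VF pools, at register
+ * index num_vfs * 2.
+ */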
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note: the order of the various feature calls is important. It must start
+ * with the "most" features enabled at the same time, then trickle down to
+ * the fewest features turned on at once.
+ **/
+static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+ /* start with default case */
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
+
+#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_cache_ring_dcb(adapter))
+ return;
+#endif
+
+#ifdef IXGBE_FCOE
+ if (ixgbe_cache_ring_fcoe(adapter))
+ return;
+#endif /* IXGBE_FCOE */
+
+ if (ixgbe_cache_ring_fdir(adapter))
+ return;
+
+ if (ixgbe_cache_ring_rss(adapter))
+ return;
+}
+
+/**
+ * ixgbe_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. Rings are allocated on the
+ * adapter's NUMA node when possible, falling back to any node.
+ **/
+static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
+{
+ int rx = 0, tx = 0, nid = adapter->node;
+
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
+
+ for (; tx < adapter->num_tx_queues; tx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
+ if (!ring)
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = tx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ adapter->tx_ring[tx] = ring;
+ }
+
+ for (; rx < adapter->num_rx_queues; rx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
+ if (!ring)
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ adapter->rx_ring[rx] = ring;
+ }
+
+ ixgbe_cache_ring_register(adapter);
+
+ return 0;
+
+err_allocation:
+ while (tx)
+ kfree(adapter->tx_ring[--tx]);
+
+ while (rx)
+ kfree(adapter->rx_ring[--rx]);
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) the same number of vectors as there are CPUs.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)num_online_cpus()) + NON_Q_VECTORS;
+
+ /*
+ * At the same time, hardware can only support a maximum of
+ * hw.mac->max_msix_vectors vectors. With features
+ * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+ * descriptor queues supported by our device. Thus, we cap it off in
+ * those rare cases where the CPU count also exceeds our vector limit.
+ */
+ v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
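+ /*
+ * e.g. 16 online CPUs with 16 Rx + 16 Tx queues:
+ * v_budget = min(32, 16) + NON_Q_VECTORS, then capped at
+ * hw->mac.max_msix_vectors (typically 64 on 82599).
+ */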
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ goto out;
+ }
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ e_err(probe,
+ "ATR is not supported while multiple "
+ "queues are disabled. Disabling Flow Director\n");
+ }
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (!err) {
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+ } else {
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI interrupt, "
+ "falling back to legacy. Error: %d\n", err);
+ /* reset err */
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int v_idx, num_q_vectors;
+ struct ixgbe_q_vector *q_vector;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+
+ q_vector->adapter = adapter;
+ q_vector->v_idx = v_idx;
+
+ /* Allocate the affinity_hint cpumask, configure the mask */
+ if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+ goto err_out;
+ cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+
+ if (q_vector->tx.count && !q_vector->rx.count)
+ q_vector->eitr = adapter->tx_eitr_param;
+ else
+ q_vector->eitr = adapter->rx_eitr_param;
+
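+ /* 64 is the NAPI weight: the max number of packets one poll call may consume */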
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbe_poll, 64);
+ adapter->q_vector[v_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx) {
+ v_idx--;
+ q_vector = adapter->q_vector[v_idx];
+ netif_napi_del(&q_vector->napi);
+ free_cpumask_var(q_vector->affinity_mask);
+ kfree(q_vector);
+ adapter->q_vector[v_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int v_idx, num_q_vectors;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ free_cpumask_var(q_vector->affinity_mask);
+ kfree(q_vector);
+ }
+}
+
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+ pci_disable_msi(adapter->pdev);
+ }
+}
+
+/**
+ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ * - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ * - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
+
+ err = ixgbe_set_interrupt_capability(adapter);
+ if (err) {
+ e_dev_err("Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbe_alloc_q_vectors(adapter);
+ if (err) {
+ e_dev_err("Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbe_alloc_queues(adapter);
+ if (err) {
+ e_dev_err("Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ return 0;
+
+err_alloc_queues:
+ ixgbe_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbe_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ /* ixgbe_get_stats64() might access this ring; we must wait
+ * a grace period before freeing it.
+ */
+ kfree_rcu(ring, rcu);
+ adapter->rx_ring[i] = NULL;
+ }
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
+ ixgbe_free_q_vectors(adapter);
+ ixgbe_reset_interrupt_capability(adapter);
+}
+
+/**
+ * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
+ unsigned int rss;
+#ifdef CONFIG_IXGBE_DCB
+ int j;
+ struct tc_configuration *tc;
+#endif
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ /* Set capability flags */
+ rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+ adapter->ring_feature[RING_F_RSS].indices = rss;
+ adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ if (hw->device_id == IXGBE_DEV_ID_82598AT)
+ adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+ adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ adapter->ring_feature[RING_F_FDIR].indices =
+ IXGBE_MAX_FDIR_INDICES;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+ adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
+ /* Default traffic class to use for FCoE */
+ adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif
+#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
+ }
+
+ /* n-tuple support exists, always init our spinlock */
+ spin_lock_init(&adapter->fdir_perfect_lock);
+
+#ifdef CONFIG_IXGBE_DCB
+ /* Configure DCB traffic classes */
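+ /*
+ * Split bandwidth evenly: with eight traffic classes the
+ * groups alternate between 12% and 13% (4 * 12 + 4 * 13 == 100).
+ */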
+ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->path[DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->dcb_pfc = pfc_disabled;
+ }
+ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
+ MAX_TRAFFIC_CLASS);
+
+#endif
+
+ /* default flow control settings */
+ hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
+#ifdef CONFIG_DCB
+ adapter->last_lfc_mode = hw->fc.current_mode;
+#endif
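+ /* the watermarks scale with the max frame size so that pause
+ * frames are sent before the Rx packet buffer overflows */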
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
+ hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+ hw->fc.send_xon = true;
+ hw->fc.disable_fc_autoneg = false;
+
+ /* enable itr by default in dynamic mode */
+ adapter->rx_itr_setting = 1;
+ adapter->rx_eitr_param = 20000;
+ adapter->tx_itr_setting = 1;
+ adapter->tx_eitr_param = 10000;
+
+ /* set defaults for the eitr throughput thresholds (in megabytes) */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
+
+ /* initialize eeprom parameters */
+ if (ixgbe_init_eeprom_params_generic(hw)) {
+ e_dev_err("EEPROM initialization failed\n");
+ return -EIO;
+ }
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int size;
+
+ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
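+ /*
+ * e.g. a 512-entry ring of 16-byte descriptors is 8192
+ * bytes, which is already 4K aligned.
+ */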
+
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ e_err(probe, "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int size;
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
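+ /* the descriptor ring is shared with the hardware, so it needs
+ * a coherent DMA mapping rather than a streaming one */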
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ e_err(probe, "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
+{
+ ixgbe_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * ixgbe_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
+{
+ ixgbe_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
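+ /* e.g. the standard 1500-byte MTU gives 1500 + 14 + 4 = 1518 */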
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
+ hw->mac.type != ixgbe_mac_X540) {
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+ } else {
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+ return -EINVAL;
+ }
+
+ e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbe_open(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBE_TESTING, &adapter->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = ixgbe_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbe_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbe_configure(adapter);
+
+ err = ixgbe_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbe_up_complete(adapter);
+
+ return 0;
+
+err_req_irq:
+err_setup_rx:
+ ixgbe_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_reset(adapter);
+
+ return err;
+}
+
+/**
+ * ixgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbe_close(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+
+ ixgbe_fdir_filter_exit(adapter);
+
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+
+ ixgbe_release_hw_control(adapter);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_resume(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /*
+ * pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ e_dev_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_wake_from_d3(pdev, false);
+
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (err) {
+ e_dev_err("Cannot initialize interrupts for device\n");
+ return err;
+ }
+
+ ixgbe_reset(adapter);
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
+ if (netif_running(netdev)) {
+ err = ixgbe_open(netdev);
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ctrl, fctrl;
+ u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+ }
+
+ ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+ kfree(adapter->ixgbe_ieee_pfc);
+ kfree(adapter->ixgbe_ieee_ets);
+#endif
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+#endif
+ if (wufc) {
+ ixgbe_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & IXGBE_WUFC_MC) {
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ ctrl |= IXGBE_CTRL_GIO_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ pci_wake_from_d3(pdev, false);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pci_wake_from_d3(pdev, !!wufc);
+ break;
+ default:
+ break;
+ }
+
+ *enable_wake = !!wufc;
+
+ ixgbe_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ bool wake;
+
+ retval = __ixgbe_shutdown(pdev, &wake);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void ixgbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ __ixgbe_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * ixgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbe_update_stats(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u64 total_mpc = 0;
+ u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 bytes = 0, packets = 0;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ u64 rsc_count = 0;
+ u64 rsc_flush = 0;
+ for (i = 0; i < 16; i++)
+ adapter->hw_rx_no_dma_resources +=
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
+ }
+ adapter->rsc_total_count = rsc_count;
+ adapter->rsc_total_flush = rsc_flush;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ bytes += rx_ring->stats.bytes;
+ packets += rx_ring->stats.packets;
+ }
+ adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ netdev->stats.rx_bytes = bytes;
+ netdev->stats.rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
+ /* gather some stats to the adapter struct that are per queue */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ bytes += tx_ring->stats.bytes;
+ packets += tx_ring->stats.packets;
+ }
+ adapter->restart_queue = restart_queue;
+ adapter->tx_busy = tx_busy;
+ netdev->stats.tx_bytes = bytes;
+ netdev->stats.tx_packets = packets;
+
+ hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+
+ /* 8 register reads */
+ for (i = 0; i < 8; i++) {
+ /* for packet buffers not used, the register should read 0 */
+ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ missed_rx += mpc;
+ hwstats->mpc[i] += mpc;
+ total_mpc += hwstats->mpc[i];
+ hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* 16 register reads */
+ for (i = 0; i < 16; i++) {
+ hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540)) {
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
+ }
+ }
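+ /*
+ * The QBTC/QBRC byte counts on 82599/X540 are split across _L/_H
+ * register pairs; the high-half reads above are made only for their
+ * clear-on-read side effect, which is why their values are dropped.
+ */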
+
+ hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ /* work around hardware counting issue */
+ hwstats->gprc -= missed_rx;
+
+ ixgbe_update_xoff_received(adapter);
+
+ /* 82598 hardware only has a 32 bit counter in the high register */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ case ixgbe_mac_X540:
+ /* OS2BMC stats are X540 only */
+ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
+ hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
+ hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
+ hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
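+ /* fall through - X540 also accumulates the 82599 counters below */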
+ case ixgbe_mac_82599EB:
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+#ifdef IXGBE_FCOE
+ hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
+ }
+ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+ hwstats->bprc += bprc;
+ hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ hwstats->mprc -= bprc;
+ hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ hwstats->lxontxc += lxon;
+ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ hwstats->lxofftxc += lxoff;
+ hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ /*
+ * 82598 errata - tx of flow control packets is included in tx counters
+ */
+ xon_off_tot = lxon + lxoff;
+ hwstats->gptc -= xon_off_tot;
+ hwstats->mptc -= xon_off_tot;
+ hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
+ hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ hwstats->ptc64 -= xon_off_tot;
+ hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = hwstats->mprc;
+
+ /* Rx Errors */
+ netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
+ netdev->stats.rx_dropped = 0;
+ netdev->stats.rx_length_errors = hwstats->rlec;
+ netdev->stats.rx_crc_errors = hwstats->crcerrs;
+ netdev->stats.rx_missed_errors = total_mpc;
+}
+
+/**
+ * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ /* do nothing if we are not using signature filters */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
+ return;
+
+ adapter->fdir_overflow++;
+
+ if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
+ /* re-enable flow director interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
+ } else {
+ e_err(probe, "failed to finish FDIR re-initialization, "
+ "ignoring FDIR ATR filter additions\n");
+ }
+}
+
+/**
+ * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter: pointer to the device adapter structure
+ *
+ * This function serves two purposes. First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring. Secondly it sets the
+ * bits needed to check for TX hangs. As a result we should immediately
+ * determine if a hang has occurred.
+ **/
+static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ /* Force detection of hung controller */
+ if (netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_check_for_tx_hang(adapter->tx_ring[i]);
+ }
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /*
+ * for legacy and MSI interrupts don't set any bits
+ * that are enabled for EIAM, because this operation
+ * would set *both* EIMS and EICS for any bit in EIAM
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_EICS,
+ (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+ } else {
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbe_q_vector *qv = adapter->q_vector[i];
+ if (qv->rx.ring || qv->tx.ring)
+ eics |= ((u64)1 << i);
+ }
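+ /*
+ * e.g. with three active Tx/Rx vectors, eics ends up as 0x7 and
+ * the rearm below triggers a software interrupt on each of them.
+ */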
+ }
+
+ /* Cause software interrupt to ensure rings are cleaned */
+ ixgbe_irq_rearm_queues(adapter, eics);
+
+}
+
+/**
+ * ixgbe_watchdog_update_link - update the link status
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
+ return;
+
+ if (hw->mac.ops.check_link) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+ /* always assume link is up, if no check link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ if (link_up) {
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ hw->mac.ops.fc_enable(hw, i);
+ } else {
+ hw->mac.ops.fc_enable(hw, 0);
+ }
+ }
+
+ if (link_up ||
+ time_after(jiffies, (adapter->link_check_timeout +
+ IXGBE_TRY_LINK_TIMEOUT))) {
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+}
+
+/**
+ * ixgbe_watchdog_link_is_up - update netif_carrier status and
+ * print link up message
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool flow_rx, flow_tx;
+
+ /* only continue if link was previously down */
+ if (netif_carrier_ok(netdev))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB: {
+ u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
+ flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
+ }
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB: {
+ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+ }
+ break;
+ default:
+ flow_tx = false;
+ flow_rx = false;
+ break;
+ }
+ e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
+ "10 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ "1 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+ "100 Mbps" :
+ "unknown speed"))),
+ ((flow_rx && flow_tx) ? "RX/TX" :
+ (flow_rx ? "RX" :
+ (flow_tx ? "TX" : "None"))));
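+ /* e.g. "NIC Link is Up 10 Gbps, Flow Control: RX/TX" */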
+
+ netif_carrier_on(netdev);
+ ixgbe_check_vf_rate_limit(adapter);
+}
+
+/**
+ * ixgbe_watchdog_link_is_down - update netif_carrier status and
+ * print link down message
+ * @adapter: pointer to the adapter structure
+ **/
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+
+ /* only continue if link was up previously */
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ /* poll for SFP+ cable when link is down */
+ if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
+ adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ e_info(drv, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+}
+
+/**
+ * ixgbe_watchdog_flush_tx - flush queues on link down
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
+{
+ int i;
+ int some_tx_pending = 0;
+
+ if (!netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ if (tx_ring->next_to_use != tx_ring->next_to_clean) {
+ some_tx_pending = 1;
+ break;
+ }
+ }
+
+ if (some_tx_pending) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ }
+ }
+}
+
+static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
+{
+ u32 ssvpc;
+
+ /* Do not perform spoof check for 82598 */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return;
+
+ ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
+
+ /*
+ * The SSVPC register is cleared on read; if it reads zero, no
+ * spoofed packets were seen in the last interval.
+ */
+ if (!ssvpc)
+ return;
+
+ e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
+}
+
+/**
+ * ixgbe_watchdog_subtask - check and bring link up
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
+{
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ ixgbe_watchdog_update_link(adapter);
+
+ if (adapter->link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ ixgbe_spoof_check(adapter);
+ ixgbe_update_stats(adapter);
+
+ ixgbe_watchdog_flush_tx(adapter);
+}
+
+/**
+ * ixgbe_sfp_detection_subtask - poll for SFP+ cable
+ * @adapter: the ixgbe adapter structure
+ **/
+static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 err;
+
+ /* not searching for SFP so there is nothing to do here */
+ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
+ !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
+ return;
+
+ /* someone else is in init, wait until next service event */
+ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return;
+
+ err = hw->phy.ops.identify_sfp(hw);
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto sfp_out;
+
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ /* If no cable is present, then we need to reset
+ * the next time we find a good cable. */
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ }
+
+ /* exit on error */
+ if (err)
+ goto sfp_out;
+
+ /* exit if reset not needed */
+ if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
+ goto sfp_out;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
+
+ /*
+ * A module may be identified correctly, but the EEPROM may not have
+ * support for that module. setup_sfp() will fail in that case, so
+ * we should not allow that module to load.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ err = hw->phy.ops.reset(hw);
+ else
+ err = hw->mac.ops.setup_sfp(hw);
+
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto sfp_out;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
+
+sfp_out:
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+ if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
+ (adapter->netdev->reg_state == NETREG_REGISTERED)) {
+ e_dev_err("failed to initialize because an unsupported "
+ "SFP+ module type was detected.\n");
+ e_dev_err("Reload the driver after installing a "
+ "supported module.\n");
+ unregister_netdev(adapter->netdev);
+ }
+}
+
+/**
+ * ixgbe_sfp_link_config_subtask - set up link SFP after module install
+ * @adapter: the ixgbe adapter structure
+ **/
+static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 autoneg;
+ bool negotiation = false;
+
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
+ return;
+
+ /* someone else is in init, wait until next service event */
+ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return;
+
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+ hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ hw->mac.autotry_restart = false;
+ if (hw->mac.ops.setup_link)
+ hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+}
+
+/**
+ * ixgbe_service_timer - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbe_service_timer(unsigned long data)
+{
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+ unsigned long next_event_offset;
+
+ /* poll faster when waiting for link */
+ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+ next_event_offset = HZ / 10;
+ else
+ next_event_offset = HZ * 2;
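+ /* i.e. every 100 ms while waiting for link, every 2 seconds otherwise */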
+
+ /* Reset the timer */
+ mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+ ixgbe_service_event_schedule(adapter);
+}
+
+static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
+{
+ if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ ixgbe_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
+ adapter->tx_timeout_count++;
+
+ ixgbe_reinit_locked(adapter);
+}
+
+/**
+ * ixgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
+ ixgbe_reset_subtask(adapter);
+ ixgbe_sfp_detection_subtask(adapter);
+ ixgbe_sfp_link_config_subtask(adapter);
+ ixgbe_check_overtemp_subtask(adapter);
+ ixgbe_watchdog_subtask(adapter);
+ ixgbe_fdir_reinit_subtask(adapter);
+ ixgbe_check_hang_subtask(adapter);
+
+ ixgbe_service_event_complete(adapter);
+}
+
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+ u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ u16 i = tx_ring->next_to_use;
+
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+ int err;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_idx: use 1 as index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
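+ /*
+ * e.g. a standard 20 byte TCP header with an MSS of 1460 packs
+ * l4len = 20, mss = 1460 and context index 1 into this dword.
+ */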
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+ mss_l4len_idx);
+
+ return 1;
+}
+
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
+{
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(tx_flags & IXGBE_TX_FLAGS_TXSW))
+ return false;
+ } else {
+ u8 l4_hdr = 0;
+ switch (protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
+ }
+
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+ type_tucmd, mss_l4len_idx);
+
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
+}
+
+static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
+
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
+
+ /* set segmentation enable bits for TSO/FSO */
+#ifdef IXGBE_FCOE
+ if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+#else
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+#endif
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+
+ return cmd_type;
+}
+
+static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status =
+ cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
+ (1 << IXGBE_ADVTXD_IDX_SHIFT));
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+ }
+
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+
+#ifdef IXGBE_FCOE
+ /* use index 1 context for FCOE/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
+ (1 << IXGBE_ADVTXD_IDX_SHIFT));
+
+#endif
+ /*
+ * Check Context must be set if Tx switch is enabled, which it
+ * always is when virtual functions are running
+ */
+ if (tx_flags & IXGBE_TX_FLAGS_TXSW)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+
+ return olinfo_status;
+}
+
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+ IXGBE_TXD_CMD_RS)
+
+static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb,
+ struct ixgbe_tx_buffer *first,
+ u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct device *dev = tx_ring->dev;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ dma_addr_t dma;
+ __le32 cmd_type, olinfo_status;
+ struct skb_frag_struct *frag;
+ unsigned int f = 0;
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ u32 offset = 0;
+ u32 paylen = skb->len - hdr_len;
+ u16 i = tx_ring->next_to_use;
+ u16 gso_segs;
+
+#ifdef IXGBE_FCOE
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+ if (data_len >= sizeof(struct fcoe_crc_eof)) {
+ data_len -= sizeof(struct fcoe_crc_eof);
+ } else {
+ size -= sizeof(struct fcoe_crc_eof) - data_len;
+ data_len = 0;
+ }
+ }
+
+#endif
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ cmd_type = ixgbe_tx_cmd_type(tx_flags);
+ olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
+
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+
+ for (;;) {
+ while (size > IXGBE_MAX_DATA_PER_TXD) {
+ tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+ tx_desc->read.olinfo_status = olinfo_status;
+
+ offset += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ i = 0;
+ }
+ }
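+ /*
+ * Any chunk larger than IXGBE_MAX_DATA_PER_TXD has now been carved
+ * into full-sized descriptors sharing one DMA mapping; "size" holds
+ * the remainder written out below.
+ */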
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_buffer_info->length = offset + size;
+ tx_buffer_info->tx_flags = tx_flags;
+ tx_buffer_info->dma = dma;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.olinfo_status = olinfo_status;
+
+ if (!data_len)
+ break;
+
+ frag = &skb_shinfo(skb)->frags[f];
+#ifdef IXGBE_FCOE
+ size = min_t(unsigned int, data_len, frag->size);
+#else
+ size = frag->size;
+#endif
+ data_len -= size;
+ f++;
+
+ offset = 0;
+ tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ i = 0;
+ }
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ else
+ gso_segs = 1;
+
+ /* bytecount on the wire = payload + one replicated header per segment */
+ tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
+ tx_buffer_info->gso_segs = gso_segs;
+ tx_buffer_info->skb = skb;
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ tx_ring->next_to_use = i;
+}
+
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
+{
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+ union ixgbe_atr_hash_dword input = { .dword = 0 };
+ union ixgbe_atr_hash_dword common = { .dword = 0 };
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ struct tcphdr *th;
+ __be16 vlan_id;
+
+ /* if ring doesn't have an interrupt vector, cannot perform ATR */
+ if (!q_vector)
+ return;
+
+ /* do nothing if sampling is disabled */
+ if (!ring->atr_sample_rate)
+ return;
+
+ ring->atr_count++;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if ((protocol != __constant_htons(ETH_P_IPV6) ||
+ hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+ (protocol != __constant_htons(ETH_P_IP) ||
+ hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+
+ th = tcp_hdr(skb);
+
+ /* skip this packet since it is invalid or the socket is closing */
+ if (!th || th->fin)
+ return;
+
+ /* sample on all syn packets or once every atr sample count */
+ if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+ return;
+
+ /* reset sample count */
+ ring->atr_count = 0;
+
+ vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+ /*
+ * src and dst are inverted, think how the receiver sees them
+ *
+ * The input is broken into two sections, a non-compressed section
+ * containing vm_pool, vlan_id, and flow_type. The rest of the data
+ * is XORed together and stored in the compressed dword.
+ */
+ input.formatted.vlan_id = vlan_id;
+
+ /*
+ * since src port and flex bytes occupy the same word XOR them together
+ * and write the value to source port portion of compressed dword
+ */
+ if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
+ common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ else
+ common.port.src ^= th->dest ^ protocol;
+ common.port.dst ^= th->source;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ } else {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ }
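+ /*
+ * The full 128-bit IPv6 addresses are XOR-folded into the single
+ * 32-bit compressed dword described above before being handed to
+ * the signature filter below.
+ */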
+
+ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+ input, common, ring->queue_index);
+}
+
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(ixgbe_desc_unused(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
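+ /*
+ * Fast path: skip the stop/recheck dance in __ixgbe_maybe_stop_tx()
+ * entirely while the ring still has enough free descriptors.
+ */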
+static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+ if (likely(ixgbe_desc_unused(tx_ring) >= size))
+ return 0;
+ return __ixgbe_maybe_stop_tx(tx_ring, size);
+}
+
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
+#ifdef IXGBE_FCOE
+ __be16 protocol = vlan_get_protocol(skb);
+
+ if (((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) &&
+ (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+ txq += adapter->ring_feature[RING_F_FCOE].mask;
+ return txq;
+ }
+#endif
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
+ return txq;
+ }
+
+ return skb_tx_hash(dev, skb);
+}
+
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ struct ixgbe_tx_buffer *first;
+ int tso;
+ u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = skb->protocol;
+ u8 hdr_len = 0;
+
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
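+ /* count + 3: the two-descriptor gap plus one context descriptor from above */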
+
+#ifdef CONFIG_PCI_IOV
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ tx_flags |= IXGBE_TX_FLAGS_TXSW;
+
+#endif
+ /* if we have a HW VLAN tag being added default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN check the next protocol and store the tag */
+ } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ goto out_drop;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
+ }
+
+ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
+ (skb->priority != TC_PRIO_CONTROL))) {
+ tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
+ tx_flags |= tx_ring->dcb_tc <<
+ IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+ if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto out_drop;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(tx_flags >>
+ IXGBE_TX_FLAGS_VLAN_SHIFT);
+ } else {
+ tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+ }
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+
+#ifdef IXGBE_FCOE
+ /* setup tx offload for FCoE */
+ if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+ (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_FSO |
+ IXGBE_TX_FLAGS_FCOE;
+ else
+ tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+ goto xmit_fcoe;
+ }
+
+#endif /* IXGBE_FCOE */
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == __constant_htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+
+ tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ /* add the ATR filter if ATR is on */
+ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+ ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+
+#ifdef IXGBE_FCOE
+xmit_fcoe:
+#endif /* IXGBE_FCOE */
+ ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring;
+
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
+ return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
+}
+
+/**
+ * ixgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
+
+ return 0;
+}
+
+static int
+ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 value;
+ int rc;
+
+ if (prtad != hw->phy.mdio.prtad)
+ return -EINVAL;
+ rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
+ if (!rc)
+ rc = value;
+ return rc;
+}
+
+static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+ u16 addr, u16 value)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (prtad != hw->phy.mdio.prtad)
+ return -EINVAL;
+ return hw->phy.ops.write_reg(hw, addr, devad, value);
+}
+
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+}
+
+/**
+ * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
+ * netdev->dev_addrs
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_add_sanmac_netdev(struct net_device *dev)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ if (is_valid_ether_addr(mac->san_addr)) {
+ rtnl_lock();
+ err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+ return err;
+}
+
+/**
+ * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from the corresponding
+ * netdev->dev_addrs
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_del_sanmac_netdev(struct net_device *dev)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ if (is_valid_ether_addr(mac->san_addr)) {
+ rtnl_lock();
+ err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ixgbe_netpoll(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+ ixgbe_msix_clean_rings(0, q_vector);
+ }
+ } else {
+ ixgbe_intr(adapter->pdev->irq, netdev);
+ }
+ adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+}
+#endif
+
+static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
+ rcu_read_unlock();
+ /* following stats updated by ixgbe_watchdog_task() */
+ stats->multicast = netdev->stats.multicast;
+ stats->rx_errors = netdev->stats.rx_errors;
+ stats->rx_length_errors = netdev->stats.rx_length_errors;
+ stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+ stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+ return stats;
+}
+
+/**
+ * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm each
+ * 802.1Q priority maps to a packet buffer that exists.
+ **/
+static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg, rsave;
+ int i;
+
+ /* 82598 devices have a static priority-to-TC mapping that cannot
+ * be changed, so no validation is needed.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ rsave = reg;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ u8 up2tc = (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)) & 0x7;
+
+ /* If up2tc is out of bounds default to zero */
+ if (up2tc > tc)
+ reg &= ~(0x7 << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+ }
+
+ if (reg != rsave)
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+}
+
+/**
+ * ixgbe_setup_tc - configure a net_device for multiple traffic classes
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ **/
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* Multiple traffic classes requires multiple queues */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ e_err(drv, "Enable failed, needs MSI-X\n");
+ return -EINVAL;
+ }
+
+ /* Hardware supports up to 8 traffic classes */
+ if (tc > MAX_TRAFFIC_CLASS ||
+ (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+ return -EINVAL;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbe_close(dev);
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ if (tc) {
+ netdev_set_num_tc(dev, tc);
+ adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+
+ adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ } else {
+ netdev_reset_tc(dev);
+
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ }
+
+ ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_validate_rtr(adapter, tc);
+ if (netif_running(dev))
+ ixgbe_open(dev);
+
+ return 0;
+}
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+}
+
+static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ data &= ~NETIF_F_HW_VLAN_RX;
+#endif
+
+ /* turn off RXHASH if RSS is not supported */
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ data &= ~NETIF_F_RXHASH;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ data &= ~NETIF_F_LRO;
+
+ /* Turn off LRO if not RSC capable or invalid ITR settings */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ data &= ~NETIF_F_LRO;
+ } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (adapter->rx_itr_setting != 1 &&
+ adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
+ data &= ~NETIF_F_LRO;
+ e_info(probe, "rx-usecs set too low, not enabling RSC\n");
+ }
+
+ return data;
+}
+
+static int ixgbe_set_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ /* Make sure RSC matches LRO, reset if change */
+ if (!!(data & NETIF_F_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* turn off ATR, enable perfect filters and reset */
+ if (data & NETIF_F_NTUPLE) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ }
+ } else if (!(data & NETIF_F_NTUPLE)) {
+ /* turn off Flow Director, set ATR and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ }
+
+ if (need_reset)
+ ixgbe_do_reset(netdev);
+
+ return 0;
+}
+
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = ixgbe_open,
+ .ndo_stop = ixgbe_close,
+ .ndo_start_xmit = ixgbe_xmit_frame,
+ .ndo_select_queue = ixgbe_select_queue,
+ .ndo_set_rx_mode = ixgbe_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ixgbe_set_mac,
+ .ndo_change_mtu = ixgbe_change_mtu,
+ .ndo_tx_timeout = ixgbe_tx_timeout,
+ .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
+ .ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
+ .ndo_get_stats64 = ixgbe_get_stats64,
+ .ndo_setup_tc = ixgbe_setup_tc,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ixgbe_netpoll,
+#endif
+#ifdef IXGBE_FCOE
+ .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+ .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
+ .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
+ .ndo_fcoe_enable = ixgbe_fcoe_enable,
+ .ndo_fcoe_disable = ixgbe_fcoe_disable,
+ .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
+#endif /* IXGBE_FCOE */
+ .ndo_set_features = ixgbe_set_features,
+ .ndo_fix_features = ixgbe_fix_features,
+};
+
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+ int num_vf_macvlans, i;
+ struct vf_macvlans *mv_list;
+
+ if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+
+ num_vf_macvlans = hw->mac.num_rar_entries -
+ (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+
+ adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
+ sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (mv_list) {
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&adapter->vf_mvs.l);
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list->vf = -1;
+ mv_list->free = true;
+ mv_list->rar_entry = hw->mac.num_rar_entries -
+ (i + adapter->num_vfs + 1);
+ list_add(&mv_list->l, &adapter->vf_mvs.l);
+ mv_list++;
+ }
+ }
+
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* Oh oh */
+ e_err(probe, "Unable to allocate memory for VF Data Storage - "
+ "SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
+/**
+ * ixgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbe_adapter *adapter = NULL;
+ struct ixgbe_hw *hw;
+ const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
+ static int cards_found;
+ int i, err, pci_using_dac;
+ u8 part_str[IXGBE_PBANUM_LENGTH];
+ unsigned int indices = num_possible_cpus();
+#ifdef IXGBE_FCOE
+ u16 device_caps;
+#endif
+ u32 eec;
+
+ /* Catch broken hardware that put the wrong VF device ID in
+ * the PCIe SR-IOV capability.
+ */
+ if (pdev->is_virtfn) {
+ WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+ pci_name(pdev), pdev->vendor, pdev->device);
+ return -EINVAL;
+ }
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
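+ /*
+ * pci_using_dac remembers whether the 64-bit DMA mask was accepted
+ * so that NETIF_F_HIGHDMA is only advertised later when the device
+ * can really reach high memory.
+ */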
+
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), ixgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+#ifdef CONFIG_IXGBE_DCB
+ indices *= MAX_TRAFFIC_CLASS;
+#endif
+
+ if (ii->mac == ixgbe_mac_82598EB)
+ indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
+ else
+ indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
+
+#ifdef IXGBE_FCOE
+ indices += min_t(unsigned int, num_possible_cpus(),
+ IXGBE_MAX_FCOE_INDICES);
+#endif
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ for (i = 1; i <= 5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ }
+
+ netdev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbe_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+ /* EEPROM */
+ memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ /* If EEPROM is valid (bit 8 = 1), use the default; otherwise bit-bang */
+ if (!(eec & (1 << 8)))
+ hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+
+ /* PHY */
+ memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ /* ixgbe_identify_phy_generic will set prtad and mmds properly */
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
+ hw->phy.mdio.mmds = 0;
+ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ hw->phy.mdio.dev = netdev;
+ hw->phy.mdio.mdio_read = ixgbe_mdio_read;
+ hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+
+ ii->get_invariants(hw);
+
+ /* setup the private structure */
+ err = ixgbe_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* Make it possible for the adapter to be woken up via WOL */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If there is a fan on this device and it has failed log the
+ * failure.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ e_crit(probe, "Fan has stopped, replace the adapter\n");
+ }
+
+ /* reset_hw fills in the perm_addr as well */
+ hw->phy.reset_if_overtemp = true;
+ err = hw->mac.ops.reset_hw(hw);
+ hw->phy.reset_if_overtemp = false;
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
+ hw->mac.type == ixgbe_mac_82598EB) {
+ err = 0;
+ } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ e_dev_err("failed to load because an unsupported SFP+ "
+ "module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported "
+ "module.\n");
+ goto err_sw_init;
+ } else if (err) {
+ e_dev_err("HW Init failed: %d\n", err);
+ goto err_sw_init;
+ }
+
+ ixgbe_probe_vf(adapter, ii);
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM;
+
+ netdev->hw_features = netdev->features;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ netdev->features |= NETIF_F_SCTP_CSUM;
+ netdev->hw_features |= NETIF_F_SCTP_CSUM |
+ NETIF_F_NTUPLE;
+ break;
+ default:
+ break;
+ }
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
+
+#ifdef CONFIG_IXGBE_DCB
+ netdev->dcbnl_ops = &dcbnl_ops;
+#endif
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ if (hw->mac.ops.get_device_caps) {
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+ }
+ }
+ if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ netdev->vlan_features |= NETIF_F_FCOE_CRC;
+ netdev->vlan_features |= NETIF_F_FSO;
+ netdev->vlan_features |= NETIF_F_FCOE_MTU;
+ }
+#endif /* IXGBE_FCOE */
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+ netdev->hw_features |= NETIF_F_LRO;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ netdev->features |= NETIF_F_LRO;
+
+ /* make sure the EEPROM is good */
+ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
+ e_dev_err("The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
+
+ if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
+ e_dev_err("invalid MAC address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.disable_tx_laser(hw);
+
+ setup_timer(&adapter->service_timer, &ixgbe_service_timer,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->service_task, ixgbe_service_task);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ netdev->hw_features &= ~NETIF_F_RXHASH;
+ netdev->features &= ~NETIF_F_RXHASH;
+ }
+
+ switch (pdev->device) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only this subdevice supports WOL */
+ if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ default:
+ adapter->wol = 0;
+ break;
+ }
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* pick up the PCI bus settings for reporting later */
+ hw->mac.ops.get_bus_info(hw);
+
+ /* print bus type/speed/width info */
+ e_dev_info("(PCI Express:%s:%s) %pM\n",
+ (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+ hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"),
+ netdev->dev_addr);
+
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+ part_str);
+ else
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
+
+ if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
+ e_dev_warn("PCI-Express bandwidth available for this card is "
+ "not sufficient for optimal performance.\n");
+ e_dev_warn("For optimal performance a x8 PCI-Express slot "
+ "is required.\n");
+ }
+
+ /* save off EEPROM version number */
+ hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
+
+ /* reset the hardware with the new settings */
+ err = hw->mac.ops.start_hw(hw);
+
+ if (err == IXGBE_ERR_EEPROM_VERSION) {
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated "
+ "with your hardware. If you are experiencing "
+ "problems please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
+ }
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+#ifdef CONFIG_IXGBE_DCA
+ if (dca_add_requester(&pdev->dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ ixgbe_setup_dca(adapter);
+ }
+#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
+ /* Inform firmware of driver version */
+ if (hw->mac.ops.set_fw_drv_ver)
+ hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
+ FW_CEM_UNUSED_VER);
+
+ /* add san mac addr to netdev */
+ ixgbe_add_sanmac_netdev(netdev);
+
+ e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
+ cards_found++;
+ return 0;
+
+err_register:
+ ixgbe_release_hw_control(adapter);
+ ixgbe_clear_interrupt_scheme(adapter);
+err_sw_init:
+err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
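+
+/*
+ * The error labels above unwind in reverse order of setup: hardware
+ * control and the interrupt scheme first, then the SR-IOV state, the
+ * ioremap()ed BAR, the netdev allocation, and finally the PCI regions
+ * and the device enable itself.
+ */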
+
+/**
+ * ixgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbe_remove(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+ cancel_work_sync(&adapter->service_task);
+
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+ dca_remove_requester(&pdev->dev);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ }
+
+#endif
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
+ /* remove the added san mac */
+ ixgbe_del_sanmac_netdev(netdev);
+
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ ixgbe_release_hw_control(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
+
+ e_dev_info("complete\n");
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * ixgbe_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgbe_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbe_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+
+ if (pci_enable_device_mem(pdev)) {
+ e_err(probe, "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_wake_from_d3(pdev, false);
+
+ ixgbe_reset(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ e_dev_err("pci_cleanup_aer_uncorrect_error_status "
+ "failed 0x%0x\n", err);
+ /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * ixgbe_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void ixgbe_io_resume(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev))
+ ixgbe_up(adapter);
+
+ netif_device_attach(netdev);
+}
+
+static struct pci_error_handlers ixgbe_err_handler = {
+ .error_detected = ixgbe_io_error_detected,
+ .slot_reset = ixgbe_io_slot_reset,
+ .resume = ixgbe_io_resume,
+};
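+
+/*
+ * On a reported PCI error the AER core drives the three callbacks
+ * above in order: error_detected() detaches and quiesces the device,
+ * slot_reset() re-initializes it after the link reset, and resume()
+ * restarts traffic. A successful recovery therefore looks like:
+ *
+ *   ixgbe_io_error_detected() -> PCI_ERS_RESULT_NEED_RESET
+ *   ixgbe_io_slot_reset()     -> PCI_ERS_RESULT_RECOVERED
+ *   ixgbe_io_resume()
+ */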
+
+static struct pci_driver ixgbe_driver = {
+ .name = ixgbe_driver_name,
+ .id_table = ixgbe_pci_tbl,
+ .probe = ixgbe_probe,
+ .remove = __devexit_p(ixgbe_remove),
+#ifdef CONFIG_PM
+ .suspend = ixgbe_suspend,
+ .resume = ixgbe_resume,
+#endif
+ .shutdown = ixgbe_shutdown,
+ .err_handler = &ixgbe_err_handler
+};
+
+/**
+ * ixgbe_init_module - Driver Registration Routine
+ *
+ * ixgbe_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbe_init_module(void)
+{
+ int ret;
+ pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+ pr_info("%s\n", ixgbe_copyright);
+
+#ifdef CONFIG_IXGBE_DCA
+ dca_register_notify(&dca_notifier);
+#endif
+
+ ret = pci_register_driver(&ixgbe_driver);
+ return ret;
+}
+
+module_init(ixgbe_init_module);
+
+/**
+ * ixgbe_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbe_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbe_exit_module(void)
+{
+#ifdef CONFIG_IXGBE_DCA
+ dca_unregister_notify(&dca_notifier);
+#endif
+ pci_unregister_driver(&ixgbe_driver);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+}
+
+#ifdef CONFIG_IXGBE_DCA
+static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
+ void *p)
+{
+ int ret_val;
+
+ ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
+ __ixgbe_notify_dca);
+
+ return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+#endif /* CONFIG_IXGBE_DCA */
+
+module_exit(ixgbe_exit_module);
+
+/* ixgbe_main.c */
--- /dev/null
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from the mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Sachin Sanap <ssanap@marvell.com>
+ * Zhangfei Gao <zgao6@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS 0x0000
+#define SMI 0x0010
+#define PORT_CONFIG 0x0400
+#define PORT_CONFIG_EXT 0x0408
+#define PORT_COMMAND 0x0410
+#define PORT_STATUS 0x0418
+#define HTPR 0x0428
+#define SDMA_CONFIG 0x0440
+#define SDMA_CMD 0x0448
+#define INT_CAUSE 0x0450
+#define INT_W_CLEAR 0x0454
+#define INT_MASK 0x0458
+#define ETH_F_RX_DESC_0 0x0480
+#define ETH_C_RX_DESC_0 0x04A0
+#define ETH_C_TX_DESC_1 0x04E4
+
+/* smi register */
+#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
+#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
+#define SMI_OP_W (0 << 26) /* Write operation */
+#define SMI_OP_R (1 << 26) /* Read operation */
+
+#define PHY_WAIT_ITERATIONS 10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT 0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT (1 << 23)
+#define RX_FIRST_DESC (1 << 17)
+#define RX_LAST_DESC (1 << 16)
+#define RX_ERROR (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT (1 << 23)
+#define TX_GEN_CRC (1 << 22)
+#define TX_ZERO_PADDING (1 << 18)
+#define TX_FIRST_DESC (1 << 17)
+#define TX_LAST_DESC (1 << 16)
+#define TX_ERROR (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT (1 << 31)
+#define SDMA_CMD_TXDL (1 << 24)
+#define SDMA_CMD_TXDH (1 << 23)
+#define SDMA_CMD_AR (1 << 15)
+#define SDMA_CMD_ERD (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS (1 << 12)
+#define PCR_EN (1 << 7)
+#define PCR_PM (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM (1 << 28)
+#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_MFL_1518 (0 << 14)
+#define PCXR_MFL_1536 (1 << 14)
+#define PCXR_MFL_2048 (2 << 14)
+#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLP (1 << 11)
+#define PCXR_PRIO_TX_OFF 3
+#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF 12
+#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR (1 << 6)
+#define SDCR_BLMT (1 << 7)
+#define SDCR_RIFB (1 << 9)
+#define SDCR_RC_OFF 2
+#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions of the Interrupt Cause Reg
+ * and Interrupt MASK Reg is the same
+ */
+#define ICR_RXBUF (1 << 0)
+#define ICR_TXBUF_H (1 << 2)
+#define ICR_TXBUF_L (1 << 3)
+#define ICR_TXEND_H (1 << 6)
+#define ICR_TXEND_L (1 << 7)
+#define ICR_RXERR (1 << 8)
+#define ICR_TXERR_H (1 << 10)
+#define ICR_TXERR_L (1 << 11)
+#define ICR_TX_UDR (1 << 13)
+#define ICR_MII_CH (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
+ ICR_TXERR_H | ICR_TXERR_L |\
+ ICR_TXEND_H | ICR_TXEND_L |\
+ ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+
+#define NUM_RX_DESCS 64
+#define NUM_TX_DESCS 64
+
+#define HASH_ADD 0
+#define HASH_DELETE 1
+#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER 12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100 (1 << 0)
+#define FULL_DUPLEX (1 << 1)
+#define FLOW_CONTROL_ENABLED (1 << 2)
+#define LINK_UP (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK (1 << 0)
+#define WORK_TX_DONE (1 << 1)
+
+/*
+ * Misc definitions.
+ */
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 reserved;
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+
+struct pxa168_eth_private {
+ int port_num; /* User Ethernet port number */
+
+ int rx_resource_err; /* Rx ring resource error flag */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+
+ struct rx_desc *p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct tx_desc *p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+ struct sk_buff **tx_skb;
+
+ struct work_struct tx_timeout_task;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ u8 work_todo;
+ int skb_size;
+
+ struct net_device_stats stats;
+ /* Size of Tx Ring per queue */
+ int tx_ring_size;
+ /* Number of tx descriptors in use */
+ int tx_desc_count;
+ /* Size of Rx Ring per queue */
+ int rx_ring_size;
+ /* Number of rx descriptors in use */
+ int rx_desc_count;
+
+ /*
+ * Used in case RX Ring is empty, which can occur when
+ * system does not have resources (skb's)
+ */
+ struct timer_list timeout;
+ struct mii_bus *smi_bus;
+ struct phy_device *phy;
+
+ /* clock */
+ struct clk *clk;
+ struct pxa168_eth_platform_data *pd;
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /* Pointer to the hardware address filter table */
+ void *htpr;
+ dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ HASH_ENTRY_VALID = 1,
+ SKIP = 2,
+ HASH_ENTRY_RECEIVE_DISCARD = 4,
+ HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+ return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+ writel(data, pep->base + offset);
+}
+
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+ int delay;
+ int max_retries = 40;
+
+ do {
+ wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+ udelay(100);
+
+ delay = 10;
+ while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+ && delay-- > 0) {
+ udelay(10);
+ }
+ } while (max_retries-- > 0 && delay <= 0);
+
+ if (max_retries <= 0)
+ printk(KERN_ERR "%s : DMA Stuck\n", __func__);
+}
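+
+/*
+ * Worst case, abort_dma() busy-waits for
+ * 40 retries x (100 us + 10 polls x 10 us) = 8 ms
+ * before declaring the DMA engine stuck.
+ */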
+
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+ unsigned int reg_data;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+
+ return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+ u32 reg_data;
+ int addr_shift = 5 * pep->port_num;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+ reg_data &= ~(0x1f << addr_shift);
+ reg_data |= (phy_addr & 0x1f) << addr_shift;
+ wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+ int data;
+
+ data = phy_read(pep->phy, MII_BMCR);
+ if (data < 0)
+ return;
+
+ data |= BMCR_RESET;
+ if (phy_write(pep->phy, MII_BMCR, data) < 0)
+ return;
+
+ do {
+ data = phy_read(pep->phy, MII_BMCR);
+ } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_desc *p_used_rx_desc;
+ int used_rx_desc;
+
+ while (pep->rx_desc_count < pep->rx_ring_size) {
+ int size;
+
+ skb = dev_alloc_skb(pep->skb_size);
+ if (!skb)
+ break;
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ pep->rx_desc_count++;
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = pep->rx_used_desc_q;
+ p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+ size = skb->end - skb->data;
+ p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+ skb->data,
+ size,
+ DMA_FROM_DEVICE);
+ p_used_rx_desc->buf_size = size;
+ pep->rx_skb[used_rx_desc] = skb;
+
+ /* Return the descriptor to DMA ownership */
+ wmb();
+ p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+ wmb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+ /* Any Rx return cancels the Rx resource error status */
+ pep->rx_resource_err = 0;
+
+ skb_reserve(skb, ETH_HW_IP_ALIGN);
+ }
+
+ /*
+	 * If the RX ring has run out of skbs, set a timer to try
+	 * allocating again at a later time.
+ */
+ if (pep->rx_desc_count == 0) {
+ pep->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&pep->timeout);
+ }
+}
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+ struct pxa168_eth_private *pep = (void *)data;
+ napi_schedule(&pep->napi);
+}
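+
+/*
+ * When rxq_refill() could not allocate any skb, the timer armed there
+ * fires after HZ / 10 (about 100 ms) and schedules NAPI via the
+ * wrapper above; the poll handler then runs rxq_process(), which ends
+ * with another rxq_refill() attempt.
+ */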
+
+static inline u8 flip_8_bits(u8 x)
+{
+ return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+ | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+ | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+ | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
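+
+/*
+ * flip_8_bits() reverses the bit order within each nibble separately;
+ * for example flip_8_bits(0x12) == 0x84 (low nibble 0010b -> 0100b,
+ * high nibble 0001b -> 1000b).
+ */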
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+ ((mac_addr[i] & 0xf0) >> 4);
+ }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Calculate the hash table index for the given MAC address.
+ * Inputs
+ * mac_addr_orig - MAC address.
+ * Outputs
+ * return the calculated entry index.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+ u32 hash_result;
+ u32 addr0;
+ u32 addr1;
+ u32 addr2;
+ u32 addr3;
+ unsigned char mac_addr[ETH_ALEN];
+
+	/* Make a copy of the MAC address since we are going to perform bit
+	 * operations on it
+ */
+ memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+ nibble_swap_every_byte(mac_addr);
+ inverse_every_nibble(mac_addr);
+
+ addr0 = (mac_addr[5] >> 2) & 0x3f;
+ addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+ addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+ addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+ hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+ hash_result = hash_result & 0x07ff;
+ return hash_result;
+}
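+
+/*
+ * The masked result is an 11-bit index: in 1/2kB mode the table holds
+ * HASH_ADDR_TABLE_SIZE / sizeof(struct addr_table_entry), i.e.
+ * 0x4000 / 8 = 2048 entries, and add_del_hash_entry() below resolves
+ * collisions by probing up to HOP_NUMBER (12) consecutive slots,
+ * wrapping around at entry 0x7ff.
+ */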
+
+/*
+ * ----------------------------------------------------------------------------
+ * Add or delete an entry in the address table.
+ * Inputs
+ * pep - ethernet port private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry which is
+ * part of a chain in the hash table. We can't just delete the entry
+ * since that would break the chain. The tables need to be
+ * defragmented from time to time.
+ * rd - 0 Discard packet upon match.
+ * - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+ unsigned char *mac_addr,
+ u32 rd, u32 skip, int del)
+{
+ struct addr_table_entry *entry, *start;
+ u32 new_high;
+ u32 new_low;
+ u32 i;
+
+ new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+ | (((mac_addr[1] >> 0) & 0xf) << 11)
+ | (((mac_addr[0] >> 4) & 0xf) << 7)
+ | (((mac_addr[0] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 4) & 0x1) << 31)
+ | (((mac_addr[3] >> 0) & 0xf) << 27)
+ | (((mac_addr[2] >> 4) & 0xf) << 23)
+ | (((mac_addr[2] >> 0) & 0xf) << 19)
+ | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+ | HASH_ENTRY_VALID;
+
+ new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+ | (((mac_addr[5] >> 0) & 0xf) << 11)
+ | (((mac_addr[4] >> 4) & 0xf) << 7)
+ | (((mac_addr[4] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+ /*
+ * Pick the appropriate table, start scanning for free/reusable
+ * entries at the index obtained by hashing the specified MAC address
+ */
+ start = pep->htpr;
+ entry = start + hash_function(mac_addr);
+ for (i = 0; i < HOP_NUMBER; i++) {
+ if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+ break;
+ } else {
+ /* if same address put in same position */
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+ (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) == new_high)) {
+ break;
+ }
+ }
+ if (entry == start + 0x7ff)
+ entry = start;
+ else
+ entry++;
+ }
+
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) != new_high) && del)
+ return 0;
+
+ if (i == HOP_NUMBER) {
+ if (!del) {
+ printk(KERN_INFO "%s: table section is full, need to "
+ "move to 16kB implementation?\n",
+ __FILE__);
+ return -ENOSPC;
+ } else
+ return 0;
+ }
+
+ /*
+ * Update the selected entry
+ */
+ if (del) {
+ entry->hi = 0;
+ entry->lo = 0;
+ } else {
+ entry->hi = cpu_to_le32(new_high);
+ entry->lo = cpu_to_le32(new_low);
+ }
+
+ return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Create an address table entry from the MAC address info
+ * found in the specified net_device struct
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+ unsigned char *oaddr,
+ unsigned char *addr)
+{
+ /* Delete old entry */
+ if (oaddr)
+ add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+ /* Add new entry */
+ add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+ /*
+	 * Hardware expects the CPU to build a hash table based on a
+	 * predefined hash function and to populate it based on hardware
+	 * addresses. The location of the hash table is identified by a
+	 * 32-bit pointer stored in the HTPR internal register. Two sizes
+	 * exist for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
+	 * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)).
+	 * We currently only support 1/2kB.
+	 */
+	/* TODO: Add support for the 8kB hash table and an alternative hash
+	 * function. The driver can dynamically switch to them if the 1/2kB
+	 * hash table is full.
+ */
+ if (pep->htpr == NULL) {
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
+ if (pep->htpr == NULL)
+ return -ENOMEM;
+ }
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ wrl(pep, HTPR, pep->htpr_dma);
+ return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 val;
+
+ val = rdl(pep, PORT_CONFIG);
+ if (dev->flags & IFF_PROMISC)
+ val |= PCR_PM;
+ else
+ val &= ~PCR_PM;
+ wrl(pep, PORT_CONFIG, val);
+
+ /*
+	 * Remove the old list of MAC addresses and add dev->dev_addr
+	 * and the multicast addresses.
+ */
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned char oldMac[ETH_ALEN];
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+ memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ netif_addr_lock_bh(dev);
+ update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+ unsigned int val = 0;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int tx_curr_desc, rx_curr_desc;
+
+ /* Perform PHY reset, if there is a PHY. */
+ if (pep->phy != NULL) {
+ struct ethtool_cmd cmd;
+
+ pxa168_get_settings(pep->dev, &cmd);
+ ethernet_phy_reset(pep);
+ pxa168_set_settings(pep->dev, &cmd);
+ }
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = pep->tx_curr_desc_q;
+ wrl(pep, ETH_C_TX_DESC_1,
+ (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = pep->rx_curr_desc_q;
+ wrl(pep, ETH_C_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ wrl(pep, ETH_F_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Enable all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, ALL_INTS);
+
+ val = rdl(pep, PORT_CONFIG);
+ val |= PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+
+ /* Start RX DMA engine */
+ val = rdl(pep, SDMA_CMD);
+ val |= SDMA_CMD_ERD;
+ wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned int val = 0;
+
+ /* Stop all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, 0);
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Stop RX DMA */
+ val = rdl(pep, SDMA_CMD);
+ val &= ~SDMA_CMD_ERD; /* abort dma command */
+
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+
+ /* Disable port */
+ val = rdl(pep, PORT_CONFIG);
+ val &= ~PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+ int tx_index;
+ dma_addr_t addr;
+ int count;
+ int released = 0;
+
+ netif_tx_lock(dev);
+
+ pep->work_todo &= ~WORK_TX_DONE;
+ while (pep->tx_desc_count > 0) {
+ tx_index = pep->tx_used_desc_q;
+ desc = &pep->p_tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+ if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+ if (released > 0) {
+ goto txq_reclaim_end;
+ } else {
+ released = -1;
+ goto txq_reclaim_end;
+ }
+ }
+ pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+ pep->tx_desc_count--;
+ addr = desc->buf_ptr;
+ count = desc->byte_cnt;
+ skb = pep->tx_skb[tx_index];
+ if (skb)
+ pep->tx_skb[tx_index] = NULL;
+
+ if (cmd_sts & TX_ERROR) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error in TX\n", dev->name);
+ dev->stats.tx_errors++;
+ }
+ dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ released++;
+ }
+txq_reclaim_end:
+ netif_tx_unlock(dev);
+ return released;
+}
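+
+/*
+ * Return convention: the number of descriptors released, or -1 when
+ * nothing could be released because the first pending descriptor is
+ * still owned by the DMA engine.
+ */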
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: TX timeout desc_count %d\n",
+ dev->name, pep->tx_desc_count);
+
+ schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+ struct pxa168_eth_private *pep = container_of(work,
+ struct pxa168_eth_private,
+ tx_timeout_task);
+ struct net_device *dev = pep->dev;
+ pxa168_eth_stop(dev);
+ pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+
+ while (budget-- > 0) {
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (pep->rx_resource_err)
+ break;
+ rx_curr_desc = pep->rx_curr_desc_q;
+ rx_used_desc = pep->rx_used_desc_q;
+ rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+ cmd_sts = rx_desc->cmd_sts;
+ rmb();
+ if (cmd_sts & (BUF_OWNED_BY_DMA))
+ break;
+ skb = pep->rx_skb[rx_curr_desc];
+ pep->rx_skb[rx_curr_desc] = NULL;
+
+ rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+ pep->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted. */
+ /* Set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ pep->rx_resource_err = 1;
+ pep->rx_desc_count--;
+ dma_unmap_single(NULL, rx_desc->buf_ptr,
+ rx_desc->buf_size,
+ DMA_FROM_DEVICE);
+ received_packets++;
+ /*
+ * Update statistics.
+ * Note byte count includes 4 byte CRC count
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += rx_desc->byte_cnt;
+ /*
+		 * If a packet was received without the first/last bits set,
+		 * or with the error summary bit set, it needs to be dropped.
+ */
+ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC))
+ || (cmd_sts & RX_ERROR)) {
+
+ stats->rx_dropped++;
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Rx pkt on multiple desc\n",
+ dev->name);
+ }
+ if (cmd_sts & RX_ERROR)
+ stats->rx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, rx_desc->byte_cnt - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ }
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ return received_packets;
+}
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+ struct net_device *dev)
+{
+ u32 icr;
+ int ret = 0;
+
+ icr = rdl(pep, INT_CAUSE);
+ if (icr == 0)
+ return IRQ_NONE;
+
+ wrl(pep, INT_CAUSE, ~icr);
+ if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+ pep->work_todo |= WORK_TX_DONE;
+ ret = 1;
+ }
+ if (icr & ICR_RXBUF)
+ ret = 1;
+ if (icr & ICR_MII_CH) {
+ pep->work_todo |= WORK_LINK;
+ ret = 1;
+ }
+ return ret;
+}
+
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+ struct net_device *dev = pep->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdl(pep, PORT_STATUS);
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ txq_reclaim(dev, 1);
+ }
+ return;
+ }
+ if (port_status & PORT_SPEED_100)
+ speed = 100;
+ else
+ speed = 10;
+
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+ return IRQ_NONE;
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ napi_schedule(&pep->napi);
+ return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = pep->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ pep->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ pep->skb_size += SKB_DMA_REALIGN;
+
+}
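+
+/*
+ * Worked example: with the default MTU of 1500, skb_size is
+ * 1500 + 36 = 1536, which is already a multiple of 8, so pep->skb_size
+ * ends up as 1536 + SKB_DMA_REALIGN (SKB_DMA_REALIGN is typically 0
+ * when NET_SKB_PAD is a multiple of the cache line size).
+ */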
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ pxa168_eth_recalc_skb_size(pep);
+ if (pep->skb_size <= 1518)
+ skb_size = PCXR_MFL_1518;
+ else if (pep->skb_size <= 1536)
+ skb_size = PCXR_MFL_1536;
+ else if (pep->skb_size <= 2048)
+ skb_size = PCXR_MFL_2048;
+ else
+ skb_size = PCXR_MFL_64K;
+
+ /* Extended Port Configuration */
+ wrl(pep,
+ PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ PCXR_DSCP_EN | /* Enable DSCP in IP */
+ skb_size | PCXR_FLP | /* do not force link pass */
+ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
+
+ return 0;
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+ int err = 0;
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+ /* Initialize address hash table */
+ err = init_hash_table(pep);
+ if (err)
+ return err;
+ /* SDMA configuration */
+ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
+ SDCR_RIFB | /* Rx interrupt on frame */
+ SDCR_BLMT | /* Little endian transmit */
+ SDCR_BLMR | /* Little endian receive */
+ SDCR_RC_MAX_RETRANS); /* Max retransmit count */
+ /* Port Configuration */
+ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
+ set_port_config_ext(pep);
+
+ return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct rx_desc *p_rx_desc;
+ int size = 0, i = 0;
+ int rx_desc_num = pep->rx_ring_size;
+
+ /* Allocate RX skb rings */
+ pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+ GFP_KERNEL);
+ if (!pep->rx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate RX ring */
+ pep->rx_desc_count = 0;
+ size = pep->rx_ring_size * sizeof(struct rx_desc);
+ pep->rx_desc_area_size = size;
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma, GFP_KERNEL);
+ if (!pep->p_rx_desc_area) {
+ printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_rx_desc_area, 0, size);
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+ }
+ /* Save Rx desc pointer to driver struct. */
+ pep->rx_curr_desc_q = 0;
+ pep->rx_used_desc_q = 0;
+ pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+ return 0;
+out:
+ kfree(pep->rx_skb);
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int curr;
+
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+ if (pep->rx_skb[curr]) {
+ dev_kfree_skb(pep->rx_skb[curr]);
+ pep->rx_desc_count--;
+ }
+ }
+ if (pep->rx_desc_count)
+ printk(KERN_ERR
+ "Error in freeing Rx Ring. %d skb's still\n",
+ pep->rx_desc_count);
+ /* Free RX ring */
+ if (pep->p_rx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+ pep->p_rx_desc_area, pep->rx_desc_dma);
+ kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *p_tx_desc;
+ int size = 0, i = 0;
+ int tx_desc_num = pep->tx_ring_size;
+
+ pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+ GFP_KERNEL);
+ if (!pep->tx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate TX ring */
+ pep->tx_desc_count = 0;
+ size = pep->tx_ring_size * sizeof(struct tx_desc);
+ pep->tx_desc_area_size = size;
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma, GFP_KERNEL);
+ if (!pep->p_tx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+ }
+ pep->tx_curr_desc_q = 0;
+ pep->tx_used_desc_q = 0;
+ pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+ return 0;
+out:
+ kfree(pep->tx_skb);
+ return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ /* Free outstanding skb's on TX ring */
+ txq_reclaim(dev, 1);
+ BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+ /* Free TX ring */
+ if (pep->p_tx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+ pep->p_tx_desc_area, pep->tx_desc_dma);
+ kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = request_irq(dev->irq, pxa168_eth_int_handler,
+ IRQF_DISABLED, dev->name, dev);
+ if (err) {
+ dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+ pep->rx_resource_err = 0;
+ err = rxq_init(dev);
+ if (err != 0)
+ goto out_free_irq;
+ err = txq_init(dev);
+ if (err != 0)
+ goto out_free_rx_skb;
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+ netif_carrier_off(dev);
+ eth_port_start(dev);
+ napi_enable(&pep->napi);
+ return 0;
+out_free_rx_skb:
+ rxq_deinit(dev);
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ eth_port_reset(dev);
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ napi_disable(&pep->napi);
+ del_timer_sync(&pep->timeout);
+ netif_carrier_off(dev);
+ free_irq(dev->irq, dev);
+ rxq_deinit(dev);
+ txq_deinit(dev);
+
+ return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+ int retval;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if ((mtu > 9500) || (mtu < 68))
+ return -EINVAL;
+
+ dev->mtu = mtu;
+ retval = set_port_config_ext(pep);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs of the new MTU.
+	 * There is a risk that the open will not succeed if
+	 * memory is exhausted.
+ */
+ pxa168_eth_stop(dev);
+ if (pxa168_eth_open(dev)) {
+ dev_printk(KERN_ERR, &dev->dev,
+ "fatal error on re-opening device after "
+ "MTU change\n");
+ }
+
+ return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+ int tx_desc_curr;
+
+ tx_desc_curr = pep->tx_curr_desc_q;
+ pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+ BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+ pep->tx_desc_count++;
+
+ return tx_desc_curr;
+}
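+
+/*
+ * The BUG_ON above guards against ring overrun; the transmit path
+ * keeps at least one descriptor free by stopping the queue in
+ * pxa168_eth_start_xmit() once tx_ring_size - tx_desc_count <= 1.
+ */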
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct pxa168_eth_private *pep =
+ container_of(napi, struct pxa168_eth_private, napi);
+ struct net_device *dev = pep->dev;
+ int work_done = 0;
+
+ if (unlikely(pep->work_todo & WORK_LINK)) {
+ pep->work_todo &= ~(WORK_LINK);
+ handle_link_event(pep);
+ }
+ /*
+	 * We call txq_reclaim on every poll, since interrupts are disabled
+	 * while NAPI runs and we would otherwise miss the TX_DONE interrupt,
+	 * which is not reflected in the interrupt status register.
+ */
+ txq_reclaim(dev, 0);
+ if (netif_queue_stopped(dev)
+ && pep->tx_ring_size - pep->tx_desc_count > 1) {
+ netif_wake_queue(dev);
+ }
+ work_done = rxq_process(dev, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+ wrl(pep, INT_MASK, ALL_INTS);
+ }
+
+ return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct tx_desc *desc;
+ int tx_index;
+ int length;
+
+ tx_index = eth_alloc_tx_desc_index(pep);
+ desc = &pep->p_tx_desc_area[tx_index];
+ length = skb->len;
+ pep->tx_skb[tx_index] = skb;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
+ skb_tx_timestamp(skb);
+
+ wmb();
+ desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+ TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+ wmb();
+ wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+ stats->tx_bytes += length;
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+ if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+ /* We handled the current skb, but now we are out of space.*/
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+ int i = 0;
+
+ /* wait for the SMI register to become available */
+ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+ int i = 0;
+ int val;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+ /* now wait for the data to be valid */
+ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk(KERN_WARNING
+ "pxa168_eth: SMI bus read not valid\n");
+ return -ENODEV;
+ }
+ msleep(10);
+ }
+
+ return val & 0xffff;
+}
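+
+/*
+ * SMI command word layout, as used above: bits 20:16 hold the PHY
+ * address, bits 25:21 the register number, and bit 26 selects a read.
+ * Reading MII_BMSR (register 1) of the PHY at address 0 thus writes
+ * (0 << 16) | (1 << 21) | SMI_OP_R == 0x04200000 and then polls for
+ * SMI_R_VALID.
+ */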
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+ SMI_OP_W | (value & 0xffff));
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ if (pep->phy != NULL)
+ return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+ struct mii_bus *bus = pep->smi_bus;
+ struct phy_device *phydev;
+ int start;
+ int num;
+ int i;
+
+ if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+ /* Scan entire range */
+ start = ethernet_phy_get(pep);
+ num = 32;
+ } else {
+ /* Use phy addr specific to platform */
+ start = phy_addr & 0x1f;
+ num = 1;
+ }
+ phydev = NULL;
+ for (i = 0; i < num; i++) {
+ int addr = (start + i) & 0x1f;
+ if (bus->phy_map[addr] == NULL)
+ mdiobus_scan(bus, addr);
+
+ if (phydev == NULL) {
+ phydev = bus->phy_map[addr];
+ if (phydev != NULL)
+ ethernet_phy_set_addr(pep, addr);
+ }
+ }
+
+ return phydev;
+}
+
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+ struct phy_device *phy = pep->phy;
+ ethernet_phy_reset(pep);
+
+ phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+ if (speed == 0) {
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->supported &= PHY_BASIC_FEATURES;
+ phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ } else {
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->advertising = 0;
+ phy->speed = speed;
+ phy->duplex = duplex;
+ }
+ phy_start_aneg(phy);
+}
+
+static int ethernet_phy_setup(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->pd->init)
+ pep->pd->init();
+ pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+ if (pep->phy != NULL)
+ phy_init(pep, pep->pd->speed, pep->pd->duplex);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ return 0;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = phy_read_status(pep->phy);
+ if (err == 0)
+ err = phy_ethtool_gset(pep->phy, cmd);
+
+ return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRIVER_NAME, 32);
+ strncpy(info->version, DRIVER_VERSION, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ strncpy(info->bus_info, "N/A", 32);
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+ .get_settings = pxa168_get_settings,
+ .set_settings = pxa168_set_settings,
+ .get_drvinfo = pxa168_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+ .ndo_open = pxa168_eth_open,
+ .ndo_stop = pxa168_eth_stop,
+ .ndo_start_xmit = pxa168_eth_start_xmit,
+ .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+ .ndo_set_mac_address = pxa168_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_change_mtu = pxa168_eth_change_mtu,
+ .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+ struct pxa168_eth_private *pep = NULL;
+ struct net_device *dev = NULL;
+ struct resource *res;
+ struct clk *clk;
+ int err;
+
+ printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+ clk = clk_get(&pdev->dev, "MFUCLK");
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+ DRIVER_NAME);
+ return -ENODEV;
+ }
+ clk_enable(clk);
+
+ dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ pep = netdev_priv(dev);
+ pep->dev = dev;
+ pep->clk = clk;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -ENODEV;
+ goto err_netdev;
+ }
+ pep->base = ioremap(res->start, resource_size(res));
+ if (pep->base == NULL) {
+ err = -ENOMEM;
+ goto err_netdev;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
+ dev->netdev_ops = &pxa168_eth_netdev_ops;
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+ SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+ INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+ printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
+ random_ether_addr(dev->dev_addr);
+
+ pep->pd = pdev->dev.platform_data;
+ pep->rx_ring_size = NUM_RX_DESCS;
+ if (pep->pd->rx_queue_size)
+ pep->rx_ring_size = pep->pd->rx_queue_size;
+
+ pep->tx_ring_size = NUM_TX_DESCS;
+ if (pep->pd->tx_queue_size)
+ pep->tx_ring_size = pep->pd->tx_queue_size;
+
+ pep->port_num = pep->pd->port_number;
+ /* Hardware supports only 3 ports */
+ BUG_ON(pep->port_num > 2);
+ netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+ memset(&pep->timeout, 0, sizeof(struct timer_list));
+ init_timer(&pep->timeout);
+ pep->timeout.function = rxq_refill_timer_wrapper;
+ pep->timeout.data = (unsigned long)pep;
+
+ pep->smi_bus = mdiobus_alloc();
+ if (pep->smi_bus == NULL) {
+ err = -ENOMEM;
+ goto err_base;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+ pep->smi_bus->read = pxa168_smi_read;
+ pep->smi_bus->write = pxa168_smi_write;
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ pep->smi_bus->parent = &pdev->dev;
+ pep->smi_bus->phy_mask = 0xffffffff;
+ err = mdiobus_register(pep->smi_bus);
+ if (err)
+ goto err_free_mdio;
+
+ pxa168_init_hw(pep);
+ err = ethernet_phy_setup(dev);
+ if (err)
+ goto err_mdiobus;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ err = register_netdev(dev);
+ if (err)
+ goto err_mdiobus;
+ return 0;
+
+err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+err_base:
+ iounmap(pep->base);
+err_netdev:
+ free_netdev(dev);
+err_clk:
+ clk_disable(clk);
+ clk_put(clk);
+ return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->htpr) {
+ dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+ pep->htpr, pep->htpr_dma);
+ pep->htpr = NULL;
+ }
+ if (pep->clk) {
+ clk_disable(pep->clk);
+ clk_put(pep->clk);
+ pep->clk = NULL;
+ }
+ if (pep->phy != NULL)
+ phy_detach(pep->phy);
+
+ iounmap(pep->base);
+ pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+ cancel_work_sync(&pep->tx_timeout_task);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static struct platform_driver pxa168_eth_driver = {
+ .probe = pxa168_eth_probe,
+ .remove = pxa168_eth_remove,
+ .shutdown = pxa168_eth_shutdown,
+ .resume = pxa168_eth_resume,
+ .suspend = pxa168_eth_suspend,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init pxa168_init_module(void)
+{
+ return platform_driver_register(&pxa168_eth_driver);
+}
+
+static void __exit pxa168_cleanup_module(void)
+{
+ platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
--- /dev/null
- tristate "OKI SEMICONDUCTOR ML7223 IOH GbE (Intel EG20T PCH)"
+#
+# OKI Semiconductor device configuration
+#
+
+config PCH_GBE
- Output Hub), ML7223.
- ML7223 IOH is for MP(Media Phone) use.
- ML7223 is companion chip for Intel Atom E6xx series.
- ML7223 is completely compatible for Intel EG20T PCH.
++ tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
+ depends on PCI
+ select NET_CORE
+ select MII
+ ---help---
+ This is a gigabit ethernet driver for EG20T PCH.
+ EG20T PCH is the platform controller hub that is used in Intel's
+ general embedded platform. EG20T PCH has Gigabit Ethernet interface.
+ Using this interface, it is able to access system devices connected
+ to Gigabit Ethernet. This driver enables Gigabit Ethernet function.
+
+ This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
++ Output Hub), ML7223/ML7831.
++ ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
++ purpose use.
++ ML7223/ML7831 is companion chip for Intel Atom E6xx series.
++ ML7223/ML7831 is completely compatible for Intel EG20T PCH.
--- /dev/null
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _PCH_GBE_H_
+#define _PCH_GBE_H_
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+/**
+ * pch_gbe_regs_mac_adr - Structure holding values of mac address registers
+ * @high: Denotes the 1st to 4th bytes of the MAC address
+ * @low: Denotes the 5th and 6th bytes of the MAC address
+ */
+struct pch_gbe_regs_mac_adr {
+ u32 high;
+ u32 low;
+};
+/**
+ * pch_gbe_regs - Structure holding values of MAC registers
+ */
+struct pch_gbe_regs {
+ u32 INT_ST;
+ u32 INT_EN;
+ u32 MODE;
+ u32 RESET;
+ u32 TCPIP_ACC;
+ u32 EX_LIST;
+ u32 INT_ST_HOLD;
+ u32 PHY_INT_CTRL;
+ u32 MAC_RX_EN;
+ u32 RX_FCTRL;
+ u32 PAUSE_REQ;
+ u32 RX_MODE;
+ u32 TX_MODE;
+ u32 RX_FIFO_ST;
+ u32 TX_FIFO_ST;
+ u32 TX_FID;
+ u32 TX_RESULT;
+ u32 PAUSE_PKT1;
+ u32 PAUSE_PKT2;
+ u32 PAUSE_PKT3;
+ u32 PAUSE_PKT4;
+ u32 PAUSE_PKT5;
+ u32 reserve[2];
+ struct pch_gbe_regs_mac_adr mac_adr[16];
+ u32 ADDR_MASK;
+ u32 MIIM;
+ u32 MAC_ADDR_LOAD;
+ u32 RGMII_ST;
+ u32 RGMII_CTRL;
+ u32 reserve3[3];
+ u32 DMA_CTRL;
+ u32 reserve4[3];
+ u32 RX_DSC_BASE;
+ u32 RX_DSC_SIZE;
+ u32 RX_DSC_HW_P;
+ u32 RX_DSC_HW_P_HLD;
+ u32 RX_DSC_SW_P;
+ u32 reserve5[3];
+ u32 TX_DSC_BASE;
+ u32 TX_DSC_SIZE;
+ u32 TX_DSC_HW_P;
+ u32 TX_DSC_HW_P_HLD;
+ u32 TX_DSC_SW_P;
+ u32 reserve6[3];
+ u32 RX_DMA_ST;
+ u32 TX_DMA_ST;
+ u32 reserve7[2];
+ u32 WOL_ST;
+ u32 WOL_CTRL;
+ u32 WOL_ADDR_MASK;
+};
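+
+/*
+ * The struct above mirrors the memory-mapped register block one-to-one,
+ * so a field's offsetof() is its hardware offset (INT_ST at 0x00,
+ * INT_EN at 0x04, and so on); the driver would presumably access the
+ * fields through ioread32()/iowrite32() on the mapped base.
+ */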
+
+/* Interrupt Status */
+/* Interrupt Status Hold */
+/* Interrupt Enable */
+#define PCH_GBE_INT_RX_DMA_CMPLT 0x00000001 /* Receive DMA Transfer Complete */
+#define PCH_GBE_INT_RX_VALID 0x00000002 /* MAC Normal Receive Complete */
+#define PCH_GBE_INT_RX_FRAME_ERR 0x00000004 /* Receive frame error */
+#define PCH_GBE_INT_RX_FIFO_ERR 0x00000008 /* Receive FIFO Overflow */
+#define PCH_GBE_INT_RX_DMA_ERR 0x00000010 /* Receive DMA Transfer Error */
+#define PCH_GBE_INT_RX_DSC_EMP 0x00000020 /* Receive Descriptor Empty */
+#define PCH_GBE_INT_TX_CMPLT 0x00000100 /* MAC Transmission Complete */
+#define PCH_GBE_INT_TX_DMA_CMPLT 0x00000200 /* DMA Transfer Complete */
+#define PCH_GBE_INT_TX_FIFO_ERR 0x00000400 /* Transmission FIFO underflow. */
+#define PCH_GBE_INT_TX_DMA_ERR 0x00000800 /* Transmission DMA Error */
+#define PCH_GBE_INT_PAUSE_CMPLT 0x00001000 /* Pause Transmission complete */
+#define PCH_GBE_INT_MIIM_CMPLT 0x00010000 /* MIIM I/F Read completion */
+#define PCH_GBE_INT_PHY_INT 0x00100000 /* Interruption from PHY */
+#define PCH_GBE_INT_WOL_DET 0x01000000 /* Wake On LAN Event detection. */
+#define PCH_GBE_INT_TCPIP_ERR 0x10000000 /* TCP/IP Accelerator Error */
+
+/* Mode */
+#define PCH_GBE_MODE_MII_ETHER 0x00000000 /* GIGA Ethernet Mode [MII] */
+#define PCH_GBE_MODE_GMII_ETHER 0x80000000 /* GIGA Ethernet Mode [GMII] */
+#define PCH_GBE_MODE_HALF_DUPLEX 0x00000000 /* Duplex Mode [half duplex] */
+#define PCH_GBE_MODE_FULL_DUPLEX 0x40000000 /* Duplex Mode [full duplex] */
+#define PCH_GBE_MODE_FR_BST 0x04000000 /* Frame bursting is done */
+
+/* Reset */
+#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
- #define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
- #define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
++#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
++#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
+
+/* TCP/IP Accelerator Control */
+#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
+#define PCH_GBE_RX_TCPIPACC_OFF 0x00000004 /* RX TCP/IP ACC Disabled */
+#define PCH_GBE_TX_TCPIPACC_EN 0x00000002 /* TX TCP/IP ACC Enable */
+#define PCH_GBE_RX_TCPIPACC_EN 0x00000001 /* RX TCP/IP ACC Enable */
+
+/* MAC RX Enable */
+#define PCH_GBE_MRE_MAC_RX_EN 0x00000001 /* MAC Receive Enable */
+
+/* RX Flow Control */
+#define PCH_GBE_FL_CTRL_EN 0x80000000 /* Pause packet is enabled */
+
+/* Pause Packet Request */
+#define PCH_GBE_PS_PKT_RQ 0x80000000 /* Pause packet Request */
+
+/* RX Mode */
+#define PCH_GBE_ADD_FIL_EN 0x80000000 /* Address Filtering Enable */
+/* Multicast Filtering Enable */
+#define PCH_GBE_MLT_FIL_EN 0x40000000
+/* Receive Almost Empty Threshold */
+#define PCH_GBE_RH_ALM_EMP_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_ALM_EMP_8 0x00004000 /* 8 words */
+#define PCH_GBE_RH_ALM_EMP_16 0x00008000 /* 16 words */
+#define PCH_GBE_RH_ALM_EMP_32 0x0000C000 /* 32 words */
+/* Receive Almost Full Threshold */
+#define PCH_GBE_RH_ALM_FULL_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */
+#define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */
+#define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */
+/* RX FIFO Read Trigger Threshold */
+#define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */
+#define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */
+#define PCH_GBE_RH_RD_TRG_32 0x00000600 /* 32 words */
+#define PCH_GBE_RH_RD_TRG_64 0x00000800 /* 64 words */
+#define PCH_GBE_RH_RD_TRG_128 0x00000A00 /* 128 words */
+#define PCH_GBE_RH_RD_TRG_256 0x00000C00 /* 256 words */
+#define PCH_GBE_RH_RD_TRG_512 0x00000E00 /* 512 words */
+
+/* Receive Descriptor bit definitions */
+#define PCH_GBE_RXD_ACC_STAT_BCAST 0x00000400
+#define PCH_GBE_RXD_ACC_STAT_MCAST 0x00000200
+#define PCH_GBE_RXD_ACC_STAT_UCAST 0x00000100
+#define PCH_GBE_RXD_ACC_STAT_TCPIPOK 0x000000C0
+#define PCH_GBE_RXD_ACC_STAT_IPOK 0x00000080
+#define PCH_GBE_RXD_ACC_STAT_TCPOK 0x00000040
+#define PCH_GBE_RXD_ACC_STAT_IP6ERR 0x00000020
+#define PCH_GBE_RXD_ACC_STAT_OFLIST 0x00000010
+#define PCH_GBE_RXD_ACC_STAT_TYPEIP 0x00000008
+#define PCH_GBE_RXD_ACC_STAT_MACL 0x00000004
+#define PCH_GBE_RXD_ACC_STAT_PPPOE 0x00000002
+#define PCH_GBE_RXD_ACC_STAT_VTAGT 0x00000001
+#define PCH_GBE_RXD_GMAC_STAT_PAUSE 0x0200
+#define PCH_GBE_RXD_GMAC_STAT_MARBR 0x0100
+#define PCH_GBE_RXD_GMAC_STAT_MARMLT 0x0080
+#define PCH_GBE_RXD_GMAC_STAT_MARIND 0x0040
+#define PCH_GBE_RXD_GMAC_STAT_MARNOTMT 0x0020
+#define PCH_GBE_RXD_GMAC_STAT_TLONG 0x0010
+#define PCH_GBE_RXD_GMAC_STAT_TSHRT 0x0008
+#define PCH_GBE_RXD_GMAC_STAT_NOTOCTAL 0x0004
+#define PCH_GBE_RXD_GMAC_STAT_NBLERR 0x0002
+#define PCH_GBE_RXD_GMAC_STAT_CRCERR 0x0001
+
+/* Transmit Descriptor bit definitions */
+#define PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF 0x0008
+#define PCH_GBE_TXD_CTRL_ITAG 0x0004
+#define PCH_GBE_TXD_CTRL_ICRC 0x0002
+#define PCH_GBE_TXD_CTRL_APAD 0x0001
+#define PCH_GBE_TXD_WORDS_SHIFT 2
+#define PCH_GBE_TXD_GMAC_STAT_CMPLT 0x2000
+#define PCH_GBE_TXD_GMAC_STAT_ABT 0x1000
+#define PCH_GBE_TXD_GMAC_STAT_EXCOL 0x0800
+#define PCH_GBE_TXD_GMAC_STAT_SNGCOL 0x0400
+#define PCH_GBE_TXD_GMAC_STAT_MLTCOL 0x0200
+#define PCH_GBE_TXD_GMAC_STAT_CRSER 0x0100
+#define PCH_GBE_TXD_GMAC_STAT_TLNG 0x0080
+#define PCH_GBE_TXD_GMAC_STAT_TSHRT 0x0040
+#define PCH_GBE_TXD_GMAC_STAT_LTCOL 0x0020
+#define PCH_GBE_TXD_GMAC_STAT_TFUNDFLW 0x0010
+#define PCH_GBE_TXD_GMAC_STAT_RTYCNT_MASK 0x000F
+
+/* TX Mode */
+#define PCH_GBE_TM_NO_RTRY 0x80000000 /* No Retransmission */
+#define PCH_GBE_TM_LONG_PKT 0x40000000 /* Long Packet TX Enable */
+#define PCH_GBE_TM_ST_AND_FD 0x20000000 /* Store and Forward */
+#define PCH_GBE_TM_SHORT_PKT 0x10000000 /* Short Packet TX Enable */
+#define PCH_GBE_TM_LTCOL_RETX 0x08000000 /* Retransmission at Late Collision */
+/* Frame Start Threshold */
+#define PCH_GBE_TM_TH_TX_STRT_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_TX_STRT_8 0x00004000 /* 8 words */
+#define PCH_GBE_TM_TH_TX_STRT_16 0x00008000 /* 16 words */
+#define PCH_GBE_TM_TH_TX_STRT_32 0x0000C000 /* 32 words */
+/* Transmit Almost Empty Threshold */
+#define PCH_GBE_TM_TH_ALM_EMP_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_ALM_EMP_8 0x00000800 /* 8 words */
+#define PCH_GBE_TM_TH_ALM_EMP_16 0x00001000 /* 16 words */
+#define PCH_GBE_TM_TH_ALM_EMP_32 0x00001800 /* 32 words */
+#define PCH_GBE_TM_TH_ALM_EMP_64 0x00002000 /* 64 words */
+#define PCH_GBE_TM_TH_ALM_EMP_128 0x00002800 /* 128 words */
+#define PCH_GBE_TM_TH_ALM_EMP_256 0x00003000 /* 256 words */
+#define PCH_GBE_TM_TH_ALM_EMP_512 0x00003800 /* 512 words */
+/* Transmit Almost Full Threshold */
+#define PCH_GBE_TM_TH_ALM_FULL_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_ALM_FULL_8 0x00000200 /* 8 words */
+#define PCH_GBE_TM_TH_ALM_FULL_16 0x00000400 /* 16 words */
+#define PCH_GBE_TM_TH_ALM_FULL_32 0x00000600 /* 32 words */
+
+/* RX FIFO Status */
+#define PCH_GBE_RF_ALM_FULL 0x80000000 /* RX FIFO is almost full. */
+#define PCH_GBE_RF_ALM_EMP 0x40000000 /* RX FIFO is almost empty. */
+#define PCH_GBE_RF_RD_TRG 0x20000000 /* Become more than RH_RD_TRG. */
+#define PCH_GBE_RF_STRWD 0x1FFE0000 /* The word count of RX FIFO. */
+#define PCH_GBE_RF_RCVING 0x00010000 /* Stored in RX FIFO. */
+
+/* MAC Address Mask */
+#define PCH_GBE_BUSY 0x80000000
+
+/* MIIM */
+#define PCH_GBE_MIIM_OPER_WRITE 0x04000000
+#define PCH_GBE_MIIM_OPER_READ 0x00000000
+#define PCH_GBE_MIIM_OPER_READY 0x04000000
+#define PCH_GBE_MIIM_PHY_ADDR_SHIFT 21
+#define PCH_GBE_MIIM_REG_ADDR_SHIFT 16
+
+/* RGMII Status */
+#define PCH_GBE_LINK_UP 0x80000008
+#define PCH_GBE_RXC_SPEED_MSK 0x00000006
+#define PCH_GBE_RXC_SPEED_2_5M 0x00000000 /* 2.5MHz */
+#define PCH_GBE_RXC_SPEED_25M 0x00000002 /* 25MHz */
+#define PCH_GBE_RXC_SPEED_125M 0x00000004 /* 125MHz */
+#define PCH_GBE_DUPLEX_FULL 0x00000001
+
+/* RGMII Control */
+#define PCH_GBE_CRS_SEL 0x00000010
+#define PCH_GBE_RGMII_RATE_125M 0x00000000
+#define PCH_GBE_RGMII_RATE_25M 0x00000008
+#define PCH_GBE_RGMII_RATE_2_5M 0x0000000C
+#define PCH_GBE_RGMII_MODE_GMII 0x00000000
+#define PCH_GBE_RGMII_MODE_RGMII 0x00000002
+#define PCH_GBE_CHIP_TYPE_EXTERNAL 0x00000000
+#define PCH_GBE_CHIP_TYPE_INTERNAL 0x00000001
+
+/* DMA Control */
+#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
+#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
+
++/* RX DMA STATUS */
++#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
++
+/* Wake On LAN Status */
+#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
+#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
+
+/* The Frame registered in Address Recognizer */
+#define PCH_GBE_WLS_IND 0x00000002
+#define PCH_GBE_WLS_MP 0x00000001 /* Magic packet Address */
+
+/* Wake On LAN Control */
+#define PCH_GBE_WLC_WOL_MODE 0x00010000
+#define PCH_GBE_WLC_IGN_TLONG 0x00000100
+#define PCH_GBE_WLC_IGN_TSHRT 0x00000080
+#define PCH_GBE_WLC_IGN_OCTER 0x00000040
+#define PCH_GBE_WLC_IGN_NBLER 0x00000020
+#define PCH_GBE_WLC_IGN_CRCER 0x00000010
+#define PCH_GBE_WLC_BR 0x00000008
+#define PCH_GBE_WLC_MLT 0x00000004
+#define PCH_GBE_WLC_IND 0x00000002
+#define PCH_GBE_WLC_MP 0x00000001
+
+/* Wake On LAN Address Mask */
+#define PCH_GBE_WLA_BUSY 0x80000000
+
+/* TX/RX descriptor defines */
+#define PCH_GBE_MAX_TXD 4096
+#define PCH_GBE_DEFAULT_TXD 256
+#define PCH_GBE_MIN_TXD 8
+#define PCH_GBE_MAX_RXD 4096
+#define PCH_GBE_DEFAULT_RXD 256
+#define PCH_GBE_MIN_RXD 8
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define PCH_GBE_TX_DESC_MULTIPLE 8
+#define PCH_GBE_RX_DESC_MULTIPLE 8
+
+/* Read/Write operation is done through MII Management IF */
+#define PCH_GBE_HAL_MIIM_READ ((u32)0x00000000)
+#define PCH_GBE_HAL_MIIM_WRITE ((u32)0x04000000)
+
+/* flow control values */
+#define PCH_GBE_FC_NONE 0
+#define PCH_GBE_FC_RX_PAUSE 1
+#define PCH_GBE_FC_TX_PAUSE 2
+#define PCH_GBE_FC_FULL 3
+#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
+
+
+struct pch_gbe_hw;
+/**
+ * struct pch_gbe_functions - HAL API function pointers
+ * @get_bus_info: for pch_gbe_hal_get_bus_info
+ * @init_hw: for pch_gbe_hal_init_hw
+ * @read_phy_reg: for pch_gbe_hal_read_phy_reg
+ * @write_phy_reg: for pch_gbe_hal_write_phy_reg
+ * @reset_phy: for pch_gbe_hal_phy_hw_reset
+ * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
+ * @power_up_phy: for pch_gbe_hal_power_up_phy
+ * @power_down_phy: for pch_gbe_hal_power_down_phy
+ * @read_mac_addr: for pch_gbe_hal_read_mac_addr
+ */
+struct pch_gbe_functions {
+ void (*get_bus_info) (struct pch_gbe_hw *);
+ s32 (*init_hw) (struct pch_gbe_hw *);
+ s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
+ s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
+ void (*reset_phy) (struct pch_gbe_hw *);
+ void (*sw_reset_phy) (struct pch_gbe_hw *);
+ void (*power_up_phy) (struct pch_gbe_hw *hw);
+ void (*power_down_phy) (struct pch_gbe_hw *hw);
+ s32 (*read_mac_addr) (struct pch_gbe_hw *);
+};
+
+/**
+ * struct pch_gbe_mac_info - MAC information
+ * @addr: MAC address (6 bytes)
+ * @fc: Mode of flow control
+ * @fc_autoneg: Auto negotiation enable for flow control setting
+ * @tx_fc_enable: Enable flag of Transmit flow control
+ * @max_frame_size: Max transmit frame size
+ * @min_frame_size: Min transmit frame size
+ * @autoneg: Auto negotiation enable
+ * @link_speed: Link speed
+ * @link_duplex: Link duplex
+ */
+struct pch_gbe_mac_info {
+ u8 addr[6];
+ u8 fc;
+ u8 fc_autoneg;
+ u8 tx_fc_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u8 autoneg;
+ u16 link_speed;
+ u16 link_duplex;
+};
+
+/**
+ * struct pch_gbe_phy_info - PHY information
+ * @addr: PHY address
+ * @id: PHY's identifier
+ * @revision: PHY's revision
+ * @reset_delay_us: HW reset delay time[us]
+ * @autoneg_advertised: Autoneg advertised
+ */
+struct pch_gbe_phy_info {
+ u32 addr;
+ u32 id;
+ u32 revision;
+ u32 reset_delay_us;
+ u16 autoneg_advertised;
+};
+
+/*!
+ * @ingroup Gigabit Ether driver Layer
+ * @struct pch_gbe_bus_info
+ * @brief Bus information
+ */
+struct pch_gbe_bus_info {
+ u8 type;
+ u8 speed;
+ u8 width;
+};
+
+/*!
+ * @ingroup Gigabit Ether driver Layer
+ * @struct pch_gbe_hw
+ * @brief Hardware information
+ */
+struct pch_gbe_hw {
+ void *back;
+
+ struct pch_gbe_regs __iomem *reg;
+ spinlock_t miim_lock;
+
+ const struct pch_gbe_functions *func;
+ struct pch_gbe_mac_info mac;
+ struct pch_gbe_phy_info phy;
+ struct pch_gbe_bus_info bus;
+};
+
+/**
+ * struct pch_gbe_rx_desc - Receive Descriptor
+ * @buffer_addr: RX Frame Buffer Address
+ * @tcp_ip_status: TCP/IP Accelerator Status
+ * @rx_words_eob: RX word count and Byte position
+ * @gbec_status: GMAC Status
+ * @dma_status: DMA Status
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ */
+struct pch_gbe_rx_desc {
+ u32 buffer_addr;
+ u32 tcp_ip_status;
+ u16 rx_words_eob;
+ u16 gbec_status;
+ u8 dma_status;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+/**
+ * struct pch_gbe_tx_desc - Transmit Descriptor
+ * @buffer_addr: TX Frame Buffer Address
+ * @length: Data buffer length
+ * @reserved1: Reserved
+ * @tx_words_eob: TX word count and Byte position
+ * @tx_frame_ctrl: TX Frame Control
+ * @dma_status: DMA Status
+ * @reserved2: Reserved
+ * @gbec_status: GMAC Status
+ */
+struct pch_gbe_tx_desc {
+ u32 buffer_addr;
+ u16 length;
+ u16 reserved1;
+ u16 tx_words_eob;
+ u16 tx_frame_ctrl;
+ u8 dma_status;
+ u8 reserved2;
+ u16 gbec_status;
+};
+
+
+/**
+ * struct pch_gbe_buffer - Buffer information
+ * @skb: pointer to a socket buffer
+ * @dma: DMA address
++ * @rx_buffer: receive buffer address within the pre-allocated buffer pool
+ * @time_stamp: time stamp
+ * @length: data size
+ * @mapped: set while the buffer is DMA-mapped
+ */
+struct pch_gbe_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
++ unsigned char *rx_buffer;
+ unsigned long time_stamp;
+ u16 length;
+ bool mapped;
+};
+
+/**
+ * struct pch_gbe_tx_ring - tx ring information
+ * @tx_lock: spinlock structs
+ * @desc: pointer to the descriptor ring memory
+ * @dma: physical address of the descriptor ring
+ * @size: length of descriptor ring in bytes
+ * @count: number of descriptors in the ring
+ * @next_to_use: next descriptor to associate a buffer with
+ * @next_to_clean: next descriptor to check for DD status bit
+ * @buffer_info: array of buffer information structs
+ */
+struct pch_gbe_tx_ring {
+ spinlock_t tx_lock;
+ struct pch_gbe_tx_desc *desc;
+ dma_addr_t dma;
+ unsigned int size;
+ unsigned int count;
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+ struct pch_gbe_buffer *buffer_info;
+};
+
+/**
+ * struct pch_gbe_rx_ring - rx ring information
+ * @desc: pointer to the descriptor ring memory
+ * @dma: physical address of the descriptor ring
+ * @size: length of descriptor ring in bytes
+ * @count: number of descriptors in the ring
+ * @next_to_use: next descriptor to associate a buffer with
+ * @next_to_clean: next descriptor to check for DD status bit
+ * @buffer_info: array of buffer information structs
++ * @rx_buff_pool: receive buffer pool allocated with dma_alloc_coherent
++ * @rx_buff_pool_logic: DMA (bus) address of the receive buffer pool
++ * @rx_buff_pool_size: size of the receive buffer pool in bytes
+ */
+struct pch_gbe_rx_ring {
+ struct pch_gbe_rx_desc *desc;
+ dma_addr_t dma;
++ unsigned char *rx_buff_pool;
++ dma_addr_t rx_buff_pool_logic;
++ unsigned int rx_buff_pool_size;
+ unsigned int size;
+ unsigned int count;
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+ struct pch_gbe_buffer *buffer_info;
+};
+
+/**
+ * struct pch_gbe_hw_stats - Statistics counters collected by the MAC
+ * @rx_packets: total packets received
+ * @tx_packets: total packets transmitted
+ * @rx_bytes: total bytes received
+ * @tx_bytes: total bytes transmitted
+ * @rx_errors: bad packets received
+ * @tx_errors: packet transmit problems
+ * @rx_dropped: no space in Linux buffers
+ * @tx_dropped: no space available in Linux
+ * @multicast: multicast packets received
+ * @collisions: collisions
+ * @rx_crc_errors: received packet with crc error
+ * @rx_frame_errors: received frame alignment error
+ * @rx_alloc_buff_failed: allocate failure of a receive buffer
+ * @tx_length_errors: transmit length error
+ * @tx_aborted_errors: transmit aborted error
+ * @tx_carrier_errors: transmit carrier error
+ * @tx_timeout_count: Number of transmit timeouts
+ * @tx_restart_count: Number of transmit restarts
+ * @intr_rx_dsc_empty_count: Interrupt count of receive descriptor empty
+ * @intr_rx_frame_err_count: Interrupt count of receive frame error
+ * @intr_rx_fifo_err_count: Interrupt count of receive FIFO error
+ * @intr_rx_dma_err_count: Interrupt count of receive DMA error
+ * @intr_tx_fifo_err_count: Interrupt count of transmit FIFO error
+ * @intr_tx_dma_err_count: Interrupt count of transmit DMA error
+ * @intr_tcpip_err_count: Interrupt count of TCP/IP Accelerator
+ */
+struct pch_gbe_hw_stats {
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 rx_bytes;
+ u32 tx_bytes;
+ u32 rx_errors;
+ u32 tx_errors;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 multicast;
+ u32 collisions;
+ u32 rx_crc_errors;
+ u32 rx_frame_errors;
+ u32 rx_alloc_buff_failed;
+ u32 tx_length_errors;
+ u32 tx_aborted_errors;
+ u32 tx_carrier_errors;
+ u32 tx_timeout_count;
+ u32 tx_restart_count;
+ u32 intr_rx_dsc_empty_count;
+ u32 intr_rx_frame_err_count;
+ u32 intr_rx_fifo_err_count;
+ u32 intr_rx_dma_err_count;
+ u32 intr_tx_fifo_err_count;
+ u32 intr_tx_dma_err_count;
+ u32 intr_tcpip_err_count;
+};
+
+/**
+ * struct pch_gbe_adapter - board specific private data structure
+ * @stats_lock: Spinlock structure for status
+ * @tx_queue_lock: Spinlock structure for transmit
+ * @ethtool_lock: Spinlock structure for ethtool
+ * @irq_sem: Semaphore for interrupt
+ * @netdev: Pointer of network device structure
+ * @pdev: Pointer of pci device structure
+ * @polling_netdev: Pointer of polling network device structure
+ * @napi: NAPI structure
+ * @hw: Pointer of hardware structure
+ * @stats: Hardware status
+ * @reset_task: Reset task
+ * @mii: MII information structure
+ * @watchdog_timer: Watchdog timer list
+ * @wake_up_evt: Wake up event
+ * @config_space: Configuration space
+ * @msg_enable: Driver message level
+ * @led_status: LED status
+ * @tx_ring: Pointer of Tx descriptor ring structure
+ * @rx_ring: Pointer of Rx descriptor ring structure
+ * @rx_buffer_len: Receive buffer length
+ * @tx_queue_len: Transmit queue length
+ * @have_msi: PCI MSI mode flag
++ * @rx_stop_flag: receive path stopped after an Rx FIFO overrun
+ */
+
+struct pch_gbe_adapter {
+ spinlock_t stats_lock;
+ spinlock_t tx_queue_lock;
+ spinlock_t ethtool_lock;
+ atomic_t irq_sem;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device *polling_netdev;
+ struct napi_struct napi;
+ struct pch_gbe_hw hw;
+ struct pch_gbe_hw_stats stats;
+ struct work_struct reset_task;
+ struct mii_if_info mii;
+ struct timer_list watchdog_timer;
+ u32 wake_up_evt;
+ u32 *config_space;
+ unsigned long led_status;
+ struct pch_gbe_tx_ring *tx_ring;
+ struct pch_gbe_rx_ring *rx_ring;
+ unsigned long rx_buffer_len;
+ unsigned long tx_queue_len;
+ bool have_msi;
++ bool rx_stop_flag;
+};
+
+extern const char pch_driver_version[];
+
+/* pch_gbe_main.c */
+extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *txdr);
+extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rxdr);
+extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring);
+extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring);
+extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+
+/* pch_gbe_param.c */
+extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+
+/* pch_gbe_ethtool.c */
+extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+
+/* pch_gbe_mac.c */
+extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
+ u32 addr, u32 dir, u32 reg, u16 data);
+#endif /* _PCH_GBE_H_ */
--- /dev/null
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+#include "pch_gbe_api.h"
+
+#define DRV_VERSION "1.00"
+const char pch_driver_version[] = DRV_VERSION;
+
+#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
+#define PCH_GBE_MAR_ENTRIES 16
+#define PCH_GBE_SHORT_PKT 64
+#define DSC_INIT16 0xC000
+#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_DMA_PADDING 2
+#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
+#define PCH_GBE_COPYBREAK_DEFAULT 256
+#define PCH_GBE_PCI_BAR 1
++#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
+
+/* Macros for ML7223 */
+#define PCI_VENDOR_ID_ROHM 0x10db
+#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+
++/* Macros for ML7831 */
++#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
++
+#define PCH_GBE_TX_WEIGHT 64
+#define PCH_GBE_RX_WEIGHT 64
+#define PCH_GBE_RX_BUFFER_WRITE 16
+
+/* Initialize the wake-on-LAN settings */
+#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
+
+#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
+ PCH_GBE_CHIP_TYPE_INTERNAL | \
+ PCH_GBE_RGMII_MODE_RGMII \
+ )
+
+/* Ethertype field values */
++#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
+#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
+#define PCH_GBE_FRAME_SIZE_2048 2048
+#define PCH_GBE_FRAME_SIZE_4096 4096
+#define PCH_GBE_FRAME_SIZE_8192 8192
+
+#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
+#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
+#define PCH_GBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
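Editor's note: PCH_GBE_DESC_UNUSED keeps one slot in the ring permanently empty, so next_to_use == next_to_clean can only mean "ring empty". A minimal user-space restatement of the same arithmetic (function name and sample values are invented for illustration, not part of the driver):

#include <assert.h>

/* Same arithmetic as PCH_GBE_DESC_UNUSED: one slot stays empty so that
 * next_to_use == next_to_clean unambiguously means "ring empty". */
static unsigned int desc_unused(unsigned int count,
                                unsigned int next_to_clean,
                                unsigned int next_to_use)
{
        return ((next_to_clean > next_to_use) ? 0 : count) +
               next_to_clean - next_to_use - 1;
}

int main(void)
{
        assert(desc_unused(256, 0, 0) == 255);  /* empty ring   */
        assert(desc_unused(256, 10, 9) == 0);   /* full ring    */
        assert(desc_unused(256, 0, 200) == 55); /* wrapped case */
        return 0;
}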
+
+/* Pause packet value */
+#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
+#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
+#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
+#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
+
+#define PCH_GBE_ETH_ALEN 6
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define PCH_GBE_INT_ENABLE_MASK ( \
+ PCH_GBE_INT_RX_DMA_CMPLT | \
+ PCH_GBE_INT_RX_DSC_EMP | \
++ PCH_GBE_INT_RX_FIFO_ERR | \
+ PCH_GBE_INT_WOL_DET | \
+ PCH_GBE_INT_TX_CMPLT \
+ )
+
++#define PCH_GBE_INT_DISABLE_ALL 0
+
+static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
+
+static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
+static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
+ int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+ iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
+/**
+ * pch_gbe_mac_read_mac_addr - Read MAC address
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ */
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+{
+ u32 adr1a, adr1b;
+
+ adr1a = ioread32(&hw->reg->mac_adr[0].high);
+ adr1b = ioread32(&hw->reg->mac_adr[0].low);
+
+ hw->mac.addr[0] = (u8)(adr1a & 0xFF);
+ hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
+ hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
+ hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
+ hw->mac.addr[4] = (u8)(adr1b & 0xFF);
+ hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
+
+ pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
+ return 0;
+}
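Editor's note: the MAC address registers store byte 0 of the address in the low 8 bits of the "high" register, which is easy to get backwards. A stand-alone sketch of the same unpacking, with a sample register pair invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical register contents for MAC 00:21:97:77:65:13:
         * address byte 0 sits in the low 8 bits of the high register. */
        uint32_t adr1a = 0x77972100; /* address bytes 3..0 */
        uint32_t adr1b = 0x00001365; /* address bytes 5..4 */
        uint8_t addr[6];
        int i;

        for (i = 0; i < 4; i++)
                addr[i] = (adr1a >> (8 * i)) & 0xFF;
        for (i = 0; i < 2; i++)
                addr[4 + i] = (adr1b >> (8 * i)) & 0xFF;

        printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
}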
+
+/**
+ * pch_gbe_wait_clr_bit - Wait to clear a bit
+ * @reg: Pointer to the register
+ * @bit: Busy bit to wait on
+ */
+static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
+{
+ u32 tmp;
+ /* wait busy */
+ tmp = 1000;
+ while ((ioread32(reg) & bit) && --tmp)
+ cpu_relax();
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+}
++
++/**
++ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
++ * @reg: Pointer to the register
++ * @bit: Busy bit to wait on
++ */
++static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
++{
++ u32 tmp;
++ int ret = -1;
++ /* wait busy */
++ tmp = 20;
++ while ((ioread32(reg) & bit) && --tmp)
++ udelay(5);
++ if (!tmp)
++ pr_err("Error: busy bit is not cleared\n");
++ else
++ ret = 0;
++ return ret;
++}
++
+/**
+ * pch_gbe_mac_mar_set - Set MAC address register
+ * @hw: Pointer to the HW structure
+ * @addr: Pointer to the MAC address
+ * @index: MAC address array register
+ */
+static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
+{
+ u32 mar_low, mar_high, adrmask;
+
+ pr_debug("index : 0x%x\n", index);
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
+ /* Stop the MAC Address of index. */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Set the MAC address to the MAC address 1A/1B register */
+ iowrite32(mar_high, &hw->reg->mac_adr[index].high);
+ iowrite32(mar_low, &hw->reg->mac_adr[index].low);
+ /* Start the MAC address of index */
+ iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+/**
+ * pch_gbe_mac_reset_hw - Reset hardware
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
+#endif
+ pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
++static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
++{
++ /* Read the MAC address and store it in the private data */
++ pch_gbe_mac_read_mac_addr(hw);
++ iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
++ pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
++ /* Setup the MAC address */
++ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
++ return;
++}
++
+/**
+ * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
+ * @hw: Pointer to the HW structure
+ * @mar_count: Receive address registers
+ */
+static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
+{
+ u32 i;
+
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other receive addresses */
+ for (i = 1; i < mar_count; i++) {
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+
+/**
+ * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
+ * @hw: Pointer to the HW structure
+ * @mc_addr_list: Array of multicast addresses to program
+ * @mc_addr_count: Number of multicast addresses to program
+ * @mar_used_count: The first MAC Address register free to program
+ * @mar_total_num: Total number of supported MAC Address Registers
+ */
+static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count,
+ u32 mar_used_count, u32 mar_total_num)
+{
+ u32 i, adrmask;
+
+ /* Load the first set of multicast addresses into the exact
+ * filters (RAR). If there are not enough to fill the RAR
+ * array, clear the filters.
+ */
+ for (i = mar_used_count; i < mar_total_num; i++) {
+ if (mc_addr_count) {
+ pch_gbe_mac_mar_set(hw, mc_addr_list, i);
+ mc_addr_count--;
+ mc_addr_list += PCH_GBE_ETH_ALEN;
+ } else {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << i)),
+ &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ }
+}
+
+/**
+ * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_mac_info *mac = &hw->mac;
+ u32 rx_fctrl;
+
+ pr_debug("mac->fc = %u\n", mac->fc);
+
+ rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
+
+ switch (mac->fc) {
+ case PCH_GBE_FC_NONE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_RX_PAUSE:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_TX_PAUSE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ case PCH_GBE_FC_FULL:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ default:
+ pr_err("Flow control param set incorrectly\n");
+ return -EINVAL;
+ }
+ if (mac->link_duplex == DUPLEX_HALF)
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
+ pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
+ ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
+ return 0;
+}
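Editor's note: the four flow-control modes above reduce to two independent booleans: whether the receiver honours pause frames (the PCH_GBE_FL_CTRL_EN bit) and whether the transmitter may send them (mac->tx_fc_enable); the driver additionally clears the receive-pause bit at half duplex. A compact stand-alone restatement (names invented for illustration, not from the driver):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors PCH_GBE_FC_NONE..PCH_GBE_FC_FULL (0..3). */
enum { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

int main(void)
{
        int fc;

        for (fc = FC_NONE; fc <= FC_FULL; fc++) {
                /* PCH_GBE_FL_CTRL_EN: honour pause frames we receive */
                bool rx_pause = (fc == FC_RX_PAUSE || fc == FC_FULL);
                /* mac->tx_fc_enable: send pause frames ourselves */
                bool tx_pause = (fc == FC_TX_PAUSE || fc == FC_FULL);

                printf("fc=%d rx_pause=%d tx_pause=%d\n",
                       fc, rx_pause, tx_pause);
        }
        return 0;
}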
+
+/**
+ * pch_gbe_mac_set_wol_event - Set wake-on-lan event
+ * @hw: Pointer to the HW structure
+ * @wu_evt: Wake up event
+ */
+static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
+{
+ u32 addr_mask;
+
+ pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
+ wu_evt, ioread32(&hw->reg->ADDR_MASK));
+
+ if (wu_evt) {
+ /* Set Wake-On-Lan address mask */
+ addr_mask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
+ iowrite32(0, &hw->reg->WOL_ST);
+ iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
+ iowrite32(0x02, &hw->reg->TCPIP_ACC);
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ } else {
+ iowrite32(0, &hw->reg->WOL_CTRL);
+ iowrite32(0, &hw->reg->WOL_ST);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_mac_ctrl_miim - Control MIIM interface
+ * @hw: Pointer to the HW structure
+ * @addr: Address of PHY
+ * @dir: Operation (Write or Read)
+ * @reg: Access register of PHY
+ * @data: Write data.
+ *
+ * Returns: Read data.
+ */
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+ u16 data)
+{
+ u32 data_out = 0;
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->miim_lock, flags);
+
+ for (i = 100; i; --i) {
+ if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
+ break;
+ udelay(20);
+ }
+ if (i == 0) {
+ pr_err("pch-gbe.miim won't go Ready\n");
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+ return 0; /* No way to indicate timeout error */
+ }
+ iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
+ (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
+ dir | data), &hw->reg->MIIM);
+ for (i = 0; i < 100; i++) {
+ udelay(20);
+ data_out = ioread32(&hw->reg->MIIM);
+ if ((data_out & PCH_GBE_MIIM_OPER_READY))
+ break;
+ }
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+
+ pr_debug("PHY %s: reg=%d, data=0x%04X\n",
+ dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
+ dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
+ return (u16) data_out;
+}
+
+/**
+ * pch_gbe_mac_set_pause_packet - Set pause packet
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
+{
+ unsigned long tmp2, tmp3;
+
+ /* Set Pause packet */
+ tmp2 = hw->mac.addr[1];
+ tmp2 = (tmp2 << 8) | hw->mac.addr[0];
+ tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
+
+ tmp3 = hw->mac.addr[5];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[4];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[3];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[2];
+
+ iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
+ iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
+ iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
+ iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
+ iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
+
+ /* Transmit Pause Packet */
+ iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
+
+ pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
+ ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
+ ioread32(&hw->reg->PAUSE_PKT5));
+
+ return;
+}
+
+
+/**
+ * pch_gbe_alloc_queues - Allocate memory for all rings
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
+{
+ int size;
+
+ size = (int)sizeof(struct pch_gbe_tx_ring);
+ adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+ size = (int)sizeof(struct pch_gbe_rx_ring);
+ adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * pch_gbe_init_stats - Initialize status
+ * @adapter: Board private structure to initialize
+ */
+static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+ return;
+}
+
+/**
+ * pch_gbe_init_phy - Initialize PHY
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 addr;
+ u16 bmcr, stat;
+
+ /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
+ bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
+ break;
+ }
+ adapter->hw.phy.addr = adapter->mii.phy_id;
+ pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
+ if (addr == 32)
+ return -EAGAIN;
+ /* Select the PHY and isolate the rest */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ if (addr != adapter->mii.phy_id) {
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ BMCR_ISOLATE);
+ } else {
+ bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ }
+ }
+
+ /* MII setup */
+ adapter->mii.phy_id_mask = 0x1F;
+ adapter->mii.reg_num_mask = 0x1F;
+ adapter->mii.dev = adapter->netdev;
+ adapter->mii.mdio_read = pch_gbe_mdio_read;
+ adapter->mii.mdio_write = pch_gbe_mdio_write;
+ adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
+ return 0;
+}
+
+/**
+ * pch_gbe_mdio_read - The read function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID
+ * @reg: Access location
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
+ (u16) 0);
+}
+
+/**
+ * pch_gbe_mdio_write - The write function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID (not used)
+ * @reg: Access location
+ * @data: Write data
+ */
+static void pch_gbe_mdio_write(struct net_device *netdev,
+ int addr, int reg, int data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
+}
+
+/**
+ * pch_gbe_reset_task - Reset processing at the time of transmission timeout
+ * @work: Pointer of board private structure
+ */
+static void pch_gbe_reset_task(struct work_struct *work)
+{
+ struct pch_gbe_adapter *adapter;
+ adapter = container_of(work, struct pch_gbe_adapter, reset_task);
+
+ rtnl_lock();
+ pch_gbe_reinit_locked(adapter);
+ rtnl_unlock();
+}
+
+/**
+ * pch_gbe_reinit_locked- Re-initialization
+ * @adapter: Board private structure
+ */
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
+{
+ pch_gbe_down(adapter);
+ pch_gbe_up(adapter);
+}
+
+/**
+ * pch_gbe_reset - Reset GbE
+ * @adapter: Board private structure
+ */
+void pch_gbe_reset(struct pch_gbe_adapter *adapter)
+{
+ pch_gbe_mac_reset_hw(&adapter->hw);
+ /* Setup the receive address. */
+ pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
+ if (pch_gbe_hal_init_hw(&adapter->hw))
+ pr_err("Hardware Error\n");
+}
+
+/**
+ * pch_gbe_free_irq - Free an interrupt
+ * @adapter: Board private structure
+ */
+static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+ if (adapter->have_msi) {
+ pci_disable_msi(adapter->pdev);
+ pr_debug("call pci_disable_msi\n");
+ }
+}
+
+/**
+ * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ atomic_inc(&adapter->irq_sem);
+ iowrite32(0, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ synchronize_irq(adapter->pdev->irq);
+
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+/**
+ * pch_gbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (likely(atomic_dec_and_test(&adapter->irq_sem)))
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+/**
+ * pch_gbe_setup_tctl - configure the Transmit control registers
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tx_mode, tcpip;
+
+ tx_mode = PCH_GBE_TM_LONG_PKT |
+ PCH_GBE_TM_ST_AND_FD |
+ PCH_GBE_TM_SHORT_PKT |
+ PCH_GBE_TM_TH_TX_STRT_8 |
+ PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
+
+ iowrite32(tx_mode, &hw->reg->TX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+ tcpip |= PCH_GBE_TX_TCPIPACC_EN;
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tdba, tdlen, dctrl;
+
+ pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->tx_ring->dma,
+ adapter->tx_ring->size);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ tdba = adapter->tx_ring->dma;
+ tdlen = adapter->tx_ring->size - 0x10;
+ iowrite32(tdba, &hw->reg->TX_DSC_BASE);
+ iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
+ iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
+
+ /* Enables Transmission DMA */
+ dctrl = ioread32(&hw->reg->DMA_CTRL);
+ dctrl |= PCH_GBE_TX_DMA_EN;
+ iowrite32(dctrl, &hw->reg->DMA_CTRL);
+}
+
+/**
+ * pch_gbe_setup_rctl - Configure the receive control registers
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rx_mode, tcpip;
+
+ rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
+ PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
+
+ iowrite32(rx_mode, &hw->reg->RX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+
- if (netdev->features & NETIF_F_RXCSUM) {
- tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
- tcpip |= PCH_GBE_RX_TCPIPACC_EN;
- } else {
- tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
- tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
- }
++ tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
++ tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_rx - Configure Receive Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rdba, rdlen, rctl, rxdma;
+
+ pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->rx_ring->dma,
+ adapter->rx_ring->size);
+
+ pch_gbe_mac_force_mac_fc(hw);
+
+ /* Disables Receive MAC */
+ rctl = ioread32(&hw->reg->MAC_RX_EN);
+ iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+
+ /* Disables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+
+ pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
+ ioread32(&hw->reg->MAC_RX_EN),
+ ioread32(&hw->reg->DMA_CTRL));
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ rdba = adapter->rx_ring->dma;
+ rdlen = adapter->rx_ring->size - 0x10;
+ iowrite32(rdba, &hw->reg->RX_DSC_BASE);
+ iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
+ iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
- /* Enables Receive DMA */
- rxdma = ioread32(&hw->reg->DMA_CTRL);
- rxdma |= PCH_GBE_RX_DMA_EN;
- iowrite32(rxdma, &hw->reg->DMA_CTRL);
- /* Enables Receive */
- iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+}
+
+/**
+ * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_tx_resource(
+ struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_rx_resource(
+ struct pch_gbe_adapter *adapter,
+ struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_clean_tx_ring - Free Tx Buffers
+ * @adapter: Board private structure
+ * @tx_ring: Ring to be cleaned
+ */
+static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
+
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
+ iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
+}
+
+/**
+ * pch_gbe_clean_rx_ring - Free Rx Buffers
+ * @adapter: Board private structure
+ * @rx_ring: Ring to free buffers from
+ */
+static void
+pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
+ iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
+}
+
+static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long rgmii = 0;
+
+ /* Set the RGMII control. */
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ switch (speed) {
+ case SPEED_10:
+ rgmii = (PCH_GBE_RGMII_RATE_2_5M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_100:
+ rgmii = (PCH_GBE_RGMII_RATE_25M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_1000:
+ rgmii = (PCH_GBE_RGMII_RATE_125M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ }
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#else /* GMII */
+ rgmii = 0;
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#endif
+}
+static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long mode = 0;
+
+ /* Set the communication mode */
+ switch (speed) {
+ case SPEED_10:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 10;
+ break;
+ case SPEED_100:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 100;
+ break;
+ case SPEED_1000:
+ mode = PCH_GBE_MODE_GMII_ETHER;
+ break;
+ }
+ if (duplex == DUPLEX_FULL)
+ mode |= PCH_GBE_MODE_FULL_DUPLEX;
+ else
+ mode |= PCH_GBE_MODE_HALF_DUPLEX;
+ iowrite32(mode, &hw->reg->MODE);
+}
+
+/**
+ * pch_gbe_watchdog - Watchdog process
+ * @data: Board private structure
+ */
+static void pch_gbe_watchdog(unsigned long data)
+{
+ struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pr_debug("right now = %ld\n", jiffies);
+
+ pch_gbe_update_stats(adapter);
+ if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ /* mii library handles link maintenance tasks */
+ if (mii_ethtool_gset(&adapter->mii, &cmd)) {
+ pr_err("ethtool get setting Error\n");
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies +
+ PCH_GBE_WATCHDOG_PERIOD));
+ return;
+ }
+ hw->mac.link_speed = ethtool_cmd_speed(&cmd);
+ hw->mac.link_duplex = cmd.duplex;
+ /* Set the RGMII control. */
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ /* Set the communication mode */
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ netdev_dbg(netdev,
+ "Link is Up %d Mbps %s-Duplex\n",
+ hw->mac.link_speed,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else if ((!mii_link_ok(&adapter->mii)) &&
+ (netif_carrier_ok(netdev))) {
+ netdev_dbg(netdev, "NIC Link is Down\n");
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
+}
+
+/**
+ * pch_gbe_tx_queue - Carry out queuing of the transmission data
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring structure
+ * @skb: Socket buffer structure
+ */
+static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *tmp_skb;
+ unsigned int frame_ctrl;
+ unsigned int ring_num;
+ unsigned long flags;
+
+ /*-- Set frame control --*/
+ frame_ctrl = 0;
+ if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
+ if (skb->ip_summed == CHECKSUM_NONE)
+ frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+
+ /*
+ * Perform checksum processing in software: the hardware
+ * accelerator does not support checksum offload for frames
+ * shorter than 64 bytes, so short packets are checksummed here.
+ */
+ if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
+ PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ unsigned int offset;
+ iph->check = 0;
+ iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
+ offset = skb_transport_offset(skb);
+ if (iph->protocol == IPPROTO_TCP) {
+ skb->csum = 0;
+ tcp_hdr(skb)->check = 0;
+ skb->csum = skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ tcp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ skb->csum = 0;
+ udp_hdr(skb)->check = 0;
+ skb->csum =
+ skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ udp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_UDP,
+ skb->csum);
+ }
+ }
+ }
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
+ ring_num = tx_ring->next_to_use;
+ if (unlikely((ring_num + 1) == tx_ring->count))
+ tx_ring->next_to_use = 0;
+ else
+ tx_ring->next_to_use = ring_num + 1;
+
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ buffer_info = &tx_ring->buffer_info[ring_num];
+ tmp_skb = buffer_info->skb;
+
+ /* [Header:14][payload] ---> [Header:14][padding:2][payload] */
+ memcpy(tmp_skb->data, skb->data, ETH_HLEN);
+ tmp_skb->data[ETH_HLEN] = 0x00;
+ tmp_skb->data[ETH_HLEN + 1] = 0x00;
+ tmp_skb->len = skb->len;
+ memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
+ (skb->len - ETH_HLEN));
+ /*-- Set Buffer information --*/
+ buffer_info->length = tmp_skb->len;
+ buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ pr_err("TX DMA map failed\n");
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ tx_ring->next_to_use = ring_num;
+ return;
+ }
+ buffer_info->mapped = true;
+ buffer_info->time_stamp = jiffies;
+
+ /*-- Set Tx descriptor --*/
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
+ tx_desc->buffer_addr = (buffer_info->dma);
+ tx_desc->length = (tmp_skb->len);
+ tx_desc->tx_words_eob = ((tmp_skb->len + 3));
+ tx_desc->tx_frame_ctrl = (frame_ctrl);
+ tx_desc->gbec_status = (DSC_INIT16);
+
+ if (unlikely(++ring_num == tx_ring->count))
+ ring_num = 0;
+
+ /* Update software pointer of TX descriptor */
+ iowrite32(tx_ring->dma +
+ (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
+ &hw->reg->TX_DSC_SW_P);
+ dev_kfree_skb_any(skb);
+}
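Editor's note: pch_gbe_tx_queue copies each frame through a staging skb, inserting two zero bytes between the Ethernet header and the payload (the same 2-byte offset NET_IP_ALIGN provides on receive); note the driver keeps the original skb->len in tmp_skb->len. A stand-alone sketch of the same reshuffling (function name and buffer sizes invented for illustration):

#include <string.h>

#define ETH_HLEN 14

/* [Header:14][payload] -> [Header:14][padding:2][payload].
 * The driver does the same copy into a staging skb. */
static void pad_after_header(unsigned char *dst,
                             const unsigned char *src, size_t len)
{
        memcpy(dst, src, ETH_HLEN);
        dst[ETH_HLEN] = 0x00;
        dst[ETH_HLEN + 1] = 0x00;
        memcpy(dst + ETH_HLEN + 2, src + ETH_HLEN, len - ETH_HLEN);
}

int main(void)
{
        unsigned char src[32] = "0123456789ABCDhello";
        unsigned char dst[34];

        pad_after_header(dst, src, 19);
        /* the payload now starts two bytes later */
        return dst[ETH_HLEN + 2] == 'h' ? 0 : 1;
}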
+
+/**
+ * pch_gbe_update_stats - Update the board statistics counters
+ * @adapter: Board private structure
+ */
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw_stats *stats = &adapter->stats;
+ unsigned long flags;
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
+ return;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+ /* Update device status "adapter->stats" */
+ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_length_errors +
+ stats->tx_aborted_errors +
+ stats->tx_carrier_errors + stats->tx_timeout_count;
+
+ /* Update network device status "adapter->net_stats" */
+ netdev->stats.rx_packets = stats->rx_packets;
+ netdev->stats.rx_bytes = stats->rx_bytes;
+ netdev->stats.rx_dropped = stats->rx_dropped;
+ netdev->stats.tx_packets = stats->tx_packets;
+ netdev->stats.tx_bytes = stats->tx_bytes;
+ netdev->stats.tx_dropped = stats->tx_dropped;
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = stats->multicast;
+ netdev->stats.collisions = stats->collisions;
+ /* Rx Errors */
+ netdev->stats.rx_errors = stats->rx_errors;
+ netdev->stats.rx_crc_errors = stats->rx_crc_errors;
+ netdev->stats.rx_frame_errors = stats->rx_frame_errors;
+ /* Tx Errors */
+ netdev->stats.tx_errors = stats->tx_errors;
+ netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
+ netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
+
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
++static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
++{
++ struct pch_gbe_hw *hw = &adapter->hw;
++ u32 rxdma;
++ u16 value;
++ int ret;
++
++ /* Disable Receive DMA */
++ rxdma = ioread32(&hw->reg->DMA_CTRL);
++ rxdma &= ~PCH_GBE_RX_DMA_EN;
++ iowrite32(rxdma, &hw->reg->DMA_CTRL);
++ /* Wait Rx DMA BUS is IDLE */
++ ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
++ if (ret) {
++ /* Disable Bus master */
++ pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
++ value &= ~PCI_COMMAND_MASTER;
++ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
++ /* Stop Receive */
++ pch_gbe_mac_reset_rx(hw);
++ /* Enable Bus master */
++ value |= PCI_COMMAND_MASTER;
++ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
++ } else {
++ /* Stop Receive */
++ pch_gbe_mac_reset_rx(hw);
++ }
++}
++
++static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
++{
++ u32 rxdma;
++
++ /* Enables Receive DMA */
++ rxdma = ioread32(&hw->reg->DMA_CTRL);
++ rxdma |= PCH_GBE_RX_DMA_EN;
++ iowrite32(rxdma, &hw->reg->DMA_CTRL);
++ /* Enables Receive */
++ iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
++ return;
++}
++
+/**
+ * pch_gbe_intr - Interrupt Handler
+ * @irq: Interrupt number
+ * @data: Pointer to a network interface device structure
+ * Returns
+ * - IRQ_HANDLED: Our interrupt
+ * - IRQ_NONE: Not our interrupt
+ */
+static irqreturn_t pch_gbe_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 int_st;
+ u32 int_en;
+
+ /* Check request status */
+ int_st = ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ /* When request status is no interruption factor */
+ if (unlikely(!int_st))
+ return IRQ_NONE; /* Not our interrupt. End processing. */
+ pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
+ if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
+ adapter->stats.intr_rx_frame_err_count++;
+ if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
- pr_err("Rx descriptor is empty\n");
++ if (!adapter->rx_stop_flag) {
++ adapter->stats.intr_rx_fifo_err_count++;
++ pr_debug("Rx fifo over run\n");
++ adapter->rx_stop_flag = true;
++ int_en = ioread32(&hw->reg->INT_EN);
++ iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
++ &hw->reg->INT_EN);
++ pch_gbe_stop_receive(adapter);
++ }
+ if (int_st & PCH_GBE_INT_RX_DMA_ERR)
+ adapter->stats.intr_rx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
+ adapter->stats.intr_tx_fifo_err_count++;
+ if (int_st & PCH_GBE_INT_TX_DMA_ERR)
+ adapter->stats.intr_tx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TCPIP_ERR)
+ adapter->stats.intr_tcpip_err_count++;
+ /* When Rx descriptor is empty */
+ if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
+ adapter->stats.intr_rx_dsc_empty_count++;
- pr_err("Rx descriptor is empty\n");
++ pr_debug("Rx descriptor is empty\n");
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
+ if (hw->mac.tx_fc_enable) {
+ /* Set Pause packet */
+ pch_gbe_mac_set_pause_packet(hw);
+ }
+ if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
+ == 0) {
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* When request status is Receive interruption */
+ if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
+ /* Enable only Rx Descriptor empty */
+ atomic_inc(&adapter->irq_sem);
+ int_en = ioread32(&hw->reg->INT_EN);
+ int_en &=
+ ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
+ iowrite32(int_en, &hw->reg->INT_EN);
+ /* Start polling for NAPI */
+ __napi_schedule(&adapter->napi);
+ }
+ }
+ pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
+ IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
+ return IRQ_HANDLED;
+}
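+
+/*
+ * Rx FIFO overrun flow (implemented above and in pch_gbe_napi_poll):
+ * the interrupt handler masks PCH_GBE_INT_RX_FIFO_ERR, sets
+ * adapter->rx_stop_flag and stops the receiver; once NAPI has drained
+ * the ring, pch_gbe_napi_poll restarts the receiver and unmasks the
+ * interrupt again.
+ */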
+
+/**
+ * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @cleaned_count: Cleaned count
+ */
+static void
+pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_rx_desc *rx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+
- bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
++ bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+ i = rx_ring->next_to_use;
+
+ while ((cleaned_count--)) {
+ buffer_info = &rx_ring->buffer_info[i];
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- } else {
- skb = netdev_alloc_skb(netdev, bufsz);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->stats.rx_alloc_buff_failed++;
- break;
- }
- /* 64byte align */
- skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
- }
++ skb = netdev_alloc_skb(netdev, bufsz);
++ if (unlikely(!skb)) {
++ /* Better luck next round */
++ adapter->stats.rx_alloc_buff_failed++;
++ break;
++ }
++ /* align */
++ skb_reserve(skb, NET_IP_ALIGN);
++ buffer_info->skb = skb;
++
+ buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
++ buffer_info->rx_buffer,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->stats.rx_alloc_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
+ buffer_info->mapped = true;
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = (buffer_info->dma);
+ rx_desc->gbec_status = DSC_INIT16;
+
+ pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
+ i, (unsigned long long)buffer_info->dma,
+ buffer_info->length);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ }
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+ iowrite32(rx_ring->dma +
+ (int)sizeof(struct pch_gbe_rx_desc) * i,
+ &hw->reg->RX_DSC_SW_P);
+ }
+ return;
+}
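+
+/*
+ * Note: RX_DSC_SW_P above is written with the address of the last
+ * descriptor that was initialized, i.e. one entry behind next_to_use;
+ * it acts as the software producer pointer the hardware may fill up to.
+ */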
+
++static int
++pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
++ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
++{
++ struct pci_dev *pdev = adapter->pdev;
++ struct pch_gbe_buffer *buffer_info;
++ unsigned int i;
++ unsigned int bufsz;
++ unsigned int size;
++
++ bufsz = adapter->rx_buffer_len;
++
++ size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
++ rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
++ &rx_ring->rx_buff_pool_logic,
++ GFP_KERNEL);
++ if (!rx_ring->rx_buff_pool) {
++ pr_err("Unable to allocate memory for the receive poll buffer\n");
++ return -ENOMEM;
++ }
++ memset(rx_ring->rx_buff_pool, 0, size);
++ rx_ring->rx_buff_pool_size = size;
++ for (i = 0; i < rx_ring->count; i++) {
++ buffer_info = &rx_ring->buffer_info[i];
++ buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
++ buffer_info->length = bufsz;
++ }
++ return 0;
++}
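++
++/*
++ * All receive buffers live in a single coherent DMA allocation:
++ * rx_ring->count slices of rx_buffer_len bytes plus
++ * PCH_GBE_RESERVE_MEMORY of slack. Each buffer_info->rx_buffer points
++ * into this pool; frames are copied out of it into freshly allocated
++ * skbs in pch_gbe_clean_rx().
++ */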
++
+/**
+ * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ */
+static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+ struct pch_gbe_tx_desc *tx_desc;
+
+ bufsz =
+ adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb_reserve(skb, PCH_GBE_DMA_ALIGN);
+ buffer_info->skb = skb;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ tx_desc->gbec_status = (DSC_INIT16);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_clean_tx - Reclaim resources after transmit completes
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ * Returns
+ * true: Cleaned at least one descriptor
+ * false: No descriptor was cleaned
+ */
+static bool
+pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = false;
+
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+
+ i = tx_ring->next_to_clean;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
+ tx_desc->gbec_status, tx_desc->dma_status);
+
+ while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
+ pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
+ cleaned = true;
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+
+ if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Abort Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
+ ) {
+ adapter->stats.tx_carrier_errors++;
+ pr_err("Transfer Carrier Sense Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
+ ) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Collision Abort Error\n");
+ } else if ((tx_desc->gbec_status &
+ (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
+ PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
+ adapter->stats.collisions++;
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ pr_debug("Transfer Collision\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
+ ) {
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ }
+ if (buffer_info->mapped) {
+ pr_debug("unmap buffer_info->dma : %d\n", i);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ pr_debug("trim buffer_info->skb : %d\n", i);
+ skb_trim(buffer_info->skb, 0);
+ }
+ tx_desc->gbec_status = DSC_INIT16;
+ if (unlikely(++i == tx_ring->count))
+ i = 0;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+
+ /* bound the amount of tx cleanup per call to avoid an endless loop */
+ if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+ break;
+ }
+ pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
+ cleaned_count);
+ /* Recover from running out of Tx resources in xmit_frame */
+ if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
+ netif_wake_queue(adapter->netdev);
+ adapter->stats.tx_restart_count++;
+ pr_debug("Tx wake queue\n");
+ }
+ spin_lock(&adapter->tx_queue_lock);
+ tx_ring->next_to_clean = i;
+ spin_unlock(&adapter->tx_queue_lock);
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+ return cleaned;
+}
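+
+/*
+ * Tx cleanup above is bounded to PCH_GBE_TX_WEIGHT descriptors per call
+ * so a busy ring cannot monopolize the poll, and next_to_clean is
+ * updated under tx_queue_lock to stay consistent with the xmit path.
+ */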
+
+/**
+ * pch_gbe_clean_rx - Send received data up the network stack; legacy
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @work_done: Completed count
+ * @work_to_do: Request count
+ * Returns
+ * true: Cleaned at least one descriptor
+ * false: No descriptor was cleaned
+ */
+static bool
+pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ struct pch_gbe_rx_desc *rx_desc;
+ u32 length;
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = false;
- struct sk_buff *skb, *new_skb;
++ struct sk_buff *skb;
+ u8 dma_status;
+ u16 gbec_status;
+ u32 tcp_ip_status;
+
+ i = rx_ring->next_to_clean;
+
+ while (*work_done < work_to_do) {
+ /* Check Rx descriptor status */
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ if (rx_desc->gbec_status == DSC_INIT16)
+ break;
+ cleaned = true;
+ cleaned_count++;
+
+ dma_status = rx_desc->dma_status;
+ gbec_status = rx_desc->gbec_status;
+ tcp_ip_status = rx_desc->tcp_ip_status;
+ rx_desc->gbec_status = DSC_INIT16;
+ buffer_info = &rx_ring->buffer_info[i];
+ skb = buffer_info->skb;
++ buffer_info->skb = NULL;
+
+ /* unmap dma */
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
- /* Prefetch the packet */
- prefetch(skb->data);
+
+ pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
+ "TCP:0x%08x] BufInf = 0x%p\n",
+ i, dma_status, gbec_status, tcp_ip_status,
+ buffer_info);
+ /* Error check */
+ if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Not Octal Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Nibble Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
+ adapter->stats.rx_crc_errors++;
+ pr_err("Receive CRC Error\n");
+ } else {
+ /* get receive length */
- /* length convert[-3] */
- length = (rx_desc->rx_words_eob) - 3;
-
- /* Decide the data conversion method */
- if (!(netdev->features & NETIF_F_RXCSUM)) {
- /* [Header:14][payload] */
- if (NET_IP_ALIGN) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- length);
- skb = new_skb;
- } else {
- /* DMA buffer is used as SKB as it is.*/
- buffer_info->skb = NULL;
- }
- } else {
- /* [Header:14][padding:2][payload] */
- /* The length includes padding length */
- length = length - PCH_GBE_DMA_PADDING;
- if ((length < copybreak) ||
- (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.
- * Padding data is deleted
- * at the time of a copy.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- ETH_HLEN);
- memcpy(&new_skb->data[ETH_HLEN],
- &skb->data[ETH_HLEN +
- PCH_GBE_DMA_PADDING],
- length - ETH_HLEN);
- skb = new_skb;
- } else {
- /* Padding data is deleted
- * by moving header data.*/
- memmove(&skb->data[PCH_GBE_DMA_PADDING],
- &skb->data[0], ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
- buffer_info->skb = NULL;
- }
- }
- /* The length includes FCS length */
- length = length - ETH_FCS_LEN;
++ /* length convert[-3], length includes FCS length */
++ length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
++ if (rx_desc->rx_words_eob & 0x02)
++ length = length - 4;
++ /*
++ * buffer_info->rx_buffer: [Header:14][payload]
++ * skb->data: [Reserve:2][Header:14][payload]
++ */
++ memcpy(skb->data, buffer_info->rx_buffer, length);
++
+ /* update status of driver */
+ adapter->stats.rx_bytes += length;
+ adapter->stats.rx_packets++;
+ if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
+ adapter->stats.multicast++;
+ /* Write meta data of skb */
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&adapter->napi, skb);
+ (*work_done)++;
+ pr_debug("Receive skb->ip_summed: %d length: %d\n",
+ skb->ip_summed, length);
+ }
- dorrop:
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+ if (++i == rx_ring->count)
+ i = 0;
+ }
+ rx_ring->next_to_clean = i;
+ if (cleaned_count)
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ return cleaned;
+}
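+
+/*
+ * Note the copy-based receive above: the hardware DMAs into the pool
+ * buffer and the frame is then memcpy'd into a fresh skb, so the skb
+ * itself needs only NET_IP_ALIGN of headroom instead of the 64-byte
+ * DMA alignment the old in-place scheme required.
+ */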
+
+/**
+ * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_tx_desc *tx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the buffer information\n");
+ return -ENOMEM;
+ }
+
+ tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
+
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ vfree(tx_ring->buffer_info);
+ pr_err("Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ spin_lock_init(&tx_ring->tx_lock);
+
+ for (desNo = 0; desNo < tx_ring->count; desNo++) {
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
+ tx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ tx_ring->desc, (unsigned long long)tx_ring->dma,
+ tx_ring->next_to_clean, tx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_rx_desc *rx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+ }
+ rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ vfree(rx_ring->buffer_info);
+ return -ENOMEM;
+ }
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ for (desNo = 0; desNo < rx_ring->count; desNo++) {
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
+ rx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ rx_ring->desc, (unsigned long long)rx_ring->dma,
+ rx_ring->next_to_clean, rx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_free_tx_resources - Free Tx Resources
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ */
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_tx_ring(adapter, tx_ring);
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_free_rx_resources - Free Rx Resources
+ * @adapter: Board private structure
+ * @rx_ring: Ring to clean the resources from
+ */
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_rx_ring(adapter, rx_ring);
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_request_irq - Allocate an interrupt line
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int flags;
+
+ flags = IRQF_SHARED;
+ adapter->have_msi = false;
+ err = pci_enable_msi(adapter->pdev);
+ pr_debug("call pci_enable_msi\n");
+ if (err) {
+ pr_debug("call pci_enable_msi - Error: %d\n", err);
+ } else {
+ flags = 0;
+ adapter->have_msi = true;
+ }
+ err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
+ flags, netdev->name, netdev);
+ if (err)
+ pr_err("Unable to allocate interrupt Error: %d\n", err);
+ pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
+ adapter->have_msi, flags, err);
+ return err;
+}
+
+
+static void pch_gbe_set_multi(struct net_device *netdev);
+/**
+ * pch_gbe_up - Up GbE network device
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_up(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+ int err;
+
+ /* hardware has been reset, we need to reload some things */
+ pch_gbe_set_multi(netdev);
+
+ pch_gbe_setup_tctl(adapter);
+ pch_gbe_configure_tx(adapter);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+
+ err = pch_gbe_request_irq(adapter);
+ if (err) {
+ pr_err("Error: can't bring device up\n");
+ return err;
+ }
++ err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
++ if (err) {
++ pr_err("Error: can't bring device up\n");
++ return err;
++ }
+ pch_gbe_alloc_tx_buffers(adapter, tx_ring);
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
+ adapter->tx_queue_len = netdev->tx_queue_len;
++ pch_gbe_start_receive(&adapter->hw);
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
+ napi_enable(&adapter->napi);
+ pch_gbe_irq_enable(adapter);
+ netif_start_queue(adapter->netdev);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_down - Down GbE network device
+ * @adapter: Board private structure
+ */
+void pch_gbe_down(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
++ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ napi_disable(&adapter->napi);
+ atomic_set(&adapter->irq_sem, 0);
+
+ pch_gbe_irq_disable(adapter);
+ pch_gbe_free_irq(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ pch_gbe_reset(adapter);
+ pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
+ pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
++
++ pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
++ rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
++ rx_ring->rx_buff_pool_logic = 0;
++ rx_ring->rx_buff_pool_size = 0;
++ rx_ring->rx_buff_pool = NULL;
+}
+
+/**
+ * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ /* Initialize the hardware-specific values */
+ if (pch_gbe_hal_setup_init_funcs(hw)) {
+ pr_err("Hardware Initialization Failure\n");
+ return -EIO;
+ }
+ if (pch_gbe_alloc_queues(adapter)) {
+ pr_err("Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&adapter->hw.miim_lock);
+ spin_lock_init(&adapter->tx_queue_lock);
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->ethtool_lock);
+ atomic_set(&adapter->irq_sem, 0);
+ pch_gbe_irq_disable(adapter);
+
+ pch_gbe_init_stats(adapter);
+
+ pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
+ (u32) adapter->rx_buffer_len,
+ hw->mac.min_frame_size, hw->mac.max_frame_size);
+ return 0;
+}
+
+/**
+ * pch_gbe_open - Called when a network interface is made active
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_open(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* allocate transmit descriptors */
+ err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+ /* allocate receive descriptors */
+ err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+ pch_gbe_hal_power_up_phy(hw);
+ err = pch_gbe_up(adapter);
+ if (err)
+ goto err_up;
+ pr_debug("Success End\n");
+ return 0;
+
+err_up:
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+err_setup_rx:
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+err_setup_tx:
+ pch_gbe_reset(adapter);
+ pr_err("Error End\n");
+ return err;
+}
+
+/**
+ * pch_gbe_stop - Disables a network interface
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successfully
+ */
+static int pch_gbe_stop(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_down(adapter);
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+ return 0;
+}
+
+/**
+ * pch_gbe_xmit_frame - Packet transmitting start
+ * @skb: Socket buffer structure
+ * @netdev: Network interface device structure
+ * Returns
+ * - NETDEV_TX_OK: Normal end
+ * - NETDEV_TX_BUSY: Error end
+ */
+static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ unsigned long flags;
+
+ if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
+ pr_err("Transfer length Error: skb len: %d > max: %d\n",
+ skb->len, adapter->hw.mac.max_frame_size);
+ dev_kfree_skb_any(skb);
+ adapter->stats.tx_length_errors++;
+ return NETDEV_TX_OK;
+ }
+ if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+ }
+ if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
+ tx_ring->next_to_use, tx_ring->next_to_clean);
+ return NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
+ /* CRC,ITAG no support */
+ pch_gbe_tx_queue(adapter, tx_ring, skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * pch_gbe_get_stats - Get System Network Statistics
+ * @netdev: Network interface device structure
+ * Returns: The current stats
+ */
+static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
+{
+ /* only return the current stats */
+ return &netdev->stats;
+}
+
+/**
+ * pch_gbe_set_multi - Multicast and Promiscuous mode set
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_set_multi(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ u32 rctl;
+ int i;
+ int mc_count;
+
+ pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ioread32(&hw->reg->RX_MODE);
+ mc_count = netdev_mc_count(netdev);
+ if ((netdev->flags & IFF_PROMISC)) {
+ rctl &= ~PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else if ((netdev->flags & IFF_ALLMULTI)) {
+ /* all the multicasting receive permissions */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ if (mc_count >= PCH_GBE_MAR_ENTRIES) {
+ /* all the multicasting receive permissions */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+ }
+ }
+ iowrite32(rctl, &hw->reg->RX_MODE);
+
+ if (mc_count >= PCH_GBE_MAR_ENTRIES)
+ return;
+ mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == mc_count)
+ break;
+ memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ }
+ pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
+ PCH_GBE_MAR_ENTRIES);
+ kfree(mta_list);
+
+ pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
+ ioread32(&hw->reg->RX_MODE), mc_count);
+}
+
+/**
+ * pch_gbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: Network interface device structure
+ * @addr: Pointer to an address structure
+ * Returns
+ * 0: Successfully
+ * -EADDRNOTAVAIL: Failed
+ */
+static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *skaddr = addr;
+ int ret_val;
+
+ if (!is_valid_ether_addr(skaddr->sa_data)) {
+ ret_val = -EADDRNOTAVAIL;
+ } else {
+ memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
+ pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ ret_val = 0;
+ }
+ pr_debug("ret_val : 0x%08x\n", ret_val);
+ pr_debug("dev_addr : %pM\n", netdev->dev_addr);
+ pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
+ pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
+ ioread32(&adapter->hw.reg->mac_adr[0].high),
+ ioread32(&adapter->hw.reg->mac_adr[0].low));
+ return ret_val;
+}
+
+/**
+ * pch_gbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: Network interface device structure
+ * @new_mtu: New value for maximum frame size
+ * Returns
+ * 0: Successfully
+ * -EINVAL: Failed
+ */
+static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int max_frame;
++ unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
++ int err;
+
+ max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
+ pr_err("Invalid MTU setting\n");
+ return -EINVAL;
+ }
+ if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
+ else
- adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
++ adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
- netdev->mtu = new_mtu;
- adapter->hw.mac.max_frame_size = max_frame;
+
- if (netif_running(netdev))
- pch_gbe_reinit_locked(adapter);
- else
++ if (netif_running(netdev)) {
++ pch_gbe_down(adapter);
++ err = pch_gbe_up(adapter);
++ if (err) {
++ adapter->rx_buffer_len = old_rx_buffer_len;
++ pch_gbe_up(adapter);
++ return -ENOMEM;
++ } else {
++ netdev->mtu = new_mtu;
++ adapter->hw.mac.max_frame_size = max_frame;
++ }
++ } else {
+ pch_gbe_reset(adapter);
++ netdev->mtu = new_mtu;
++ adapter->hw.mac.max_frame_size = max_frame;
++ }
+
+ pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
+ max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
+ adapter->hw.mac.max_frame_size);
+ return 0;
+}
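+
+/*
+ * When the interface is running, the MTU change is applied by a full
+ * down/up cycle so the Rx buffer pool is reallocated at the new size;
+ * if the bring-up fails, the old rx_buffer_len is restored and the
+ * device is brought back up before returning -ENOMEM.
+ */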
+
+/**
+ * pch_gbe_set_features - Reset device after features changed
+ * @netdev: Network interface device structure
+ * @features: New features
+ * Returns
+ * 0: HW state updated successfully
+ */
+static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ if (netif_running(netdev))
+ pch_gbe_reinit_locked(adapter);
+ else
+ pch_gbe_reset(adapter);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_ioctl - Controls register through a MII interface
+ * @netdev: Network interface device structure
+ * @ifr: Pointer to ifr structure
+ * @cmd: Control command
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ pr_debug("cmd : 0x%04x\n", cmd);
+
+ return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+}
+
+/**
+ * pch_gbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_tx_timeout(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->stats.tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * pch_gbe_napi_poll - NAPI receive and transfer polling callback
+ * @napi: Pointer of polling device struct
+ * @budget: The maximum number of a packet
+ * Returns
+ * The number of packets processed (NAPI leaves polling mode when
+ * this is less than @budget)
+ */
+static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct pch_gbe_adapter *adapter =
+ container_of(napi, struct pch_gbe_adapter, napi);
+ struct net_device *netdev = adapter->netdev;
+ int work_done = 0;
+ bool poll_end_flag = false;
+ bool cleaned = false;
++ u32 int_en;
+
+ pr_debug("budget : %d\n", budget);
+
+ /* Keep link state information with original netdev */
+ if (!netif_carrier_ok(netdev)) {
+ poll_end_flag = true;
+ } else {
+ pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
- cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
++ if (adapter->rx_stop_flag) {
++ adapter->rx_stop_flag = false;
++ pch_gbe_start_receive(&adapter->hw);
++ int_en = ioread32(&adapter->hw.reg->INT_EN);
++ iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
++ &adapter->hw.reg->INT_EN);
++ }
++ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+
+ if (cleaned)
+ work_done = budget;
+ /* If no Tx and not enough Rx work done,
+ * exit the polling mode
+ */
+ if ((work_done < budget) || !netif_running(netdev))
+ poll_end_flag = true;
+ }
+
+ if (poll_end_flag) {
+ napi_complete(napi);
+ pch_gbe_irq_enable(adapter);
+ }
+
+ pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
+ poll_end_flag, work_done, budget);
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * pch_gbe_netpoll - Used by things like netconsole to send skbs
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_netpoll(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ pch_gbe_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops pch_gbe_netdev_ops = {
+ .ndo_open = pch_gbe_open,
+ .ndo_stop = pch_gbe_stop,
+ .ndo_start_xmit = pch_gbe_xmit_frame,
+ .ndo_get_stats = pch_gbe_get_stats,
+ .ndo_set_mac_address = pch_gbe_set_mac,
+ .ndo_tx_timeout = pch_gbe_tx_timeout,
+ .ndo_change_mtu = pch_gbe_change_mtu,
+ .ndo_set_features = pch_gbe_set_features,
+ .ndo_do_ioctl = pch_gbe_ioctl,
+ .ndo_set_rx_mode = pch_gbe_set_multi,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pch_gbe_netpoll,
+#endif
+};
+
+static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
+ pci_disable_device(pdev);
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (pci_enable_device(pdev)) {
+ pr_err("Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake up status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void pch_gbe_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (pch_gbe_up(adapter)) {
+ pr_debug("can't bring device back up after reset\n");
+ return;
+ }
+ }
+ netif_device_attach(netdev);
+}
+
+static int __pch_gbe_suspend(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 wufc = adapter->wake_up_evt;
+ int retval = 0;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
+ if (wufc) {
+ pch_gbe_set_multi(netdev);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ } else {
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ }
+ return retval;
+}
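+
+/*
+ * On suspend the receiver stays configured when wake-up events are
+ * armed, so WoL frames can still be matched; the PHY is powered down
+ * only when no wake event is requested.
+ */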
+
+#ifdef CONFIG_PM
+static int pch_gbe_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+
+ return __pch_gbe_suspend(pdev);
+}
+
+static int pch_gbe_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake on lan control and status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ if (netif_running(netdev))
+ pch_gbe_up(adapter);
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void pch_gbe_shutdown(struct pci_dev *pdev)
+{
+ __pch_gbe_suspend(pdev);
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static void pch_gbe_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ cancel_work_sync(&adapter->reset_task);
+ unregister_netdev(netdev);
+
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(adapter->hw.reg);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static int pch_gbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct net_device *netdev;
+ struct pch_gbe_adapter *adapter;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ret = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "ERR: No usable DMA "
+ "configuration, aborting\n");
+ goto err_disable_device;
+ }
+ }
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: Can't reserve PCI I/O and memory resources\n");
+ goto err_disable_device;
+ }
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
+ if (!netdev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "ERR: Can't allocate and set up an Ethernet device\n");
+ goto err_release_pci;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
+ if (!adapter->hw.reg) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "Can't ioremap\n");
+ goto err_free_netdev;
+ }
+
+ netdev->netdev_ops = &pch_gbe_netdev_ops;
+ netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
+ netif_napi_add(netdev, &adapter->napi,
+ pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
+ netdev->hw_features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features = netdev->hw_features;
+ pch_gbe_set_ethtool_ops(netdev);
+
+ pch_gbe_mac_load_mac_addr(&adapter->hw);
+ pch_gbe_mac_reset_hw(&adapter->hw);
+
+ /* setup the private structure */
+ ret = pch_gbe_sw_init(adapter);
+ if (ret)
+ goto err_iounmap;
+
+ /* Initialize PHY */
+ ret = pch_gbe_init_phy(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "PHY initialize error\n");
+ goto err_free_adapter;
+ }
+ pch_gbe_hal_get_bus_info(&adapter->hw);
+
+ /* Read the MAC address and store it in the private data */
+ ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ if (ret) {
+ dev_err(&pdev->dev, "MAC address Read Error\n");
+ goto err_free_adapter;
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address\n");
+ ret = -EIO;
+ goto err_free_adapter;
+ }
+ setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
+ (unsigned long)adapter);
+
+ INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
+
+ pch_gbe_check_options(adapter);
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
+ dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
+
+ /* reset the hardware with the new settings */
+ pch_gbe_reset(adapter);
+
+ ret = register_netdev(netdev);
+ if (ret)
+ goto err_free_adapter;
+ /* tell the stack to leave us alone until pch_gbe_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
+
+ device_set_wakeup_enable(&pdev->dev, 1);
+ return 0;
+
+err_free_adapter:
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_iounmap:
+ iounmap(adapter->hw.reg);
+err_free_netdev:
+ free_netdev(netdev);
+err_release_pci:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
+ {.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
++ {.vendor = PCI_VENDOR_ID_ROHM,
++ .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
++ .class_mask = (0xFFFF00)
++ },
+ /* required last entry */
+ {0}
+};
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops pch_gbe_pm_ops = {
+ .suspend = pch_gbe_suspend,
+ .resume = pch_gbe_resume,
+ .freeze = pch_gbe_suspend,
+ .thaw = pch_gbe_resume,
+ .poweroff = pch_gbe_suspend,
+ .restore = pch_gbe_resume,
+};
+#endif
+
+static struct pci_error_handlers pch_gbe_err_handler = {
+ .error_detected = pch_gbe_io_error_detected,
+ .slot_reset = pch_gbe_io_slot_reset,
+ .resume = pch_gbe_io_resume
+};
+
+static struct pci_driver pch_gbe_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_gbe_pcidev_id,
+ .probe = pch_gbe_probe,
+ .remove = pch_gbe_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &pch_gbe_pm_ops,
+#endif
+ .shutdown = pch_gbe_shutdown,
+ .err_handler = &pch_gbe_err_handler
+};
+
+
+static int __init pch_gbe_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&pch_gbe_driver);
+ if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
+ if (copybreak == 0) {
+ pr_info("copybreak disabled\n");
+ } else {
+ pr_info("copybreak enabled for packets <= %u bytes\n",
+ copybreak);
+ }
+ }
+ return ret;
+}
+
+static void __exit pch_gbe_exit_module(void)
+{
+ pci_unregister_driver(&pch_gbe_driver);
+}
+
+module_init(pch_gbe_init_module);
+module_exit(pch_gbe_exit_module);
+
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
+
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/* pch_gbe_main.c */
--- /dev/null
- if (RTL_R8(PHYstatus) & TBI_Enable)
+/*
+ * r8169.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/pci-aspm.h>
+#include <linux/prefetch.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define RTL8169_VERSION "2.3LK-NAPI"
+#define MODULENAME "r8169"
+#define PFX MODULENAME ": "
+
+#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
+#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
+#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
+#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
+#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
+#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
+
+#ifdef RTL8169_DEBUG
+#define assert(expr) \
+ if (!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__func__,__LINE__); \
+ }
+#define dprintk(fmt, args...) \
+ do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
+#else
+#define assert(expr) do {} while (0)
+#define dprintk(fmt, args...) do {} while (0)
+#endif /* RTL8169_DEBUG */
+
+#define R8169_MSG_DEFAULT \
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
+
+#define TX_BUFFS_AVAIL(tp) \
+ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
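+
+/*
+ * One descriptor is deliberately kept unused: with dirty_tx == cur_tx
+ * the ring is empty, so a full ring must stop one entry short to keep
+ * the two states distinguishable.
+ */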
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static const int multicast_filter_limit = 32;
+
+/* MAC address length */
+#define MAC_ADDR_LEN 6
+
+#define MAX_READ_REQUEST_SHIFT 12
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
+#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
+
+#define R8169_REGS_SIZE 256
+#define R8169_NAPI_WEIGHT 64
+#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
+#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
+#define RX_BUF_SIZE 1536 /* Rx Buffer size */
+#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
+#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+
+#define RTL8169_TX_TIMEOUT (6*HZ)
+#define RTL8169_PHY_TIMEOUT (10*HZ)
+
+#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
+#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
+#define RTL_EEPROM_SIG_ADDR 0x0000
+
+/* write/read MMIO register */
+#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
+#define RTL_R8(reg) readb (ioaddr + (reg))
+#define RTL_R16(reg) readw (ioaddr + (reg))
+#define RTL_R32(reg) readl (ioaddr + (reg))
+
+enum mac_version {
+ RTL_GIGA_MAC_VER_01 = 0,
+ RTL_GIGA_MAC_VER_02,
+ RTL_GIGA_MAC_VER_03,
+ RTL_GIGA_MAC_VER_04,
+ RTL_GIGA_MAC_VER_05,
+ RTL_GIGA_MAC_VER_06,
+ RTL_GIGA_MAC_VER_07,
+ RTL_GIGA_MAC_VER_08,
+ RTL_GIGA_MAC_VER_09,
+ RTL_GIGA_MAC_VER_10,
+ RTL_GIGA_MAC_VER_11,
+ RTL_GIGA_MAC_VER_12,
+ RTL_GIGA_MAC_VER_13,
+ RTL_GIGA_MAC_VER_14,
+ RTL_GIGA_MAC_VER_15,
+ RTL_GIGA_MAC_VER_16,
+ RTL_GIGA_MAC_VER_17,
+ RTL_GIGA_MAC_VER_18,
+ RTL_GIGA_MAC_VER_19,
+ RTL_GIGA_MAC_VER_20,
+ RTL_GIGA_MAC_VER_21,
+ RTL_GIGA_MAC_VER_22,
+ RTL_GIGA_MAC_VER_23,
+ RTL_GIGA_MAC_VER_24,
+ RTL_GIGA_MAC_VER_25,
+ RTL_GIGA_MAC_VER_26,
+ RTL_GIGA_MAC_VER_27,
+ RTL_GIGA_MAC_VER_28,
+ RTL_GIGA_MAC_VER_29,
+ RTL_GIGA_MAC_VER_30,
+ RTL_GIGA_MAC_VER_31,
+ RTL_GIGA_MAC_VER_32,
+ RTL_GIGA_MAC_VER_33,
+ RTL_GIGA_MAC_VER_34,
+ RTL_GIGA_MAC_NONE = 0xff,
+};
+
+enum rtl_tx_desc_version {
+ RTL_TD_0 = 0,
+ RTL_TD_1 = 1,
+};
+
+#define _R(NAME,TD,FW) \
+ { .name = NAME, .txd_version = TD, .fw_name = FW }
+
+static const struct {
+ const char *name;
+ enum rtl_tx_desc_version txd_version;
+ const char *fw_name;
+} rtl_chip_infos[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_01] =
+ _R("RTL8169", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_02] =
+ _R("RTL8169s", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_03] =
+ _R("RTL8110s", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_04] =
+ _R("RTL8169sb/8110sb", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_05] =
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_06] =
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] =
+ _R("RTL8102e", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_08] =
+ _R("RTL8102e", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_09] =
+ _R("RTL8102e", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_10] =
+ _R("RTL8101e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_11] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_12] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_13] =
+ _R("RTL8101e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_14] =
+ _R("RTL8100e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_15] =
+ _R("RTL8100e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_16] =
+ _R("RTL8101e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_17] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_18] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_19] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_20] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_21] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_22] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_23] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_24] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_25] =
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1),
+ [RTL_GIGA_MAC_VER_26] =
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2),
+ [RTL_GIGA_MAC_VER_27] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_28] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_29] =
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
+ [RTL_GIGA_MAC_VER_30] =
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
+ [RTL_GIGA_MAC_VER_31] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_32] =
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1),
+ [RTL_GIGA_MAC_VER_33] =
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2),
+ [RTL_GIGA_MAC_VER_34] =
+ _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3)
+};
+#undef _R
+
+enum cfg_version {
+ RTL_CFG_0 = 0x00,
+ RTL_CFG_1,
+ RTL_CFG_2
+};
+
+static void rtl_hw_start_8169(struct net_device *);
+static void rtl_hw_start_8168(struct net_device *);
+static void rtl_hw_start_8101(struct net_device *);
+
+static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032,
+ PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
+ { 0x0001, 0x8168,
+ PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
+
+static int rx_buf_sz = 16383;
+static int use_dac;
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+enum rtl_registers {
+ MAC0 = 0, /* Ethernet hardware address. */
+ MAC4 = 4,
+ MAR0 = 8, /* Multicast filter. */
+ CounterAddrLow = 0x10,
+ CounterAddrHigh = 0x14,
+ TxDescStartAddrLow = 0x20,
+ TxDescStartAddrHigh = 0x24,
+ TxHDescStartAddrLow = 0x28,
+ TxHDescStartAddrHigh = 0x2c,
+ FLASH = 0x30,
+ ERSR = 0x36,
+ ChipCmd = 0x37,
+ TxPoll = 0x38,
+ IntrMask = 0x3c,
+ IntrStatus = 0x3e,
+
+ TxConfig = 0x40,
+#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
+#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
+
+ RxConfig = 0x44,
+#define RX128_INT_EN (1 << 15) /* 8111c and later */
+#define RX_MULTI_EN (1 << 14) /* 8111c only */
+#define RXCFG_FIFO_SHIFT 13
+ /* No threshold before first PCI xfer */
+#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
+#define RXCFG_DMA_SHIFT 8
+ /* Unlimited maximum PCI burst. */
+#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
+
+ RxMissed = 0x4c,
+ Cfg9346 = 0x50,
+ Config0 = 0x51,
+ Config1 = 0x52,
+ Config2 = 0x53,
+ Config3 = 0x54,
+ Config4 = 0x55,
+ Config5 = 0x56,
+ MultiIntr = 0x5c,
+ PHYAR = 0x60,
+ PHYstatus = 0x6c,
+ RxMaxSize = 0xda,
+ CPlusCmd = 0xe0,
+ IntrMitigate = 0xe2,
+ RxDescAddrLow = 0xe4,
+ RxDescAddrHigh = 0xe8,
+ EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
+
+#define NoEarlyTx 0x3f /* Max value : no early transmit. */
+
+ MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
+
+#define TxPacketMax (8064 >> 7)
+
+ FuncEvent = 0xf0,
+ FuncEventMask = 0xf4,
+ FuncPresetState = 0xf8,
+ FuncForceEvent = 0xfc,
+};
+
+enum rtl8110_registers {
+ TBICSR = 0x64,
+ TBI_ANAR = 0x68,
+ TBI_LPAR = 0x6a,
+};
+
+enum rtl8168_8101_registers {
+ CSIDR = 0x64,
+ CSIAR = 0x68,
+#define CSIAR_FLAG 0x80000000
+#define CSIAR_WRITE_CMD 0x80000000
+#define CSIAR_BYTE_ENABLE 0x0f
+#define CSIAR_BYTE_ENABLE_SHIFT 12
+#define CSIAR_ADDR_MASK 0x0fff
+ PMCH = 0x6f,
+ EPHYAR = 0x80,
+#define EPHYAR_FLAG 0x80000000
+#define EPHYAR_WRITE_CMD 0x80000000
+#define EPHYAR_REG_MASK 0x1f
+#define EPHYAR_REG_SHIFT 16
+#define EPHYAR_DATA_MASK 0xffff
+ DLLPR = 0xd0,
+#define PFM_EN (1 << 6)
+ DBG_REG = 0xd1,
+#define FIX_NAK_1 (1 << 4)
+#define FIX_NAK_2 (1 << 3)
+ TWSI = 0xd2,
+ MCU = 0xd3,
+#define NOW_IS_OOB (1 << 7)
+#define EN_NDP (1 << 3)
+#define EN_OOB_RESET (1 << 2)
+ EFUSEAR = 0xdc,
+#define EFUSEAR_FLAG 0x80000000
+#define EFUSEAR_WRITE_CMD 0x80000000
+#define EFUSEAR_READ_CMD 0x00000000
+#define EFUSEAR_REG_MASK 0x03ff
+#define EFUSEAR_REG_SHIFT 8
+#define EFUSEAR_DATA_MASK 0xff
+};
+
+enum rtl8168_registers {
+ LED_FREQ = 0x1a,
+ EEE_LED = 0x1b,
+ ERIDR = 0x70,
+ ERIAR = 0x74,
+#define ERIAR_FLAG 0x80000000
+#define ERIAR_WRITE_CMD 0x80000000
+#define ERIAR_READ_CMD 0x00000000
+#define ERIAR_ADDR_BYTE_ALIGN 4
+#define ERIAR_TYPE_SHIFT 16
+#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
+#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
+#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
+#define ERIAR_MASK_SHIFT 12
+#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
+ EPHY_RXER_NUM = 0x7c,
+ OCPDR = 0xb0, /* OCP GPHY access */
+#define OCPDR_WRITE_CMD 0x80000000
+#define OCPDR_READ_CMD 0x00000000
+#define OCPDR_REG_MASK 0x7f
+#define OCPDR_GPHY_REG_SHIFT 16
+#define OCPDR_DATA_MASK 0xffff
+ OCPAR = 0xb4,
+#define OCPAR_FLAG 0x80000000
+#define OCPAR_GPHY_WRITE_CMD 0x8000f060
+#define OCPAR_GPHY_READ_CMD 0x0000f060
+ RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
+ MISC = 0xf0, /* 8168e only. */
+#define TXPLA_RST (1 << 29)
+#define PWM_EN (1 << 22)
+};
+
+enum rtl_register_content {
+ /* InterruptStatusBits */
+ SYSErr = 0x8000,
+ PCSTimeout = 0x4000,
+ SWInt = 0x0100,
+ TxDescUnavail = 0x0080,
+ RxFIFOOver = 0x0040,
+ LinkChg = 0x0020,
+ RxOverflow = 0x0010,
+ TxErr = 0x0008,
+ TxOK = 0x0004,
+ RxErr = 0x0002,
+ RxOK = 0x0001,
+
+ /* RxStatusDesc */
++ RxBOVF = (1 << 24),
+ RxFOVF = (1 << 23),
+ RxRWT = (1 << 22),
+ RxRES = (1 << 21),
+ RxRUNT = (1 << 20),
+ RxCRC = (1 << 19),
+
+ /* ChipCmdBits */
+ StopReq = 0x80,
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08,
+ CmdTxEnb = 0x04,
+ RxBufEmpty = 0x01,
+
+ /* TXPoll register p.5 */
+ HPQ = 0x80, /* Poll cmd on the high prio queue */
+ NPQ = 0x40, /* Poll cmd on the low prio queue */
+ FSWInt = 0x01, /* Forced software interrupt */
+
+ /* Cfg9346Bits */
+ Cfg9346_Lock = 0x00,
+ Cfg9346_Unlock = 0xc0,
+
+ /* rx_mode_bits */
+ AcceptErr = 0x20,
+ AcceptRunt = 0x10,
+ AcceptBroadcast = 0x08,
+ AcceptMulticast = 0x04,
+ AcceptMyPhys = 0x02,
+ AcceptAllPhys = 0x01,
+#define RX_CONFIG_ACCEPT_MASK 0x3f
+
+ /* TxConfigBits */
+ TxInterFrameGapShift = 24,
+ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+
+ /* Config1 register p.24 */
+ LEDS1 = (1 << 7),
+ LEDS0 = (1 << 6),
+ MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
+ Speed_down = (1 << 4),
+ MEMMAP = (1 << 3),
+ IOMAP = (1 << 2),
+ VPD = (1 << 1),
+ PMEnable = (1 << 0), /* Power Management Enable */
+
+ /* Config2 register p. 25 */
+ PCI_Clock_66MHz = 0x01,
+ PCI_Clock_33MHz = 0x00,
+
+ /* Config3 register p.25 */
+ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
+ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
+ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
+
+ /* Config5 register p.27 */
+ BWF = (1 << 6), /* Accept Broadcast wakeup frame */
+ MWF = (1 << 5), /* Accept Multicast wakeup frame */
+ UWF = (1 << 4), /* Accept Unicast wakeup frame */
+ Spi_en = (1 << 3),
+ LanWake = (1 << 1), /* LanWake enable/disable */
+ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+
+ /* TBICSR p.28 */
+ TBIReset = 0x80000000,
+ TBILoopback = 0x40000000,
+ TBINwEnable = 0x20000000,
+ TBINwRestart = 0x10000000,
+ TBILinkOk = 0x02000000,
+ TBINwComplete = 0x01000000,
+
+ /* CPlusCmd p.31 */
+ EnableBist = (1 << 15), // 8168 8101
+ Mac_dbgo_oe = (1 << 14), // 8168 8101
+ Normal_mode = (1 << 13), // unused
+ Force_half_dup = (1 << 12), // 8168 8101
+ Force_rxflow_en = (1 << 11), // 8168 8101
+ Force_txflow_en = (1 << 10), // 8168 8101
+ Cxpl_dbg_sel = (1 << 9), // 8168 8101
+ ASF = (1 << 8), // 8168 8101
+ PktCntrDisable = (1 << 7), // 8168 8101
+ Mac_dbgo_sel = 0x001c, // 8168
+ RxVlan = (1 << 6),
+ RxChkSum = (1 << 5),
+ PCIDAC = (1 << 4),
+ PCIMulRW = (1 << 3),
+ INTT_0 = 0x0000, // 8168
+ INTT_1 = 0x0001, // 8168
+ INTT_2 = 0x0002, // 8168
+ INTT_3 = 0x0003, // 8168
+
+ /* rtl8169_PHYstatus */
+ TBI_Enable = 0x80,
+ TxFlowCtrl = 0x40,
+ RxFlowCtrl = 0x20,
+ _1000bpsF = 0x10,
+ _100bps = 0x08,
+ _10bps = 0x04,
+ LinkStatus = 0x02,
+ FullDup = 0x01,
+
+ /* _TBICSRBit */
+ TBILinkOK = 0x02000000,
+
+ /* DumpCounterCommand */
+ CounterDump = 0x8,
+};
+
+enum rtl_desc_bit {
+ /* First doubleword. */
+ DescOwn = (1 << 31), /* Descriptor is owned by NIC */
+ RingEnd = (1 << 30), /* End of descriptor ring */
+ FirstFrag = (1 << 29), /* First segment of a packet */
+ LastFrag = (1 << 28), /* Final segment of a packet */
+};
+
+/* Generic case. */
+enum rtl_tx_desc_bit {
+ /* First doubleword. */
+ TD_LSO = (1 << 27), /* Large Send Offload */
+#define TD_MSS_MAX 0x07ffu /* MSS value */
+
+ /* Second doubleword. */
+ TxVlanTag = (1 << 17), /* Add VLAN tag */
+};
+
+/* 8169, 8168b and 810x except 8102e. */
+enum rtl_tx_desc_bit_0 {
+ /* First doubleword. */
+#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
+ TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
+ TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
+ TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
+};
+
+/* 8102e, 8168c and beyond. */
+enum rtl_tx_desc_bit_1 {
+ /* Second doubleword. */
+#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
+ TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
+ TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
+ TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
+};
+
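+/*
+ * Per-generation Tx descriptor layout, presumably indexed by
+ * tp->txd_version (RTL_TD_0 or RTL_TD_1): which bits request checksum
+ * offload, where the MSS sits and which opts word (0 = opts1,
+ * 1 = opts2) carries them.
+ */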
+static const struct rtl_tx_desc_info {
+ struct {
+ u32 udp;
+ u32 tcp;
+ } checksum;
+ u16 mss_shift;
+ u16 opts_offset;
+} tx_desc_info [] = {
+ [RTL_TD_0] = {
+ .checksum = {
+ .udp = TD0_IP_CS | TD0_UDP_CS,
+ .tcp = TD0_IP_CS | TD0_TCP_CS
+ },
+ .mss_shift = TD0_MSS_SHIFT,
+ .opts_offset = 0
+ },
+ [RTL_TD_1] = {
+ .checksum = {
+ .udp = TD1_IP_CS | TD1_UDP_CS,
+ .tcp = TD1_IP_CS | TD1_TCP_CS
+ },
+ .mss_shift = TD1_MSS_SHIFT,
+ .opts_offset = 1
+ }
+};
+
+enum rtl_rx_desc_bit {
+ /* Rx private */
+ PID1 = (1 << 18), /* Protocol ID bit 1/2 */
+ PID0 = (1 << 17), /* Protocol ID bit 2/2 */
+
+#define RxProtoUDP (PID1)
+#define RxProtoTCP (PID0)
+#define RxProtoIP (PID1 | PID0)
+#define RxProtoMask RxProtoIP
+
+ IPFail = (1 << 16), /* IP checksum failed */
+ UDPFail = (1 << 15), /* UDP/IP checksum failed */
+ TCPFail = (1 << 14), /* TCP/IP checksum failed */
+ RxVlanTag = (1 << 16), /* VLAN tag available */
+};
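+/*
+ * Note: RxVlanTag shares bit 16 with IPFail.  The overlap is harmless
+ * since the VLAN tag bit is tested against opts2 (see
+ * rtl8169_rx_vlan_tag()) while the checksum bits presumably live in
+ * opts1.
+ */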
+
+#define RsvdMask 0x3fffc000
+
+struct TxDesc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+};
+
+struct RxDesc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ u32 len;
+ u8 __pad[sizeof(void *) - sizeof(u32)];
+};
+
+enum features {
+ RTL_FEATURE_WOL = (1 << 0),
+ RTL_FEATURE_MSI = (1 << 1),
+ RTL_FEATURE_GMII = (1 << 2),
+};
+
+struct rtl8169_counters {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underun;
+};
+
+struct rtl8169_private {
+ void __iomem *mmio_addr; /* memory mapped device registers */
+ struct pci_dev *pci_dev;
+ struct net_device *dev;
+ struct napi_struct napi;
+ spinlock_t lock;
+ u32 msg_enable;
+ u16 txd_version;
+ u16 mac_version;
+ u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
+ u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
+ u32 dirty_rx;
+ u32 dirty_tx;
+ struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
+ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
+ dma_addr_t TxPhyAddr;
+ dma_addr_t RxPhyAddr;
+ void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
+ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
+ struct timer_list timer;
+ u16 cp_cmd;
+ u16 intr_event;
+ u16 napi_event;
+ u16 intr_mask;
+
+ struct mdio_ops {
+ void (*write)(void __iomem *, int, int);
+ int (*read)(void __iomem *, int);
+ } mdio_ops;
+
+ struct pll_power_ops {
+ void (*down)(struct rtl8169_private *);
+ void (*up)(struct rtl8169_private *);
+ } pll_power_ops;
+
+ int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*phy_reset_enable)(struct rtl8169_private *tp);
+ void (*hw_start)(struct net_device *);
+ unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
+ unsigned int (*link_ok)(void __iomem *);
+ int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
+ struct delayed_work task;
+ unsigned features;
+
+ struct mii_if_info mii;
+ struct rtl8169_counters counters;
+ u32 saved_wolopts;
+ u32 opts1_mask;
+
+ struct rtl_fw {
+ const struct firmware *fw;
+
+#define RTL_VER_SIZE 32
+
+ char version[RTL_VER_SIZE];
+
+ struct rtl_fw_phy_action {
+ __le32 *code;
+ size_t size;
+ } phy_action;
+ } *rtl_fw;
+#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
+};
+
+MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
+MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
+module_param(use_dac, int, 0);
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RTL8169_VERSION);
+MODULE_FIRMWARE(FIRMWARE_8168D_1);
+MODULE_FIRMWARE(FIRMWARE_8168D_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_1);
+MODULE_FIRMWARE(FIRMWARE_8168E_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_3);
+MODULE_FIRMWARE(FIRMWARE_8105E_1);
+
+static int rtl8169_open(struct net_device *dev);
+static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
+static int rtl8169_init_ring(struct net_device *dev);
+static void rtl_hw_start(struct net_device *dev);
+static int rtl8169_close(struct net_device *dev);
+static void rtl_set_rx_mode(struct net_device *dev);
+static void rtl8169_tx_timeout(struct net_device *dev);
+static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
+static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
+ void __iomem *, u32 budget);
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
+static void rtl8169_down(struct net_device *dev);
+static void rtl8169_rx_clear(struct rtl8169_private *tp);
+static int rtl8169_poll(struct napi_struct *napi, int budget);
+
+static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ for (i = 0; i < 20; i++) {
+ udelay(100);
+ if (RTL_R32(OCPAR) & OCPAR_FLAG)
+ break;
+ }
+ return RTL_R32(OCPDR);
+}
+
+static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W32(OCPDR, data);
+ RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ for (i = 0; i < 20; i++) {
+ udelay(100);
+ if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
+ break;
+ }
+}
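+
+/*
+ * Both OCP accessors above poll OCPAR_FLAG for at most 2 ms
+ * (20 iterations x 100 us): ocp_read() waits for the flag to be set,
+ * ocp_write() for it to clear.
+ */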
+
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W8(ERIDR, cmd);
+ RTL_W32(ERIAR, 0x800010e8);
+ msleep(2);
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
+ break;
+ }
+
+ ocp_write(tp, 0x1, 0x30, 0x00000001);
+}
+
+#define OOB_CMD_RESET 0x00
+#define OOB_CMD_DRIVER_START 0x05
+#define OOB_CMD_DRIVER_STOP 0x06
+
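+/*
+ * OCP register polled by the DASH/driver start-stop helpers below:
+ * 0xb8 on RTL_GIGA_MAC_VER_31, 0x10 on the other DP revisions.
+ */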
+static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
+{
+ return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
+}
+
+static void rtl8168_driver_start(struct rtl8169_private *tp)
+{
+ u16 reg;
+ int i;
+
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
+
+ reg = rtl8168_get_ocp_reg(tp);
+
+ for (i = 0; i < 10; i++) {
+ msleep(10);
+ if (ocp_read(tp, 0x0f, reg) & 0x00000800)
+ break;
+ }
+}
+
+static void rtl8168_driver_stop(struct rtl8169_private *tp)
+{
+ u16 reg;
+ int i;
+
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+
+ reg = rtl8168_get_ocp_reg(tp);
+
+ for (i = 0; i < 10; i++) {
+ msleep(10);
+ if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
+ break;
+ }
+}
+
+static int r8168dp_check_dash(struct rtl8169_private *tp)
+{
+ u16 reg = rtl8168_get_ocp_reg(tp);
+
+ return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
+}
+
+static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ int i;
+
+ RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
+
+ for (i = 20; i > 0; i--) {
+ /*
+ * Check if the RTL8169 has completed writing to the specified
+ * MII register.
+ */
+ if (!(RTL_R32(PHYAR) & 0x80000000))
+ break;
+ udelay(25);
+ }
+ /*
+ * According to the hardware specs a 20us delay is required after
+ * write completion, before the next command may be sent.
+ */
+ udelay(20);
+}
+
+static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int i, value = -1;
+
+ RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
+
+ for (i = 20; i > 0; i--) {
+ /*
+ * Check if the RTL8169 has completed retrieving data from
+ * the specified MII register.
+ */
+ if (RTL_R32(PHYAR) & 0x80000000) {
+ value = RTL_R32(PHYAR) & 0xffff;
+ break;
+ }
+ udelay(25);
+ }
+ /*
+ * According to the hardware specs a 20us delay is required after
+ * read completion, before the next command may be sent.
+ */
+ udelay(20);
+
+ return value;
+}
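+
+/*
+ * The two MDIO accessors above allow up to 500 us (20 x 25 us) for the
+ * PHYAR handshake to complete and then insert the mandatory 20 us gap
+ * before the next command.
+ */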
+
+static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
+{
+ int i;
+
+ RTL_W32(OCPDR, data |
+ ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
+ RTL_W32(EPHY_RXER_NUM, 0);
+
+ for (i = 0; i < 100; i++) {
+ mdelay(1);
+ if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
+ break;
+ }
+}
+
+static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
+ (value & OCPDR_DATA_MASK));
+}
+
+static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int i;
+
+ r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
+
+ mdelay(1);
+ RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
+ RTL_W32(EPHY_RXER_NUM, 0);
+
+ for (i = 0; i < 100; i++) {
+ mdelay(1);
+ if (RTL_R32(OCPAR) & OCPAR_FLAG)
+ break;
+ }
+
+ return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
+}
+
+#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
+
+static void r8168dp_2_mdio_start(void __iomem *ioaddr)
+{
+ RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
+}
+
+static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
+{
+ RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
+}
+
+static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ r8168dp_2_mdio_start(ioaddr);
+
+ r8169_mdio_write(ioaddr, reg_addr, value);
+
+ r8168dp_2_mdio_stop(ioaddr);
+}
+
+static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int value;
+
+ r8168dp_2_mdio_start(ioaddr);
+
+ value = r8169_mdio_read(ioaddr, reg_addr);
+
+ r8168dp_2_mdio_stop(ioaddr);
+
+ return value;
+}
+
+static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
+{
+ tp->mdio_ops.write(tp->mmio_addr, location, val);
+}
+
+static int rtl_readphy(struct rtl8169_private *tp, int location)
+{
+ return tp->mdio_ops.read(tp->mmio_addr, location);
+}
+
+static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
+{
+ rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
+}
+
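+/*
+ * "Write ones, write zeros": new = (old | p) & ~m, i.e. the bits in 'p'
+ * are set first and the bits in 'm' are cleared afterwards, so a bit
+ * present in both masks ends up cleared.  E.g.
+ * rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef) sets bit 4 and clears bits
+ * 0-3 and 5-7 of PHY register 0x0b.
+ */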
+static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
+{
+ int val;
+
+ val = rtl_readphy(tp, reg_addr);
+ rtl_writephy(tp, reg_addr, (val | p) & ~m);
+}
+
+static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
+ int val)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl_writephy(tp, location, val);
+}
+
+static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return rtl_readphy(tp, location);
+}
+
+static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ unsigned int i;
+
+ RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
+ (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
+ break;
+ udelay(10);
+ }
+}
+
+static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
+{
+ u16 value = 0xffff;
+ unsigned int i;
+
+ RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
+ value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
+ break;
+ }
+ udelay(10);
+ }
+
+ return value;
+}
+
+static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
+{
+ unsigned int i;
+
+ RTL_W32(CSIDR, value);
+ RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
+ break;
+ udelay(10);
+ }
+}
+
+static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
+{
+ u32 value = ~0x00;
+ unsigned int i;
+
+ RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (RTL_R32(CSIAR) & CSIAR_FLAG) {
+ value = RTL_R32(CSIDR);
+ break;
+ }
+ udelay(10);
+ }
+
+ return value;
+}
+
+static
+void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
+{
+ unsigned int i;
+
+ BUG_ON((addr & 3) || (mask == 0));
+ RTL_W32(ERIDR, val);
+ RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
+
+ for (i = 0; i < 100; i++) {
+ if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
+ break;
+ udelay(100);
+ }
+}
+
+static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
+{
+ u32 value = ~0x00;
+ unsigned int i;
+
+ RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
+
+ for (i = 0; i < 100; i++) {
+ if (RTL_R32(ERIAR) & ERIAR_FLAG) {
+ value = RTL_R32(ERIDR);
+ break;
+ }
+ udelay(100);
+ }
+
+ return value;
+}
+
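+/*
+ * ERI read-modify-write.  Note the opposite precedence to
+ * rtl_w1w0_phy(): here new = (old & ~m) | p, so a bit present in both
+ * masks ends up set.
+ */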
+static void
+rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
+{
+ u32 val;
+
+ val = rtl_eri_read(ioaddr, addr, type);
+ rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
+}
+
+struct exgmac_reg {
+ u16 addr;
+ u16 mask;
+ u32 val;
+};
+
+static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+ const struct exgmac_reg *r, int len)
+{
+ while (len-- > 0) {
+ rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ r++;
+ }
+}
+
+static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+{
+ u8 value = 0xff;
+ unsigned int i;
+
+ RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+ for (i = 0; i < 300; i++) {
+ if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
+ value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
+ break;
+ }
+ udelay(100);
+ }
+
+ return value;
+}
+
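+/* Mask every interrupt source, then write-1-to-clear any pending status. */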
+static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+{
+ RTL_W16(IntrMask, 0x0000);
+
+ RTL_W16(IntrStatus, 0xffff);
+}
+
+static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(TBICSR) & TBIReset;
+}
+
+static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
+{
+ return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
+}
+
+static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
+{
+ return RTL_R32(TBICSR) & TBILinkOk;
+}
+
+static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
+{
+ return RTL_R8(PHYstatus) & LinkStatus;
+}
+
+static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
+}
+
+static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
+{
+ unsigned int val;
+
+ val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
+ rtl_writephy(tp, MII_BMCR, val & 0xffff);
+}
+
+static void rtl_link_chg_patch(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct net_device *dev = tp->dev;
+
+ if (!netif_running(dev))
+ return;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ if (RTL_R8(PHYstatus) & _1000bpsF) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x00000011, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else if (RTL_R8(PHYstatus) & _100bps) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x0000003f, ERIAR_EXGMAC);
+ }
+ /* Reset packet filter */
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
+ ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
+ ERIAR_EXGMAC);
+ }
+}
+
+static void __rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr, bool pm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (tp->link_ok(ioaddr)) {
+ rtl_link_chg_patch(tp);
+ /* Cancel any scheduled suspend. */
+ if (pm)
+ pm_request_resume(&tp->pci_dev->dev);
+ netif_carrier_on(dev);
+ if (net_ratelimit())
+ netif_info(tp, ifup, dev, "link up\n");
+ } else {
+ netif_carrier_off(dev);
+ netif_info(tp, ifdown, dev, "link down\n");
+ if (pm)
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
+ }
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ __rtl8169_check_link_status(dev, tp, ioaddr, false);
+}
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 options;
+ u32 wolopts = 0;
+
+ options = RTL_R8(Config1);
+ if (!(options & PMEnable))
+ return 0;
+
+ options = RTL_R8(Config3);
+ if (options & LinkUp)
+ wolopts |= WAKE_PHY;
+ if (options & MagicPacket)
+ wolopts |= WAKE_MAGIC;
+
+ options = RTL_R8(Config5);
+ if (options & UWF)
+ wolopts |= WAKE_UCAST;
+ if (options & BWF)
+ wolopts |= WAKE_BCAST;
+ if (options & MWF)
+ wolopts |= WAKE_MCAST;
+
+ return wolopts;
+}
+
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl8169_get_wol(tp);
+
+ spin_unlock_irq(&tp->lock);
+}
+
+static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int i;
+ static const struct {
+ u32 opt;
+ u16 reg;
+ u8 mask;
+ } cfg[] = {
+ { WAKE_ANY, Config1, PMEnable },
+ { WAKE_PHY, Config3, LinkUp },
+ { WAKE_MAGIC, Config3, MagicPacket },
+ { WAKE_UCAST, Config5, UWF },
+ { WAKE_BCAST, Config5, BWF },
+ { WAKE_MCAST, Config5, MWF },
+ { WAKE_ANY, Config5, LanWake }
+ };
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+ u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ if (wolopts & cfg[i].opt)
+ options |= cfg[i].mask;
+ RTL_W8(cfg[i].reg, options);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ if (wol->wolopts)
+ tp->features |= RTL_FEATURE_WOL;
+ else
+ tp->features &= ~RTL_FEATURE_WOL;
+ __rtl8169_set_wol(tp, wol->wolopts);
+ spin_unlock_irq(&tp->lock);
+
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
+ return 0;
+}
+
+static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
+{
+ return rtl_chip_infos[tp->mac_version].fw_name;
+}
+
+static void rtl8169_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl_fw *rtl_fw = tp->rtl_fw;
+
+ strcpy(info->driver, MODULENAME);
+ strcpy(info->version, RTL8169_VERSION);
+ strcpy(info->bus_info, pci_name(tp->pci_dev));
+ BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
+ strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
+ rtl_fw->version);
+}
+
+static int rtl8169_get_regs_len(struct net_device *dev)
+{
+ return R8169_REGS_SIZE;
+}
+
+static int rtl8169_set_speed_tbi(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex, u32 ignored)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int ret = 0;
+ u32 reg;
+
+ reg = RTL_R32(TBICSR);
+ if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
+ (duplex == DUPLEX_FULL)) {
+ RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
+ } else if (autoneg == AUTONEG_ENABLE) {
+ RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
+ } else {
+ netif_warn(tp, link, dev,
+ "incorrect speed setting refused in TBI mode\n");
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtl8169_set_speed_xmii(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex, u32 adv)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int giga_ctrl, bmcr;
+ int rc = -EINVAL;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ if (autoneg == AUTONEG_ENABLE) {
+ int auto_nego;
+
+ auto_nego = rtl_readphy(tp, MII_ADVERTISE);
+ auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+
+ if (adv & ADVERTISED_10baseT_Half)
+ auto_nego |= ADVERTISE_10HALF;
+ if (adv & ADVERTISED_10baseT_Full)
+ auto_nego |= ADVERTISE_10FULL;
+ if (adv & ADVERTISED_100baseT_Half)
+ auto_nego |= ADVERTISE_100HALF;
+ if (adv & ADVERTISED_100baseT_Full)
+ auto_nego |= ADVERTISE_100FULL;
+
+ auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+ giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
+ giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+ /* The 8100e/8101e/8102e do Fast Ethernet only. */
+ if (tp->mii.supports_gmii) {
+ if (adv & ADVERTISED_1000baseT_Half)
+ giga_ctrl |= ADVERTISE_1000HALF;
+ if (adv & ADVERTISED_1000baseT_Full)
+ giga_ctrl |= ADVERTISE_1000FULL;
+ } else if (adv & (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full)) {
+ netif_info(tp, link, dev,
+ "PHY does not support 1000Mbps\n");
+ goto out;
+ }
+
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
+
+ rtl_writephy(tp, MII_ADVERTISE, auto_nego);
+ rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
+ } else {
+ giga_ctrl = 0;
+
+ if (speed == SPEED_10)
+ bmcr = 0;
+ else if (speed == SPEED_100)
+ bmcr = BMCR_SPEED100;
+ else
+ goto out;
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+ }
+
+ rtl_writephy(tp, MII_BMCR, bmcr);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
+ if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
+ rtl_writephy(tp, 0x17, 0x2138);
+ rtl_writephy(tp, 0x0e, 0x0260);
+ } else {
+ rtl_writephy(tp, 0x17, 0x2108);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ }
+ }
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int rtl8169_set_speed(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex, u32 advertising)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
+
+ ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
+ if (ret < 0)
+ goto out;
+
+ if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
+ (advertising & ADVERTISED_1000baseT_Full)) {
+ mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
+ }
+out:
+ return ret;
+}
+
+static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+ int ret;
+
+ del_timer_sync(&tp->timer);
+
+ spin_lock_irqsave(&tp->lock, flags);
+ ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
+ cmd->duplex, cmd->advertising);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return ret;
+}
+
+static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+{
+ if (dev->mtu > TD_MSS_MAX)
+ features &= ~NETIF_F_ALL_TSO;
+
+ return features;
+}
+
+static int rtl8169_set_features(struct net_device *dev, u32 features)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ if (features & NETIF_F_RXCSUM)
+ tp->cp_cmd |= RxChkSum;
+ else
+ tp->cp_cmd &= ~RxChkSum;
+
+ if (features & NETIF_F_HW_VLAN_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return 0;
+}
+
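+/*
+ * The chip appears to keep the VLAN tag big-endian inside the
+ * little-endian opts2 word, hence the swab16() in both the transmit
+ * and receive paths below.
+ */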
+static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ return (vlan_tx_tag_present(skb)) ?
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+}
+
+static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
+{
+ u32 opts2 = le32_to_cpu(desc->opts2);
+
+ if (opts2 & RxVlanTag)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+
+ desc->opts2 = 0;
+}
+
+static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 status;
+
+ cmd->supported =
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+
+ status = RTL_R32(TBICSR);
+ cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
+ cmd->autoneg = !!(status & TBINwEnable);
+
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->duplex = DUPLEX_FULL; /* Always set */
+
+ return 0;
+}
+
+static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return mii_ethtool_gset(&tp->mii, cmd);
+}
+
+static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ rc = tp->get_settings(dev, cmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ return rc;
+}
+
+static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (regs->len > R8169_REGS_SIZE)
+ regs->len = R8169_REGS_SIZE;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ memcpy_fromio(p, tp->mmio_addr, regs->len);
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static u32 rtl8169_get_msglevel(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return tp->msg_enable;
+}
+
+static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ tp->msg_enable = value;
+}
+
+static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
+ "tx_packets",
+ "rx_packets",
+ "tx_errors",
+ "rx_errors",
+ "rx_missed",
+ "align_errors",
+ "tx_single_collisions",
+ "tx_multi_collisions",
+ "unicast",
+ "broadcast",
+ "multicast",
+ "tx_aborted",
+ "tx_underrun",
+};
+
+static int rtl8169_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rtl8169_gstrings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
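+/*
+ * Hardware tally dump: the 64-bit DMA address of a rtl8169_counters
+ * buffer is programmed into CounterAddrHigh/Low, CounterDump kicks off
+ * the transfer and the chip clears the bit on completion (polled below
+ * for at most 10 ms, i.e. 1000 x 10 us).
+ */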
+static void rtl8169_update_counters(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct device *d = &tp->pci_dev->dev;
+ struct rtl8169_counters *counters;
+ dma_addr_t paddr;
+ u32 cmd;
+ int wait = 1000;
+
+ /*
+ * Some chips are unable to dump tally counters when the receiver
+ * is disabled.
+ */
+ if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
+ return;
+
+ counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
+ if (!counters)
+ return;
+
+ RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ cmd = (u64)paddr & DMA_BIT_MASK(32);
+ RTL_W32(CounterAddrLow, cmd);
+ RTL_W32(CounterAddrLow, cmd | CounterDump);
+
+ while (wait--) {
+ if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
+ memcpy(&tp->counters, counters, sizeof(*counters));
+ break;
+ }
+ udelay(10);
+ }
+
+ RTL_W32(CounterAddrLow, 0);
+ RTL_W32(CounterAddrHigh, 0);
+
+ dma_free_coherent(d, sizeof(*counters), counters, paddr);
+}
+
+static void rtl8169_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ ASSERT_RTNL();
+
+ rtl8169_update_counters(dev);
+
+ data[0] = le64_to_cpu(tp->counters.tx_packets);
+ data[1] = le64_to_cpu(tp->counters.rx_packets);
+ data[2] = le64_to_cpu(tp->counters.tx_errors);
+ data[3] = le32_to_cpu(tp->counters.rx_errors);
+ data[4] = le16_to_cpu(tp->counters.rx_missed);
+ data[5] = le16_to_cpu(tp->counters.align_errors);
+ data[6] = le32_to_cpu(tp->counters.tx_one_collision);
+ data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
+ data[8] = le64_to_cpu(tp->counters.rx_unicast);
+ data[9] = le64_to_cpu(tp->counters.rx_broadcast);
+ data[10] = le32_to_cpu(tp->counters.rx_multicast);
+ data[11] = le16_to_cpu(tp->counters.tx_aborted);
+ data[12] = le16_to_cpu(tp->counters.tx_underun);
+}
+
+static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ break;
+ }
+}
+
+static const struct ethtool_ops rtl8169_ethtool_ops = {
+ .get_drvinfo = rtl8169_get_drvinfo,
+ .get_regs_len = rtl8169_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_settings = rtl8169_get_settings,
+ .set_settings = rtl8169_set_settings,
+ .get_msglevel = rtl8169_get_msglevel,
+ .set_msglevel = rtl8169_set_msglevel,
+ .get_regs = rtl8169_get_regs,
+ .get_wol = rtl8169_get_wol,
+ .set_wol = rtl8169_set_wol,
+ .get_strings = rtl8169_get_strings,
+ .get_sset_count = rtl8169_get_sset_count,
+ .get_ethtool_stats = rtl8169_get_ethtool_stats,
+};
+
+static void rtl8169_get_mac_version(struct rtl8169_private *tp,
+ struct net_device *dev, u8 default_version)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ /*
+ * The driver currently handles the 8168Bf and the 8168Be identically
+ * but they can be identified more specifically through the test below
+ * if needed:
+ *
+ * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
+ *
+ * Same thing for the 8101Eb and the 8101Ec:
+ *
+ * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
+ */
+ static const struct rtl_mac_info {
+ u32 mask;
+ u32 val;
+ int mac_version;
+ } mac_info[] = {
+ /* 8168E family. */
+ { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
+ { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
+ { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
+ { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
+
+ /* 8168D family. */
+ { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
+ { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
+ { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
+
+ /* 8168DP family. */
+ { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
+ { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
+ { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
+
+ /* 8168C family. */
+ { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
+ { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
+ { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
+ { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
+ { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
+ { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
+ { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
+
+ /* 8168B family. */
+ { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
+ { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
+ { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
+ { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
+
+ /* 8101 family. */
+ { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
+ { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
+ { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
+ { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
+ { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
+ { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
+ { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
+ { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
+ { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
+ /* FIXME: where did these entries come from ? -- FR */
+ { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
+ { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
+
+ /* 8110 family. */
+ { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
+ { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
+ { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
+ { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
+ { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
+ { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
+
+ /* Catch-all */
+ { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
+ };
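+ /*
+ * First match wins, so the more specific masks must precede their
+ * family catch-alls; the all-zero entry guarantees termination.
+ */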
+ const struct rtl_mac_info *p = mac_info;
+ u32 reg;
+
+ reg = RTL_R32(TxConfig);
+ while ((reg & p->mask) != p->val)
+ p++;
+ tp->mac_version = p->mac_version;
+
+ if (tp->mac_version == RTL_GIGA_MAC_NONE) {
+ netif_notice(tp, probe, dev,
+ "unknown MAC, using family default\n");
+ tp->mac_version = default_version;
+ }
+}
+
+static void rtl8169_print_mac_version(struct rtl8169_private *tp)
+{
+ dprintk("mac_version = 0x%02x\n", tp->mac_version);
+}
+
+struct phy_reg {
+ u16 reg;
+ u16 val;
+};
+
+static void rtl_writephy_batch(struct rtl8169_private *tp,
+ const struct phy_reg *regs, int len)
+{
+ while (len-- > 0) {
+ rtl_writephy(tp, regs->reg, regs->val);
+ regs++;
+ }
+}
+
+#define PHY_READ 0x00000000
+#define PHY_DATA_OR 0x10000000
+#define PHY_DATA_AND 0x20000000
+#define PHY_BJMPN 0x30000000
+#define PHY_READ_EFUSE 0x40000000
+#define PHY_READ_MAC_BYTE 0x50000000
+#define PHY_WRITE_MAC_BYTE 0x60000000
+#define PHY_CLEAR_READCOUNT 0x70000000
+#define PHY_WRITE 0x80000000
+#define PHY_READCOUNT_EQ_SKIP 0x90000000
+#define PHY_COMP_EQ_SKIPN 0xa0000000
+#define PHY_COMP_NEQ_SKIPN 0xb0000000
+#define PHY_WRITE_PREVIOUS 0xc0000000
+#define PHY_SKIPN 0xd0000000
+#define PHY_DELAY_MS 0xe0000000
+#define PHY_WRITE_ERI_WORD 0xf0000000
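+
+/*
+ * A firmware action word decodes as: bits 31..28 the opcode (values
+ * above), bits 27..16 a register number or skip/jump count, bits 15..0
+ * immediate data -- see rtl_fw_data_ok() and rtl_phy_write_fw() below.
+ */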
+
+struct fw_info {
+ u32 magic;
+ char version[RTL_VER_SIZE];
+ __le32 fw_start;
+ __le32 fw_len;
+ u8 chksum;
+} __packed;
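+
+/*
+ * New-style firmware images start with the header above (magic == 0)
+ * and must checksum to zero over the whole file; anything else is
+ * treated as a bare array of action words.
+ */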
+
+#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+
+static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_info *fw_info = (struct fw_info *)fw->data;
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ char *version = rtl_fw->version;
+ bool rc = false;
+
+ if (fw->size < FW_OPCODE_SIZE)
+ goto out;
+
+ if (!fw_info->magic) {
+ size_t i, size, start;
+ u8 checksum = 0;
+
+ if (fw->size < sizeof(*fw_info))
+ goto out;
+
+ for (i = 0; i < fw->size; i++)
+ checksum += fw->data[i];
+ if (checksum != 0)
+ goto out;
+
+ start = le32_to_cpu(fw_info->fw_start);
+ if (start > fw->size)
+ goto out;
+
+ size = le32_to_cpu(fw_info->fw_len);
+ if (size > (fw->size - start) / FW_OPCODE_SIZE)
+ goto out;
+
+ memcpy(version, fw_info->version, RTL_VER_SIZE);
+
+ pa->code = (__le32 *)(fw->data + start);
+ pa->size = size;
+ } else {
+ if (fw->size % FW_OPCODE_SIZE)
+ goto out;
+
+ strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
+
+ pa->code = (__le32 *)fw->data;
+ pa->size = fw->size / FW_OPCODE_SIZE;
+ }
+ version[RTL_VER_SIZE - 1] = 0;
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
+ struct rtl_fw_phy_action *pa)
+{
+ bool rc = false;
+ size_t index;
+
+ for (index = 0; index < pa->size; index++) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ switch (action & 0xf0000000) {
+ case PHY_READ:
+ case PHY_DATA_OR:
+ case PHY_DATA_AND:
+ case PHY_READ_EFUSE:
+ case PHY_CLEAR_READCOUNT:
+ case PHY_WRITE:
+ case PHY_WRITE_PREVIOUS:
+ case PHY_DELAY_MS:
+ break;
+
+ case PHY_BJMPN:
+ if (regno > index) {
+ netif_err(tp, ifup, tp->dev,
+ "Out of range of firmware\n");
+ goto out;
+ }
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (index + 2 >= pa->size) {
+ netif_err(tp, ifup, tp->dev,
+ "Out of range of firmware\n");
+ goto out;
+ }
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ case PHY_COMP_NEQ_SKIPN:
+ case PHY_SKIPN:
+ if (index + 1 + regno >= pa->size) {
+ netif_err(tp, ifup, tp->dev,
+ "Out of range of firmware\n");
+ goto out;
+ }
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ netif_err(tp, ifup, tp->dev,
+ "Invalid action 0x%08x\n", action);
+ goto out;
+ }
+ }
+ rc = true;
+out:
+ return rc;
+}
+
+static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct net_device *dev = tp->dev;
+ int rc = -EINVAL;
+
+ if (!rtl_fw_format_ok(tp, rtl_fw)) {
+ netif_err(tp, ifup, dev, "invalid firmware\n");
+ goto out;
+ }
+
+ if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
+ rc = 0;
+out:
+ return rc;
+}
+
+static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ u32 predata, count;
+ size_t index;
+
+ predata = count = 0;
+
+ for (index = 0; index < pa->size; ) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 data = action & 0x0000ffff;
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ if (!action)
+ break;
+
+ switch (action & 0xf0000000) {
+ case PHY_READ:
+ predata = rtl_readphy(tp, regno);
+ count++;
+ index++;
+ break;
+ case PHY_DATA_OR:
+ predata |= data;
+ index++;
+ break;
+ case PHY_DATA_AND:
+ predata &= data;
+ index++;
+ break;
+ case PHY_BJMPN:
+ index -= regno;
+ break;
+ case PHY_READ_EFUSE:
+ predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ index++;
+ break;
+ case PHY_CLEAR_READCOUNT:
+ count = 0;
+ index++;
+ break;
+ case PHY_WRITE:
+ rtl_writephy(tp, regno, data);
+ index++;
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ index += (count == data) ? 2 : 1;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ if (predata == data)
+ index += regno;
+ index++;
+ break;
+ case PHY_COMP_NEQ_SKIPN:
+ if (predata != data)
+ index += regno;
+ index++;
+ break;
+ case PHY_WRITE_PREVIOUS:
+ rtl_writephy(tp, regno, predata);
+ index++;
+ break;
+ case PHY_SKIPN:
+ index += regno + 1;
+ break;
+ case PHY_DELAY_MS:
+ mdelay(data);
+ index++;
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ BUG();
+ }
+ }
+}
+
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+ if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
+ release_firmware(tp->rtl_fw->fw);
+ kfree(tp->rtl_fw);
+ }
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+}
+
+static void rtl_apply_firmware(struct rtl8169_private *tp)
+{
+ struct rtl_fw *rtl_fw = tp->rtl_fw;
+
+ /* TODO: release firmware once rtl_phy_write_fw signals failures. */
+ if (!IS_ERR_OR_NULL(rtl_fw))
+ rtl_phy_write_fw(tp, rtl_fw);
+}
+
+static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
+{
+ if (rtl_readphy(tp, reg) != val)
+ netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+ else
+ rtl_apply_firmware(tp);
+}
+
+static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x06, 0x006e },
+ { 0x08, 0x0708 },
+ { 0x15, 0x4000 },
+ { 0x18, 0x65c7 },
+
+ { 0x1f, 0x0001 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x0000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf60 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x0077 },
+ { 0x04, 0x7800 },
+ { 0x04, 0x7000 },
+
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf0f9 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xa000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x00bb },
+ { 0x04, 0xb800 },
+ { 0x04, 0xb000 },
+
+ { 0x03, 0xdf41 },
+ { 0x02, 0xdc60 },
+ { 0x01, 0x6340 },
+ { 0x00, 0x007d },
+ { 0x04, 0xd800 },
+ { 0x04, 0xd000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x100a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+
+ { 0x1f, 0x0000 },
+ { 0x0b, 0x0000 },
+ { 0x00, 0x9200 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x01, 0x90d0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+
+ if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
+ (pdev->subsystem_device != 0xe000))
+ return;
+
+ rtl_writephy(tp, 0x1f, 0x0001);
+ rtl_writephy(tp, 0x10, 0xf01b);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x10, 0xf41b },
+ { 0x14, 0xfb54 },
+ { 0x18, 0xf5c7 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl8169scd_hw_phy_config_quirk(tp);
+}
+
+static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x8480 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x18, 0x67c7 },
+ { 0x04, 0x2000 },
+ { 0x03, 0x002f },
+ { 0x02, 0x4360 },
+ { 0x01, 0x0109 },
+ { 0x00, 0x3022 },
+ { 0x04, 0x2800 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x10, 0xf41b },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy(tp, 0x1f, 0x0001);
+ rtl_patchphy(tp, 0x16, 1 << 0);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x10, 0xf41b },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0000 },
+ { 0x1d, 0x0f00 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x1ec8 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1f, 0x0002 },
+ { 0x00, 0x88d4 },
+ { 0x01, 0x82b1 },
+ { 0x03, 0x7002 },
+ { 0x08, 0x9e30 },
+ { 0x09, 0x01f0 },
+ { 0x0a, 0x5500 },
+ { 0x0c, 0x00c8 },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xc096 },
+ { 0x16, 0x000a },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+ { 0x09, 0x2000 },
+ { 0x09, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x0761 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_patchphy(tp, 0x16, 1 << 0);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x5461 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_patchphy(tp, 0x16, 1 << 0);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
+{
+ rtl8168c_3_hw_phy_config(tp);
+}
+
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link at 1Gbps with a bad cable.
+ * Decrease the SNR threshold from 21.07dB to 19.04dB.
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+ };
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+
+ /*
+ * Rx Error Issue
+ * Fine Tune Switching regulator parameter
+ */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
+ rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
+
+ if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+ };
+ int val;
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ val = rtl_readphy(tp, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ rtl_writephy(tp, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ rtl_writephy(tp, 0x0d, val | set[i]);
+ }
+ } else {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x6662 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x6662 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ /* RSET couple improve */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_patchphy(tp, 0x0d, 0x0300);
+ rtl_patchphy(tp, 0x0f, 0x0010);
+
+ /* Fine tune PLL performance */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+
+ rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link at 1Gbps with a bad cable.
+ * Decrease the SNR threshold from 21.07dB to 19.04dB.
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+ };
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+
+ if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+
+ { 0x1f, 0x0002 }
+ };
+ int val;
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ val = rtl_readphy(tp, 0x0d);
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ rtl_writephy(tp, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ rtl_writephy(tp, 0x0d, val | set[i]);
+ }
+ } else {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x2642 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x2642 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ /* Fine tune PLL performance */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
+
+ /* Switching regulator Slew rate */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_patchphy(tp, 0x0f, 0x0017);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+
+ rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x10, 0x0008 },
+ { 0x0d, 0x006c },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0xa4d8 },
+ { 0x09, 0x281c },
+ { 0x07, 0x2883 },
+ { 0x0a, 0x6b35 },
+ { 0x1d, 0x3da4 },
+ { 0x1c, 0xeffd },
+ { 0x14, 0x7f52 },
+ { 0x18, 0x7fc6 },
+ { 0x08, 0x0601 },
+ { 0x06, 0x4063 },
+ { 0x10, 0xf074 },
+ { 0x1f, 0x0003 },
+ { 0x13, 0x0789 },
+ { 0x12, 0xf4bd },
+ { 0x1a, 0x04fd },
+ { 0x14, 0x84b0 },
+ { 0x1f, 0x0000 },
+ { 0x00, 0x9200 },
+
+ { 0x1f, 0x0005 },
+ { 0x01, 0x0340 },
+ { 0x1f, 0x0001 },
+ { 0x04, 0x4000 },
+ { 0x03, 0x1d21 },
+ { 0x02, 0x0c32 },
+ { 0x01, 0x0200 },
+ { 0x00, 0x5554 },
+ { 0x04, 0x4800 },
+ { 0x04, 0x4000 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x0023 },
+ { 0x16, 0x0000 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x002d },
+ { 0x18, 0x0040 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+}
+
+static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Enable Delay cap */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b80 },
+ { 0x06, 0xc896 },
+ { 0x1f, 0x0000 },
+
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x6c20 },
+ { 0x07, 0x2872 },
+ { 0x1c, 0xefff },
+ { 0x1f, 0x0003 },
+ { 0x14, 0x6420 },
+ { 0x1f, 0x0000 },
+
+ /* Update PFM & 10M TX idle timer */
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x002f },
+ { 0x15, 0x1919 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x00ac },
+ { 0x18, 0x0006 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* DCO enable for 10M IDLE Power */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0023);
+ rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* For impedance matching */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0020);
+ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
+ rtl_writephy(tp, 0x1f, 0x0006);
+ rtl_writephy(tp, 0x00, 0x5a00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0007);
+ rtl_writephy(tp, 0x0e, 0x003c);
+ rtl_writephy(tp, 0x0d, 0x4007);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0000);
+}
+
+static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Enable Delay cap */
+ { 0x1f, 0x0004 },
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x00ac },
+ { 0x18, 0x0006 },
+ { 0x1f, 0x0002 },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0003 },
+ { 0x09, 0xa20f },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ /* Green Setting */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b5b },
+ { 0x06, 0x9222 },
+ { 0x05, 0x8b6d },
+ { 0x06, 0x8000 },
+ { 0x05, 0x8b76 },
+ { 0x06, 0x8000 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* For 4-corner performance improve */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b80);
+ rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0004);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ /* improve 10M EEE waveform */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* Improve 2-pair detection performance */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* EEE setting */
+ rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
+ ERIAR_EXGMAC);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_writephy(tp, 0x1f, 0x0004);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0020);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0007);
+ rtl_writephy(tp, 0x0e, 0x003c);
+ rtl_writephy(tp, 0x0d, 0x4007);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0000);
+
+ /* Green feature */
+ rtl_writephy(tp, 0x1f, 0x0003);
+ rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
+ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0003 },
+ { 0x08, 0x441d },
+ { 0x01, 0x9100 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x11, 1 << 12);
+ rtl_patchphy(tp, 0x19, 1 << 13);
+ rtl_patchphy(tp, 0x10, 1 << 15);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0005 },
+ { 0x1a, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0004 },
+ { 0x1c, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x15, 0x7701 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl_hw_phy_config(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_print_mac_version(tp);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01:
+ break;
+ case RTL_GIGA_MAC_VER_02:
+ case RTL_GIGA_MAC_VER_03:
+ rtl8169s_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_04:
+ rtl8169sb_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_05:
+ rtl8169scd_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_06:
+ rtl8169sce_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_07:
+ case RTL_GIGA_MAC_VER_08:
+ case RTL_GIGA_MAC_VER_09:
+ rtl8102e_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_11:
+ rtl8168bb_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_12:
+ rtl8168bef_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_17:
+ rtl8168bef_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_18:
+ rtl8168cp_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_19:
+ rtl8168c_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_20:
+ rtl8168c_2_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_21:
+ rtl8168c_3_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_22:
+ rtl8168c_4_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ rtl8168cp_2_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_25:
+ rtl8168d_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_26:
+ rtl8168d_2_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_27:
+ rtl8168d_3_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ rtl8168d_4_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ rtl8105e_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_31:
+ /* None. */
+ break;
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl8168e_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_34:
+ rtl8168e_2_hw_phy_config(tp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void rtl8169_phy_timer(unsigned long __opaque)
+{
+ struct net_device *dev = (struct net_device *)__opaque;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long timeout = RTL8169_PHY_TIMEOUT;
+
+ assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
+
+ spin_lock_irq(&tp->lock);
+
+ if (tp->phy_reset_pending(tp)) {
+ /*
+ * A busy loop could burn quite a few cycles on a modern CPU.
+ * Let's delay the execution of the timer for a few ticks.
+ */
+ timeout = HZ/10;
+ goto out_mod_timer;
+ }
+
+ if (tp->link_ok(ioaddr))
+ goto out_unlock;
+
+ netif_warn(tp, link, dev, "PHY reset until link up\n");
+
+ tp->phy_reset_enable(tp);
+
+out_mod_timer:
+ mod_timer(timer, jiffies + timeout);
+out_unlock:
+ spin_unlock_irq(&tp->lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void rtl8169_netpoll(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ disable_irq(pdev->irq);
+ rtl8169_interrupt(pdev->irq, dev);
+ enable_irq(pdev->irq);
+}
+#endif
+
+static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
+ void __iomem *ioaddr)
+{
+ iounmap(ioaddr);
+ pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static void rtl8169_phy_reset(struct net_device *dev,
+ struct rtl8169_private *tp)
+{
+ unsigned int i;
+
+ tp->phy_reset_enable(tp);
+ for (i = 0; i < 100; i++) {
+ if (!tp->phy_reset_pending(tp))
+ return;
+ msleep(1);
+ }
+ netif_err(tp, link, dev, "PHY reset failed\n");
+}
+
++static bool rtl_tbi_enabled(struct rtl8169_private *tp)
++{
++ void __iomem *ioaddr = tp->mmio_addr;
++
++ return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
++ (RTL_R8(PHYstatus) & TBI_Enable);
++}
++
+static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl_hw_phy_config(dev);
+
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+ }
+
+ pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
+
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+ dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ rtl_writephy(tp, 0x0b, 0x0000); /* w 0x0b 15 0 0 */
+ }
+
+ rtl8169_phy_reset(dev, tp);
+
+ rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+ ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ (tp->mii.supports_gmii ?
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full : 0));
+
++ if (rtl_tbi_enabled(tp))
+ netif_info(tp, link, dev, "TBI auto-negotiating\n");
+}
+
+static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 high;
+ u32 low;
+
+ low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
+ high = addr[4] | (addr[5] << 8);
+
+ spin_lock_irq(&tp->lock);
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ RTL_W32(MAC4, high);
+ RTL_R32(MAC4);
+
+ RTL_W32(MAC0, low);
+ RTL_R32(MAC0);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
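+ /* 8168evl: mirror the MAC address into the extended GMAC registers. */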
+ const struct exgmac_reg e[] = {
+ { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
+ { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
+ { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
+ { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
+ low >> 16 },
+ };
+
+ rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ spin_unlock_irq(&tp->lock);
+}
+
+static int rtl_set_mac_address(struct net_device *dev, void *p)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ rtl_rar_set(tp, dev->dev_addr);
+
+ return 0;
+}
+
+static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
+}
+
+static int rtl_xmii_ioctl(struct rtl8169_private *tp,
+ struct mii_ioctl_data *data, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = 32; /* Internal PHY */
+ return 0;
+
+ case SIOCGMIIREG:
+ data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
+ return 0;
+
+ case SIOCSMIIREG:
+ rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct rtl_cfg_info {
+ void (*hw_start)(struct net_device *);
+ unsigned int region;
+ unsigned int align;
+ u16 intr_event;
+ u16 napi_event;
+ unsigned features;
+ u8 default_ver;
+} rtl_cfg_infos [] = {
+ [RTL_CFG_0] = {
+ .hw_start = rtl_hw_start_8169,
+ .region = 1,
+ .align = 0,
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+ .features = RTL_FEATURE_GMII,
+ .default_ver = RTL_GIGA_MAC_VER_01,
+ },
+ [RTL_CFG_1] = {
+ .hw_start = rtl_hw_start_8168,
+ .region = 2,
+ .align = 8,
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ TxErr | TxOK | RxOK | RxErr,
+ .napi_event = TxErr | TxOK | RxOK | RxOverflow,
+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_11,
+ },
+ [RTL_CFG_2] = {
+ .hw_start = rtl_hw_start_8101,
+ .region = 2,
+ .align = 8,
+ .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+ .features = RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_13,
+ }
+};
+
+/* Cfg9346_Unlock assumed. */
+static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+ const struct rtl_cfg_info *cfg)
+{
+ unsigned msi = 0;
+ u8 cfg2;
+
+ cfg2 = RTL_R8(Config2) & ~MSIEnable;
+ if (cfg->features & RTL_FEATURE_MSI) {
+ if (pci_enable_msi(pdev)) {
+ dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ } else {
+ cfg2 |= MSIEnable;
+ msi = RTL_FEATURE_MSI;
+ }
+ }
+ RTL_W8(Config2, cfg2);
+ return msi;
+}
+
+static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
+{
+ if (tp->features & RTL_FEATURE_MSI) {
+ pci_disable_msi(pdev);
+ tp->features &= ~RTL_FEATURE_MSI;
+ }
+}
+
+static const struct net_device_ops rtl8169_netdev_ops = {
+ .ndo_open = rtl8169_open,
+ .ndo_stop = rtl8169_close,
+ .ndo_get_stats = rtl8169_get_stats,
+ .ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_tx_timeout = rtl8169_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = rtl8169_change_mtu,
+ .ndo_fix_features = rtl8169_fix_features,
+ .ndo_set_features = rtl8169_set_features,
+ .ndo_set_mac_address = rtl_set_mac_address,
+ .ndo_do_ioctl = rtl8169_ioctl,
+ .ndo_set_rx_mode = rtl_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = rtl8169_netpoll,
+#endif
+
+};
+
+static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
+{
+ struct mdio_ops *ops = &tp->mdio_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ ops->write = r8168dp_1_mdio_write;
+ ops->read = r8168dp_1_mdio_read;
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ ops->write = r8168dp_2_mdio_write;
+ ops->read = r8168dp_2_mdio_read;
+ break;
+ default:
+ ops->write = r8169_mdio_write;
+ ops->read = r8169_mdio_read;
+ break;
+ }
+}
+
+static void r810x_phy_power_down(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
+static void r810x_phy_power_up(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+}
+
+static void r810x_pll_power_down(struct rtl8169_private *tp)
+{
++ void __iomem *ioaddr = tp->mmio_addr;
++
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
++
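++ /* Keep broadcast/multicast/unicast reception enabled while
++ * powered down so Wake-on-LAN frames still reach the chip.
++ */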
++ if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
++ tp->mac_version == RTL_GIGA_MAC_VER_30)
++ RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
++ AcceptMulticast | AcceptMyPhys);
+ return;
+ }
+
+ r810x_phy_power_down(tp);
+}
+
+static void r810x_pll_power_up(struct rtl8169_private *tp)
+{
+ r810x_phy_power_up(tp);
+}
+
+static void r8168_phy_power_up(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_writephy(tp, 0x0e, 0x0000);
+ break;
+ default:
+ break;
+ }
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+}
+
+static void r8168_phy_power_down(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
+ break;
+
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_writephy(tp, 0x0e, 0x0200);
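+ /* fall through */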
+ default:
+ rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+ break;
+ }
+}
+
+static void r8168_pll_power_down(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) &&
+ r8168dp_check_dash(tp)) {
+ return;
+ }
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_24) &&
+ (RTL_R16(CPlusCmd) & ASF)) {
+ return;
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_33)
+ rtl_ephy_write(ioaddr, 0x19, 0xff64);
+
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
- tp->mac_version == RTL_GIGA_MAC_VER_33)
++ tp->mac_version == RTL_GIGA_MAC_VER_33 ||
++ tp->mac_version == RTL_GIGA_MAC_VER_34)
+ RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+ AcceptMulticast | AcceptMyPhys);
+ return;
+ }
+
+ r8168_phy_power_down(tp);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+ break;
+ }
+}
+
+static void r8168_pll_power_up(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) &&
+ r8168dp_check_dash(tp)) {
+ return;
+ }
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
+ break;
+ }
+
+ r8168_phy_power_up(tp);
+}
+
+static void rtl_pll_power_op(struct rtl8169_private *tp,
+ void (*op)(struct rtl8169_private *))
+{
+ if (op)
+ op(tp);
+}
+
+static void rtl_pll_power_down(struct rtl8169_private *tp)
+{
+ rtl_pll_power_op(tp, tp->pll_power_ops.down);
+}
+
+static void rtl_pll_power_up(struct rtl8169_private *tp)
+{
+ rtl_pll_power_op(tp, tp->pll_power_ops.up);
+}
+
+static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
+{
+ struct pll_power_ops *ops = &tp->pll_power_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_07:
+ case RTL_GIGA_MAC_VER_08:
+ case RTL_GIGA_MAC_VER_09:
+ case RTL_GIGA_MAC_VER_10:
+ case RTL_GIGA_MAC_VER_16:
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ ops->down = r810x_pll_power_down;
+ ops->up = r810x_pll_power_up;
+ break;
+
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
+ ops->down = r8168_pll_power_down;
+ ops->up = r8168_pll_power_up;
+ break;
+
+ default:
+ ops->down = NULL;
+ ops->up = NULL;
+ break;
+ }
+}
+
+static void rtl_init_rxcfg(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01:
+ case RTL_GIGA_MAC_VER_02:
+ case RTL_GIGA_MAC_VER_03:
+ case RTL_GIGA_MAC_VER_04:
+ case RTL_GIGA_MAC_VER_05:
+ case RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10:
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_13:
+ case RTL_GIGA_MAC_VER_14:
+ case RTL_GIGA_MAC_VER_15:
+ case RTL_GIGA_MAC_VER_16:
+ case RTL_GIGA_MAC_VER_17:
+ RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+ break;
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+ break;
+ default:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ break;
+ }
+}
+
+static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+{
+ tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
+static void rtl_hw_reset(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ /* Soft reset the chip. */
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 0; i < 100; i++) {
+ if ((RTL_R8(ChipCmd) & CmdReset) == 0)
+ break;
+ udelay(100);
+ }
+
+ rtl8169_init_ring_indexes(tp);
+}
+
+static int __devinit
+rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ const unsigned int region = cfg->region;
+ struct rtl8169_private *tp;
+ struct mii_if_info *mii;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int chipset, i;
+ int rc;
+
+ if (netif_msg_drv(&debug)) {
+ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+ MODULENAME, RTL8169_VERSION);
+ }
+
+ dev = alloc_etherdev(sizeof (*tp));
+ if (!dev) {
+ if (netif_msg_drv(&debug))
+ dev_err(&pdev->dev, "unable to alloc new ethernet\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->netdev_ops = &rtl8169_netdev_ops;
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
+ mii = &tp->mii;
+ mii->dev = dev;
+ mii->mdio_read = rtl_mdio_read;
+ mii->mdio_write = rtl_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+
+ /* Disable ASPM completely as it causes random devices to stop
+ * working and full system hangs for some PCIe device users. */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "enable failure\n");
+ goto err_out_free_dev_1;
+ }
+
+ if (pci_set_mwi(pdev) < 0)
+ netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
+
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ rc = pci_request_regions(pdev, MODULENAME);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "could not request regions\n");
+ goto err_out_mwi_2;
+ }
+
+ tp->cp_cmd = RxChkSum;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_free_res_3;
+ }
+ }
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ if (!ioaddr) {
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+ goto err_out_free_res_3;
+ }
+ tp->mmio_addr = ioaddr;
+
+ if (!pci_is_pcie(pdev))
+ netif_info(tp, probe, dev, "not PCI Express\n");
+
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+
+ rtl_init_rxcfg(tp);
+
+ RTL_W16(IntrMask, 0x0000);
+
+ rtl_hw_reset(tp);
+
+ RTL_W16(IntrStatus, 0xffff);
+
+ pci_set_master(pdev);
+
+ /*
+ * Pretend we are using VLANs; this bypasses a nasty bug where
+ * interrupts stop flowing under high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ tp->cp_cmd |= RxVlan;
+
+ rtl_init_mdio_ops(tp);
+ rtl_init_pll_power_ops(tp);
+
+ rtl8169_print_mac_version(tp);
+
+ chipset = tp->mac_version;
+ tp->txd_version = rtl_chip_infos[chipset].txd_version;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
- if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
- (RTL_R8(PHYstatus) & TBI_Enable)) {
++ if (rtl_tbi_enabled(tp)) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+ tp->phy_reset_enable = rtl8169_tbi_reset_enable;
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+ tp->do_ioctl = rtl_tbi_ioctl;
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
+ tp->do_ioctl = rtl_xmii_ioctl;
+ }
+
+ spin_lock_init(&tp->lock);
+
+ /* Get MAC address */
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long) ioaddr;
+
+ netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
+
+ /* don't enable SG, IP_CSUM and TSO by default - they might not work
+ * properly on all devices */
+ dev->features |= NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
+
+ tp->intr_mask = 0xffff;
+ tp->hw_start = cfg->hw_start;
+ tp->intr_event = cfg->intr_event;
+ tp->napi_event = cfg->napi_event;
+
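++ /* The RxBOVF and RxFOVF bits are only defined on the original
++ * 8169 (VER_01); mask them out of opts1 on every later chip.
++ */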
++ tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
++ ~(RxBOVF | RxFOVF) : ~0;
++
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long) dev;
+ tp->timer.function = rtl8169_phy_timer;
+
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto err_out_msi_4;
+
+ pci_set_drvdata(pdev, dev);
+
+ netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
+ rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_start(tp);
+ }
+
+ device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ netif_carrier_off(dev);
+
+out:
+ return rc;
+
+err_out_msi_4:
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+err_out_free_res_3:
+ pci_release_regions(pdev);
+err_out_mwi_2:
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+err_out_free_dev_1:
+ free_netdev(dev);
+ goto out;
+}
+
+static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_stop(tp);
+ }
+
+ cancel_delayed_work_sync(&tp->task);
+
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /* restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
+
+ rtl_disable_msi(pdev, tp);
+ rtl8169_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+{
+ struct rtl_fw *rtl_fw;
+ const char *name;
+ int rc = -ENOMEM;
+
+ name = rtl_lookup_firmware_name(tp);
+ if (!name)
+ goto out_no_firmware;
+
+ rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
+ if (!rtl_fw)
+ goto err_warn;
+
+ rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
+ if (rc < 0)
+ goto err_free;
+
+ rc = rtl_check_firmware(tp, rtl_fw);
+ if (rc < 0)
+ goto err_release_firmware;
+
+ tp->rtl_fw = rtl_fw;
+out:
+ return;
+
+err_release_firmware:
+ release_firmware(rtl_fw->fw);
+err_free:
+ kfree(rtl_fw);
+err_warn:
+ netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
+ name, rc);
+out_no_firmware:
+ tp->rtl_fw = NULL;
+ goto out;
+}
+
+static void rtl_request_firmware(struct rtl8169_private *tp)
+{
+ if (IS_ERR(tp->rtl_fw))
+ rtl_request_uncached_firmware(tp);
+}
+
+static int rtl8169_open(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+ int retval = -ENOMEM;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /*
+ * Rx and Tx descriptors need 256 byte alignment.
+ * dma_alloc_coherent provides more.
+ */
+ tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr, GFP_KERNEL);
+ if (!tp->TxDescArray)
+ goto err_pm_runtime_put;
+
+ tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr, GFP_KERNEL);
+ if (!tp->RxDescArray)
+ goto err_free_tx_0;
+
+ retval = rtl8169_init_ring(dev);
+ if (retval < 0)
+ goto err_free_rx_1;
+
+ INIT_DELAYED_WORK(&tp->task, NULL);
+
+ smp_mb();
+
+ rtl_request_firmware(tp);
+
+ retval = request_irq(dev->irq, rtl8169_interrupt,
+ (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
+ dev->name, dev);
+ if (retval < 0)
+ goto err_release_fw_2;
+
+ napi_enable(&tp->napi);
+
+ rtl8169_init_phy(dev, tp);
+
+ rtl8169_set_features(dev, dev->features);
+
+ rtl_pll_power_up(tp);
+
+ rtl_hw_start(dev);
+
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
+ rtl8169_check_link_status(dev, tp, ioaddr);
+out:
+ return retval;
+
+err_release_fw_2:
+ rtl_release_firmware(tp);
+ rtl8169_rx_clear(tp);
+err_free_rx_1:
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
+err_free_tx_0:
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ goto out;
+}
+
+static void rtl_rx_close(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
+}
+
+static void rtl8169_hw_reset(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Disable interrupts */
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+ rtl_rx_close(tp);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ while (RTL_R8(TxPoll) & NPQ)
+ udelay(20);
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
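++ /* 8168evl: request a stop, then wait for the transmitter
++ * to drain before the reset below.
++ */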
++ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
+ udelay(100);
+ } else {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ udelay(100);
+ }
+
+ rtl_hw_reset(tp);
+}
+
+static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Set DMA burst size and Interframe Gap Time */
+ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ (InterFrameGap << TxInterFrameGapShift));
+}
+
+static void rtl_hw_start(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ tp->hw_start(dev);
+
+ netif_start_queue(dev);
+}
+
+static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ /*
+ * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
+ * register to be written before TxDescAddrLow to work.
+ * Switching from MMIO to I/O access fixes the issue as well.
+ */
+ RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
+ RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
+ RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
+ RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
+}
+
+static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
+{
+ u16 cmd;
+
+ cmd = RTL_R16(CPlusCmd);
+ RTL_W16(CPlusCmd, cmd);
+ return cmd;
+}
+
+static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
+{
+ /* Low hurts. Let's disable the filtering. */
+ RTL_W16(RxMaxSize, rx_buf_sz + 1);
+}
+
+static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
+{
+ static const struct rtl_cfg2_info {
+ u32 mac_version;
+ u32 clk;
+ u32 val;
+ } cfg2_info [] = {
+ { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
+ { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
+ { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
+ { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
+ };
+ const struct rtl_cfg2_info *p = cfg2_info;
+ unsigned int i;
+ u32 clk;
+
+ clk = RTL_R8(Config2) & PCI_Clock_66MHz;
+ for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
+ if ((p->mac_version == mac_version) && (p->clk == clk)) {
+ RTL_W32(0x7c, p->val);
+ break;
+ }
+ }
+}
+
+static void rtl_hw_start_8169(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+ rtl_init_rxcfg(tp);
+
+ RTL_W8(EarlyTxThres, NoEarlyTx);
+
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
+ rtl_set_rx_tx_config_registers(tp);
+
+ tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
+ dprintk("Set MAC Reg C+CR Offset 0xE0. "
+ "Bit-3 and bit-14 MUST be 1\n");
+ tp->cp_cmd |= (1 << 14);
+ }
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ rtl8169_set_magic_reg(ioaddr, tp->mac_version);
+
+ /*
+ * Undocumented corner. Supposedly:
+ * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
+ */
+ RTL_W16(IntrMitigate, 0x0000);
+
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+ if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_02 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_03 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_04) {
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_tx_config_registers(tp);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(IntrMask);
+
+ RTL_W32(RxMissed, 0);
+
+ rtl_set_rx_mode(dev);
+
+ /* no early-rx interrupts */
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ RTL_W16(IntrMask, tp->intr_event);
+}
+
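+/* Force the PCIe maximum read request size (the PCI_EXP_DEVCTL_READRQ field). */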
+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
+{
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+}
+
+static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
+{
+ u32 csi;
+
+ csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
+ rtl_csi_write(ioaddr, 0x070c, csi | bits);
+}
+
+static void rtl_csi_access_enable_1(void __iomem *ioaddr)
+{
+ rtl_csi_access_enable(ioaddr, 0x17000000);
+}
+
+static void rtl_csi_access_enable_2(void __iomem *ioaddr)
+{
+ rtl_csi_access_enable(ioaddr, 0x27000000);
+}
+
+struct ephy_info {
+ unsigned int offset;
+ u16 mask;
+ u16 bits;
+};
+
+static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
+{
+ u16 w;
+
+ while (len-- > 0) {
+ w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
+ rtl_ephy_write(ioaddr, e->offset, w);
+ e++;
+ }
+}
+
+static void rtl_disable_clock_request(struct pci_dev *pdev)
+{
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
+ ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
+ }
+}
+
+static void rtl_enable_clock_request(struct pci_dev *pdev)
+{
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
+ ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
+ }
+}
+
+#define R8168_CPCMD_QUIRK_MASK (\
+ EnableBist | \
+ Mac_dbgo_oe | \
+ Force_half_dup | \
+ Force_rxflow_en | \
+ Force_txflow_en | \
+ Cxpl_dbg_sel | \
+ ASF | \
+ PktCntrDisable | \
+ Mac_dbgo_sel)
+
+static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+
+ rtl_tx_performance_tweak(pdev,
+ (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8168bb(ioaddr, pdev);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+}
+
+static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168cp[] = {
+ { 0x01, 0, 0x0001 },
+ { 0x02, 0x0800, 0x1000 },
+ { 0x03, 0, 0x0042 },
+ { 0x06, 0x0080, 0x0000 },
+ { 0x07, 0, 0x2000 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ /* Magic. */
+ RTL_W8(DBG_REG, 0x20);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168c_1[] = {
+ { 0x02, 0x0800, 0x1000 },
+ { 0x03, 0, 0x0002 },
+ { 0x06, 0x0080, 0x0000 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
+
+ rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168c_2[] = {
+ { 0x01, 0, 0x0001 },
+ { 0x03, 0x0400, 0x0220 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8168c_2(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_disable_clock_request(pdev);
+}
+
+static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168d_4[] = {
+ { 0x0b, ~0, 0x48 },
+ { 0x19, 0x20, 0x50 },
+ { 0x0c, ~0, 0x20 }
+ };
+ int i;
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
+ const struct ephy_info *e = e_info_8168d_4 + i;
+ u16 w;
+
+ w = rtl_ephy_read(ioaddr, e->offset);
+ rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
+ }
+
+ rtl_enable_clock_request(pdev);
+}
+
+static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168e_1[] = {
+ { 0x00, 0x0200, 0x0100 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x06, 0x0002, 0x0001 },
+ { 0x06, 0x0000, 0x0030 },
+ { 0x07, 0x0000, 0x2000 },
+ { 0x00, 0x0000, 0x0020 },
+ { 0x03, 0x5800, 0x2000 },
+ { 0x03, 0x0000, 0x0001 },
+ { 0x01, 0x0800, 0x1000 },
+ { 0x07, 0x0000, 0x4000 },
+ { 0x1e, 0x0000, 0x2000 },
+ { 0x19, 0xffff, 0xfe6c },
+ { 0x0a, 0x0000, 0x0040 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_disable_clock_request(pdev);
+
+ /* Reset tx FIFO pointer */
+ RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
+ RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
+
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
+static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168e_2[] = {
+ { 0x09, 0x0000, 0x0080 },
+ { 0x19, 0x0000, 0x0224 }
+ };
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+ ERIAR_EXGMAC);
+
+ RTL_W8(MaxTxPacketSize, 0x27);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
+static void rtl_hw_start_8168(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+
+ tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ RTL_W16(IntrMitigate, 0x5151);
+
+ /* Work around for RxFIFO overflow. */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ tp->intr_event |= RxFIFOOver | PCSTimeout;
+ tp->intr_event &= ~RxOverflow;
+ }
+
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+ rtl_set_rx_mode(dev);
+
+ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ (InterFrameGap << TxInterFrameGapShift));
+
+ RTL_R8(IntrMask);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ rtl_hw_start_8168bb(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ rtl_hw_start_8168bef(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_18:
+ rtl_hw_start_8168cp_1(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_19:
+ rtl_hw_start_8168c_1(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_20:
+ rtl_hw_start_8168c_2(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_21:
+ rtl_hw_start_8168c_3(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_22:
+ rtl_hw_start_8168c_4(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_23:
+ rtl_hw_start_8168cp_2(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_24:
+ rtl_hw_start_8168cp_3(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ rtl_hw_start_8168d(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_28:
+ rtl_hw_start_8168d_4(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_31:
+ rtl_hw_start_8168dp(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl_hw_start_8168e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_34:
+ rtl_hw_start_8168e_2(ioaddr, pdev);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
+ dev->name, tp->mac_version);
+ break;
+ }
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+ RTL_W16(IntrMask, tp->intr_event);
+}
+
+#define R810X_CPCMD_QUIRK_MASK (\
+ EnableBist | \
+ Mac_dbgo_oe | \
+ Force_half_dup | \
+ Force_rxflow_en | \
+ Force_txflow_en | \
+ Cxpl_dbg_sel | \
+ ASF | \
+ PktCntrDisable | \
+ Mac_dbgo_sel)
+
+static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8102e_1[] = {
+ { 0x01, 0, 0x6e65 },
+ { 0x02, 0, 0x091f },
+ { 0x03, 0, 0xc2f9 },
+ { 0x06, 0, 0xafb5 },
+ { 0x07, 0, 0x0e00 },
+ { 0x19, 0, 0xec80 },
+ { 0x01, 0, 0x2e65 },
+ { 0x01, 0, 0x6e65 }
+ };
+ u8 cfg1;
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(DBG_REG, FIX_NAK_1);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(Config1,
+ LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ cfg1 = RTL_R8(Config1);
+ if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
+ RTL_W8(Config1, cfg1 & ~LEDS0);
+
+ rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+}
+
+static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+}
+
+static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8102e_2(ioaddr, pdev);
+
+ rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
+}
+
+static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8105e_1[] = {
+ { 0x07, 0, 0x4000 },
+ { 0x19, 0, 0x0200 },
+ { 0x19, 0, 0x0020 },
+ { 0x1e, 0, 0x2000 },
+ { 0x03, 0, 0x0001 },
+ { 0x19, 0, 0x0100 },
+ { 0x19, 0, 0x0004 },
+ { 0x0a, 0, 0x0020 }
+ };
+
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+ /* Disable Early Tally Counter */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+
+ rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+}
+
+static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+}
+
+static void rtl_hw_start_8101(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_16) {
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_07:
+ rtl_hw_start_8102e_1(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_08:
+ rtl_hw_start_8102e_3(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_09:
+ rtl_hw_start_8102e_2(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_29:
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_30:
+ rtl_hw_start_8105e_2(ioaddr, pdev);
+ break;
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ RTL_W16(IntrMitigate, 0x0000);
+
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_tx_config_registers(tp);
+
+ RTL_R8(IntrMask);
+
+ rtl_set_rx_mode(dev);
+
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+
+ RTL_W16(IntrMask, tp->intr_event);
+}
+
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
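+/* Drop DescOwn and poison the buffer address so the chip leaves the
+ * descriptor alone.
+ */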
+static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
+{
+ desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
+ desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
+}
+
+static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
+ void **data_buff, struct RxDesc *desc)
+{
+ dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
+ DMA_FROM_DEVICE);
+
+ kfree(*data_buff);
+ *data_buff = NULL;
+ rtl8169_make_unusable_by_asic(desc);
+}
+
+static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+{
+ u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+
+ desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
+}
+
+static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+{
+ desc->addr = cpu_to_le64(mapping);
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+}
+
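+/* Rx data buffers handed to the chip are aligned on a 16 byte boundary;
+ * rtl8169_alloc_rx_data over-allocates by 15 bytes when kmalloc does not
+ * return a suitably aligned buffer.
+ */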
+static inline void *rtl8169_align(void *data)
+{
+ return (void *)ALIGN((long)data, 16);
+}
+
+static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
+ struct RxDesc *desc)
+{
+ void *data;
+ dma_addr_t mapping;
+ struct device *d = &tp->pci_dev->dev;
+ struct net_device *dev = tp->dev;
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+
+ data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!data)
+ return NULL;
+
+ if (rtl8169_align(data) != data) {
+ kfree(data);
+ data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
+ if (!data)
+ return NULL;
+ }
+
+ mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
+ goto err_out;
+ }
+
+ rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
+ return data;
+
+err_out:
+ kfree(data);
+ return NULL;
+}
+
+static void rtl8169_rx_clear(struct rtl8169_private *tp)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ if (tp->Rx_databuff[i]) {
+ rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
+ tp->RxDescArray + i);
+ }
+ }
+}
+
+static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
+{
+ desc->opts1 |= cpu_to_le32(RingEnd);
+}
+
+static int rtl8169_rx_fill(struct rtl8169_private *tp)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ void *data;
+
+ if (tp->Rx_databuff[i])
+ continue;
+
+ data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
+ if (!data) {
+ rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
+ goto err_out;
+ }
+ tp->Rx_databuff[i] = data;
+ }
+
+ rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+ return 0;
+
+err_out:
+ rtl8169_rx_clear(tp);
+ return -ENOMEM;
+}
+
+static int rtl8169_init_ring(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_init_ring_indexes(tp);
+
+ memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
+ memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
+
+ return rtl8169_rx_fill(tp);
+}
+
+static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
+ struct TxDesc *desc)
+{
+ unsigned int len = tx_skb->len;
+
+ dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
+
+ desc->opts1 = 0x00;
+ desc->opts2 = 0x00;
+ desc->addr = 0x00;
+ tx_skb->len = 0;
+}
+
+static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
+ unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ unsigned int entry = (start + i) % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ unsigned int len = tx_skb->len;
+
+ if (len) {
+ struct sk_buff *skb = tx_skb->skb;
+
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (skb) {
+ tp->dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ tx_skb->skb = NULL;
+ }
+ }
+ }
+}
+
+static void rtl8169_tx_clear(struct rtl8169_private *tp)
+{
+ rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
+ tp->cur_tx = tp->dirty_tx = 0;
+}
+
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ PREPARE_DELAYED_WORK(&tp->task, task);
+ schedule_delayed_work(&tp->task, 4);
+}
+
+static void rtl8169_wait_for_quiescence(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ synchronize_irq(dev->irq);
+
+ /* Wait for any pending NAPI task to complete */
+ napi_disable(&tp->napi);
+
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+ tp->intr_mask = 0xffff;
+ RTL_W16(IntrMask, tp->intr_event);
+ napi_enable(&tp->napi);
+}
+
+static void rtl8169_reinit_task(struct work_struct *work)
+{
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
+ int ret;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ rtl8169_wait_for_quiescence(dev);
+ rtl8169_close(dev);
+
+ ret = rtl8169_open(dev);
+ if (unlikely(ret < 0)) {
+ if (net_ratelimit())
+ netif_err(tp, drv, dev,
+ "reinit failure (status = %d). Rescheduling\n",
+ ret);
+ rtl8169_schedule_work(dev, rtl8169_reinit_task);
+ }
+
+out_unlock:
+ rtnl_unlock();
+}
+
+static void rtl8169_reset_task(struct work_struct *work)
+{
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
+ int i;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ rtl8169_wait_for_quiescence(dev);
+
+ for (i = 0; i < NUM_RX_DESC; i++)
+ rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
+
+ rtl8169_tx_clear(tp);
+
+ rtl8169_hw_reset(tp);
+ rtl_hw_start(dev);
+ netif_wake_queue(dev);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+
+out_unlock:
+ rtnl_unlock();
+}
+
+static void rtl8169_tx_timeout(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_hw_reset(tp);
+
+ /* Let's wait a bit so any (async) irq can land. */
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
+static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
+ u32 *opts)
+{
+ struct skb_shared_info *info = skb_shinfo(skb);
+ unsigned int cur_frag, entry;
+ struct TxDesc * uninitialized_var(txd);
+ struct device *d = &tp->pci_dev->dev;
+
+ entry = tp->cur_tx;
+ for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
+ skb_frag_t *frag = info->frags + cur_frag;
+ dma_addr_t mapping;
+ u32 status, len;
+ void *addr;
+
+ entry = (entry + 1) % NUM_TX_DESC;
+
+ txd = tp->TxDescArray + entry;
+ len = frag->size;
+ addr = skb_frag_address(frag);
+ mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev,
+ "Failed to map TX fragments DMA!\n");
+ goto err_out;
+ }
+
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len |
+ (RingEnd * !((entry + 1) % NUM_TX_DESC));
+
+ txd->opts1 = cpu_to_le32(status);
+ txd->opts2 = cpu_to_le32(opts[1]);
+ txd->addr = cpu_to_le64(mapping);
+
+ tp->tx_skb[entry].len = len;
+ }
+
+ if (cur_frag) {
+ tp->tx_skb[entry].skb = skb;
+ txd->opts1 |= cpu_to_le32(LastFrag);
+ }
+
+ return cur_frag;
+
+err_out:
+ rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
+ return -EIO;
+}
+
+static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+ struct sk_buff *skb, u32 *opts)
+{
+ const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int offset = info->opts_offset;
+
+ if (mss) {
+ opts[0] |= TD_LSO;
+ opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+
+ if (ip->protocol == IPPROTO_TCP)
+ opts[offset] |= info->checksum.tcp;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts[offset] |= info->checksum.udp;
+ else
+ WARN_ON_ONCE(1);
+ }
+}
+
+static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned int entry = tp->cur_tx % NUM_TX_DESC;
+ struct TxDesc *txd = tp->TxDescArray + entry;
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct device *d = &tp->pci_dev->dev;
+ dma_addr_t mapping;
+ u32 status, len;
+ u32 opts[2];
+ int frags;
+
+ if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
+ goto err_stop_0;
+ }
+
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+ len = skb_headlen(skb);
+ mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
+ goto err_dma_0;
+ }
+
+ tp->tx_skb[entry].len = len;
+ txd->addr = cpu_to_le64(mapping);
+
+ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+ opts[0] = DescOwn;
+
+ rtl8169_tso_csum(tp, skb, opts);
+
+ frags = rtl8169_xmit_frags(tp, skb, opts);
+ if (frags < 0)
+ goto err_dma_1;
+ else if (frags)
+ opts[0] |= FirstFrag;
+ else {
+ opts[0] |= FirstFrag | LastFrag;
+ tp->tx_skb[entry].skb = skb;
+ }
+
+ txd->opts2 = cpu_to_le32(opts[1]);
+
+ wmb();
+
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
+ txd->opts1 = cpu_to_le32(status);
+
+ tp->cur_tx += frags + 1;
+
+ wmb();
+
+ RTL_W8(TxPoll, NPQ);
+
+ if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ netif_stop_queue(dev);
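+ /* A Tx completion may have freed descriptors between the
+ * availability check above and netif_stop_queue(); recheck
+ * so the queue is not left stopped with room available.
+ */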
+ smp_rmb();
+ if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+ netif_wake_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+
+err_dma_1:
+ rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+err_dma_0:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+
+err_stop_0:
+ netif_stop_queue(dev);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_BUSY;
+}
+
+static void rtl8169_pcierr_interrupt(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+ u16 pci_status, pci_cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+ pci_cmd, pci_status);
+
+ /*
+ * The recovery sequence below admits a very elaborated explanation:
+ * - it seems to work;
+ * - I did not see what else could be done;
+ * - it makes iop3xx happy.
+ *
+ * Feel free to adjust to your needs.
+ */
+ if (pdev->broken_parity_status)
+ pci_cmd &= ~PCI_COMMAND_PARITY;
+ else
+ pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
+
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+
+ pci_write_config_word(pdev, PCI_STATUS,
+ pci_status & (PCI_STATUS_DETECTED_PARITY |
+ PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
+
+ /* The infamous DAC f*ckup only happens at boot time */
+ if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ netif_info(tp, intr, dev, "disabling PCI DAC\n");
+ tp->cp_cmd &= ~PCIDAC;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ dev->features &= ~NETIF_F_HIGHDMA;
+ }
+
+ rtl8169_hw_reset(tp);
+
+ rtl8169_schedule_work(dev, rtl8169_reinit_task);
+}
+
+static void rtl8169_tx_interrupt(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ unsigned int dirty_tx, tx_left;
+
+ dirty_tx = tp->dirty_tx;
+ smp_rmb();
+ tx_left = tp->cur_tx - dirty_tx;
+
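+	/* Walk the ring from dirty_tx towards cur_tx, reclaiming every
+	 * descriptor the chip has released (DescOwn cleared).
+	 */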
+ while (tx_left > 0) {
+ unsigned int entry = dirty_tx % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ if (status & DescOwn)
+ break;
+
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (status & LastFrag) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += tx_skb->skb->len;
+ dev_kfree_skb(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+ dirty_tx++;
+ tx_left--;
+ }
+
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ smp_wmb();
+ if (netif_queue_stopped(dev) &&
+ (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+ netif_wake_queue(dev);
+ }
+ /*
+ * 8168 hack: TxPoll requests are lost when the Tx packets are
+ * too close. Let's kick an extra TxPoll request when a burst
+ * of start_xmit activity is detected (if it is not detected,
+ * it is slow enough). -- FR
+ */
+ smp_rmb();
+ if (tp->cur_tx != dirty_tx)
+ RTL_W8(TxPoll, NPQ);
+ }
+}
+
+static inline int rtl8169_fragmented_frame(u32 status)
+{
+ return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
+}
+
+static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
+{
+ u32 status = opts1 & RxProtoMask;
+
+ if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+}
+
+static struct sk_buff *rtl8169_try_rx_copy(void *data,
+ struct rtl8169_private *tp,
+ int pkt_size,
+ dma_addr_t addr)
+{
+ struct sk_buff *skb;
+ struct device *d = &tp->pci_dev->dev;
+
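+	/* Copy the frame into a freshly allocated skb so the original
+	 * DMA buffer can be handed straight back to the chip.
+	 */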
+ data = rtl8169_align(data);
+ dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
+ prefetch(data);
+ skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+ if (skb)
+ memcpy(skb->data, data, pkt_size);
+ dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
+
+ return skb;
+}
+
+static int rtl8169_rx_interrupt(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr, u32 budget)
+{
+ unsigned int cur_rx, rx_left;
+ unsigned int count;
+
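+	/* At most NUM_RX_DESC descriptors separate cur_rx from dirty_rx;
+	 * cap the work at the NAPI budget.
+	 */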
+ cur_rx = tp->cur_rx;
+ rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+ rx_left = min(rx_left, budget);
+
+ for (; rx_left > 0; rx_left--, cur_rx++) {
+ unsigned int entry = cur_rx % NUM_RX_DESC;
+ struct RxDesc *desc = tp->RxDescArray + entry;
+ u32 status;
+
+ rmb();
+		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
+
+ if (status & DescOwn)
+ break;
+ if (unlikely(status & RxRES)) {
+ netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+ status);
+ dev->stats.rx_errors++;
+ if (status & (RxRWT | RxRUNT))
+ dev->stats.rx_length_errors++;
+ if (status & RxCRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RxFOVF) {
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ dev->stats.rx_fifo_errors++;
+ }
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ } else {
+ struct sk_buff *skb;
+ dma_addr_t addr = le64_to_cpu(desc->addr);
+ int pkt_size = (status & 0x00001FFF) - 4;
+
+ /*
+ * The driver does not support incoming fragmented
+ * frames. They are seen as a symptom of over-mtu
+ * sized frames.
+ */
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ continue;
+ }
+
+ skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+ tp, pkt_size, addr);
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ rtl8169_rx_csum(skb, status);
+ skb_put(skb, pkt_size);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ rtl8169_rx_vlan_tag(desc, skb);
+
+ napi_gro_receive(&tp->napi, skb);
+
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
+
+		/* Workaround for AMD platforms. */
+ if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+ desc->opts2 = 0;
+ cur_rx++;
+ }
+ }
+
+ count = cur_rx - tp->cur_rx;
+ tp->cur_rx = cur_rx;
+
+ tp->dirty_rx += count;
+
+ return count;
+}
+
+static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int handled = 0;
+ int status;
+
+ /* loop handling interrupts until we have no new ones or
+	 * we hit an invalid/hotplug case.
+ */
+ status = RTL_R16(IntrStatus);
+ while (status && status != 0xffff) {
+ handled = 1;
+
+ /* Handle all of the error cases first. These will reset
+ * the chip, so just exit the loop.
+ */
+ if (unlikely(!netif_running(dev))) {
+ rtl8169_hw_reset(tp);
+ break;
+ }
+
+ if (unlikely(status & RxFIFOOver)) {
+ switch (tp->mac_version) {
+			/* Workaround for Rx FIFO overflow */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_26:
+ netif_stop_queue(dev);
+ rtl8169_tx_timeout(dev);
+ goto done;
+ /* Testers needed. */
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ /* Experimental science. Pktgen proof. */
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_25:
+ if (status == RxFIFOOver)
+ goto done;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(dev);
+ break;
+ }
+
+ if (status & LinkChg)
+ __rtl8169_check_link_status(dev, tp, ioaddr, true);
+
+		/* We need to see the latest version of tp->intr_mask to
+ * avoid ignoring an MSI interrupt and having to wait for
+ * another event which may never come.
+ */
+ smp_rmb();
+ if (status & tp->intr_mask & tp->napi_event) {
+ RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+ tp->intr_mask = ~tp->napi_event;
+
+ if (likely(napi_schedule_prep(&tp->napi)))
+ __napi_schedule(&tp->napi);
+ else
+ netif_info(tp, intr, dev,
+ "interrupt %04x in poll\n", status);
+ }
+
+ /* We only get a new MSI interrupt when all active irq
+ * sources on the chip have been acknowledged. So, ack
+ * everything we've seen and check if new sources have become
+ * active to avoid blocking all interrupts from the chip.
+ */
+ RTL_W16(IntrStatus,
+ (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ status = RTL_R16(IntrStatus);
+ }
+done:
+ return IRQ_RETVAL(handled);
+}
+
+static int rtl8169_poll(struct napi_struct *napi, int budget)
+{
+ struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int work_done;
+
+ work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
+ rtl8169_tx_interrupt(dev, tp, ioaddr);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+		/* We need to force the visibility of tp->intr_mask
+		 * for other CPUs, as we can lose an MSI interrupt
+		 * and potentially wait for a retransmit timeout if we don't.
+		 * The posted write to IntrMask is safe, as it will
+		 * eventually make it to the chip and we won't lose anything
+		 * until it does.
+ */
+ tp->intr_mask = 0xffff;
+ wmb();
+ RTL_W16(IntrMask, tp->intr_event);
+ }
+
+ return work_done;
+}
+
+static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ return;
+
+ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
+ RTL_W32(RxMissed, 0);
+}
+
+static void rtl8169_down(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ del_timer_sync(&tp->timer);
+
+ netif_stop_queue(dev);
+
+ napi_disable(&tp->napi);
+
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_hw_reset(tp);
+ /*
+ * At this point device interrupts can not be enabled in any function,
+ * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
+ * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
+ */
+ rtl8169_rx_missed(dev, ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
+ synchronize_irq(dev->irq);
+
+ /* Give a racing hard_start_xmit a few cycles to complete. */
+ synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
+
+ rtl8169_tx_clear(tp);
+
+ rtl8169_rx_clear(tp);
+
+ rtl_pll_power_down(tp);
+}
+
+static int rtl8169_close(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* Update counters before going down */
+ rtl8169_update_counters(dev);
+
+ rtl8169_down(dev);
+
+ free_irq(dev->irq, dev);
+
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+ tp->RxDescArray = NULL;
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static void rtl_set_rx_mode(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
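+			/* The top six bits of the address CRC select one
+			 * of the 64 multicast hash filter bits.
+			 */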
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ u32 data = mc_filter[0];
+
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(data);
+ }
+
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+
+ RTL_W32(RxConfig, tmp);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+/**
+ * rtl8169_get_stats - Get rtl8169 read/write statistics
+ * @dev: The Ethernet Device to get statistics for
+ *
+ * Get TX/RX statistics for rtl8169
+ */
+static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&tp->lock, flags);
+ rtl8169_rx_missed(dev, ioaddr);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ return &dev->stats;
+}
+
+static void rtl8169_net_suspend(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ rtl_pll_power_down(tp);
+
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
+}
+
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static void __rtl8169_resume(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ netif_device_attach(dev);
+
+ rtl_pll_power_up(tp);
+
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
+static int rtl8169_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_init_phy(dev, tp);
+
+ if (netif_running(dev))
+ __rtl8169_resume(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
+ __rtl8169_set_wol(tp, WAKE_ANY);
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
+ tp->saved_wolopts = 0;
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_init_phy(dev, tp);
+
+ __rtl8169_resume(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return tp->TxDescArray ? -EBUSY : 0;
+}
+
+static const struct dev_pm_ops rtl8169_pm_ops = {
+ .suspend = rtl8169_suspend,
+ .resume = rtl8169_resume,
+ .freeze = rtl8169_suspend,
+ .thaw = rtl8169_resume,
+ .poweroff = rtl8169_suspend,
+ .restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
+};
+
+#define RTL8169_PM_OPS (&rtl8169_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define RTL8169_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+static void rtl_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl8169_net_suspend(dev);
+
+ /* Restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
+
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_hw_reset(tp);
+
+ spin_unlock_irq(&tp->lock);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ /* WoL fails with 8168b when the receiver is disabled. */
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_12 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_17) &&
+ (tp->features & RTL_FEATURE_WOL)) {
+ pci_clear_master(pdev);
+
+ RTL_W8(ChipCmd, CmdRxEnb);
+ /* PCI commit */
+ RTL_R8(ChipCmd);
+ }
+
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static struct pci_driver rtl8169_pci_driver = {
+ .name = MODULENAME,
+ .id_table = rtl8169_pci_tbl,
+ .probe = rtl8169_init_one,
+ .remove = __devexit_p(rtl8169_remove_one),
+ .shutdown = rtl_shutdown,
+ .driver.pm = RTL8169_PM_OPS,
+};
+
+static int __init rtl8169_init_module(void)
+{
+ return pci_register_driver(&rtl8169_pci_driver);
+}
+
+static void __exit rtl8169_cleanup_module(void)
+{
+ pci_unregister_driver(&rtl8169_pci_driver);
+}
+
+module_init(rtl8169_init_module);
+module_exit(rtl8169_cleanup_module);
--- /dev/null
+/*
+ * SuperH Ethernet device driver
+ *
+ * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
+ * Copyright (C) 2008-2009 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/cache.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+
+#include "sh_eth.h"
+
+#define SH_ETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+		NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* There is CPU dependent code */
+#if defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define SH_ETH_RESET_DEFAULT 1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7724 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
+#define SH_ETH_HAS_BOTH_MODULES 1
+#define SH_ETH_HAS_TSU 1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, 0, RTRATE);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, 1, RTRATE);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7757 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .rmcr_value = 0x00000001,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .no_ade = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+};
+
+#define SH_GIGA_ETH_BASE 0xfee00000
+#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
+#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
+static void sh_eth_chip_reset_giga(struct net_device *ndev)
+{
+ int i;
+ unsigned long mahr[2], malr[2];
+
+ /* save MAHR and MALR */
+ for (i = 0; i < 2; i++) {
+ malr[i] = readl(GIGA_MALR(i));
+ mahr[i] = readl(GIGA_MAHR(i));
+ }
+
+ /* reset device */
+ writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
+ mdelay(1);
+
+ /* restore MAHR and MALR */
+ for (i = 0; i < 2; i++) {
+ writel(malr[i], GIGA_MALR(i));
+ writel(mahr[i], GIGA_MAHR(i));
+ }
+}
+
+static int sh_eth_is_gether(struct sh_eth_private *mdp);
+static void sh_eth_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int cnt = 100;
+
+ if (sh_eth_is_gether(mdp)) {
+ sh_eth_write(ndev, 0x03, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
+ EDMR);
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+		if (cnt == 0)
+			printk(KERN_ERR "Device reset failed\n");
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+ } else {
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
+ EDMR);
+ mdelay(3);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
+ EDMR);
+ }
+}
+
+static void sh_eth_set_duplex_giga(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate_giga(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, 0x00000000, GECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, 0x00000010, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ sh_eth_write(ndev, 0x00000020, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7757(GETHERC) */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+ .chip_reset = sh_eth_chip_reset_giga,
+ .set_duplex = sh_eth_set_duplex_giga,
+ .set_rate = sh_eth_set_rate_giga,
+
+ .ecsr_value = ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+ .fdr_value = 0x0000072f,
+ .rmcr_value = 0x00000001,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+};
+
+static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
+{
+ if (sh_eth_is_gether(mdp))
+ return &sh_eth_my_cpu_data_giga;
+ else
+ return &sh_eth_my_cpu_data;
+}
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+#define SH_ETH_HAS_TSU 1
+static void sh_eth_chip_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ /* reset device */
+ sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+ mdelay(1);
+}
+
+static void sh_eth_reset(struct net_device *ndev)
+{
+ int cnt = 100;
+
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+ if (cnt == 0)
+		printk(KERN_ERR "Device reset failed\n");
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+}
+
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, GECMR_10, GECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, GECMR_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ sh_eth_write(ndev, GECMR_1000, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* sh7763 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .chip_reset = sh_eth_chip_reset,
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .no_trimd = 1,
+ .no_ade = 1,
+ .tsu = 1,
+};
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define SH_ETH_RESET_DEFAULT 1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+#define SH_ETH_RESET_DEFAULT 1
+#define SH_ETH_HAS_TSU 1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .tsu = 1,
+};
+#endif
+
+static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
+{
+ if (!cd->ecsr_value)
+ cd->ecsr_value = DEFAULT_ECSR_INIT;
+
+ if (!cd->ecsipr_value)
+ cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
+
+ if (!cd->fcftr_value)
+ cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+ DEFAULT_FIFO_F_D_RFD;
+
+ if (!cd->fdr_value)
+ cd->fdr_value = DEFAULT_FDR_INIT;
+
+ if (!cd->rmcr_value)
+ cd->rmcr_value = DEFAULT_RMCR_VALUE;
+
+ if (!cd->tx_check)
+ cd->tx_check = DEFAULT_TX_CHECK;
+
+ if (!cd->eesr_err_check)
+ cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+ if (!cd->tx_error_check)
+ cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
+}
+
+#if defined(SH_ETH_RESET_DEFAULT)
+/* Chip Reset */
+static void sh_eth_reset(struct net_device *ndev)
+{
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
+ mdelay(3);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
+}
+#endif
+
+#if defined(CONFIG_CPU_SH4)
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+ int reserve;
+
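+	/* Reserve just enough headroom to bring skb->data up to the next
+	 * SH4_SKB_RX_ALIGN boundary required by the DMAC.
+	 */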
+ reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
+ if (reserve)
+ skb_reserve(skb, reserve);
+}
+#else
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+ skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
+}
+#endif
+
+
+/* CPU <-> EDMAC endian convert */
+static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return cpu_to_le32(x);
+ case EDMAC_BIG_ENDIAN:
+ return cpu_to_be32(x);
+ }
+ return x;
+}
+
+static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return le32_to_cpu(x);
+ case EDMAC_BIG_ENDIAN:
+ return be32_to_cpu(x);
+ }
+ return x;
+}
+
+/*
+ * Program the hardware MAC address from dev->dev_addr.
+ */
+static void update_mac_address(struct net_device *ndev)
+{
+ sh_eth_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ sh_eth_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+}
+
+/*
+ * Get the MAC address from the SuperH MAC address registers.
+ *
+ * SuperH's Ethernet controller has no ROM for the MAC address.
+ * This driver picks up the MAC address left by the bootloader
+ * (U-Boot or sh-ipl+g); to use this device, the MAC address must
+ * be set in the bootloader beforehand.
+ */
+static void read_mac_address(struct net_device *ndev, unsigned char *mac)
+{
+ if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
+ memcpy(ndev->dev_addr, mac, 6);
+ } else {
+ ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
+ ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
+ ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
+ }
+}
+
+static int sh_eth_is_gether(struct sh_eth_private *mdp)
+{
+ if (mdp->reg_offset == sh_eth_offset_gigabit)
+ return 1;
+ else
+ return 0;
+}
+
+static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
+{
+ if (sh_eth_is_gether(mdp))
+ return EDTRR_TRNS_GETHER;
+ else
+ return EDTRR_TRNS_ETHER;
+}
+
+struct bb_info {
+ void (*set_gate)(unsigned long addr);
+ struct mdiobb_ctrl ctrl;
+ u32 addr;
+ u32 mmd_msk;/* MMD */
+ u32 mdo_msk;
+ u32 mdi_msk;
+ u32 mdc_msk;
+};
+
+/* PHY bit set */
+static void bb_set(u32 addr, u32 msk)
+{
+ writel(readl(addr) | msk, addr);
+}
+
+/* PHY bit clear */
+static void bb_clr(u32 addr, u32 msk)
+{
+ writel((readl(addr) & ~msk), addr);
+}
+
+/* PHY bit read */
+static int bb_read(u32 addr, u32 msk)
+{
+ return (readl(addr) & msk) != 0;
+}
+
+/* Data I/O pin control */
+static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ if (bit)
+ bb_set(bitbang->addr, bitbang->mmd_msk);
+ else
+ bb_clr(bitbang->addr, bitbang->mmd_msk);
+}
+
+/* Set bit data*/
+static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ if (bit)
+ bb_set(bitbang->addr, bitbang->mdo_msk);
+ else
+ bb_clr(bitbang->addr, bitbang->mdo_msk);
+}
+
+/* Get bit data*/
+static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ return bb_read(bitbang->addr, bitbang->mdi_msk);
+}
+
+/* MDC pin control */
+static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ if (bit)
+ bb_set(bitbang->addr, bitbang->mdc_msk);
+ else
+ bb_clr(bitbang->addr, bitbang->mdc_msk);
+}
+
+/* mdio bus control struct */
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = sh_mdc_ctrl,
+ .set_mdio_dir = sh_mmd_ctrl,
+ .set_mdio_data = sh_set_mdio,
+ .get_mdio_data = sh_get_mdio,
+};
+
+/* free skb and descriptor buffer */
+static void sh_eth_ring_free(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+
+ /* Free Rx skb ringbuffer */
+ if (mdp->rx_skbuff) {
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (mdp->rx_skbuff[i])
+ dev_kfree_skb(mdp->rx_skbuff[i]);
+ }
+ }
+ kfree(mdp->rx_skbuff);
+
+ /* Free Tx skb ringbuffer */
+ if (mdp->tx_skbuff) {
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (mdp->tx_skbuff[i])
+ dev_kfree_skb(mdp->tx_skbuff[i]);
+ }
+ }
+ kfree(mdp->tx_skbuff);
+}
+
+/* format skb and descriptor buffer */
+static void sh_eth_ring_format(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+ struct sk_buff *skb;
+ struct sh_eth_rxdesc *rxdesc = NULL;
+ struct sh_eth_txdesc *txdesc = NULL;
+ int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
+ int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+
+ mdp->cur_rx = mdp->cur_tx = 0;
+ mdp->dirty_rx = mdp->dirty_tx = 0;
+
+ memset(mdp->rx_ring, 0, rx_ringsize);
+
+ /* build Rx ring buffer */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* skb */
+ mdp->rx_skbuff[i] = NULL;
+ skb = dev_alloc_skb(mdp->rx_buf_sz);
+ mdp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ skb->dev = ndev; /* Mark as being used by this device. */
+ sh_eth_set_receive_align(skb);
+
+ /* RX descriptor */
+ rxdesc = &mdp->rx_ring[i];
+ rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
+
+		/* The buffer size is aligned to a 16-byte boundary. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ /* Rx descriptor address set */
+ if (i == 0) {
+ sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
+ if (sh_eth_is_gether(mdp))
+ sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
+ }
+ }
+
+ mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+
+ /* Mark the last entry as wrapping the ring. */
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
+
+ memset(mdp->tx_ring, 0, tx_ringsize);
+
+ /* build Tx ring buffer */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ mdp->tx_skbuff[i] = NULL;
+ txdesc = &mdp->tx_ring[i];
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
+ txdesc->buffer_length = 0;
+ if (i == 0) {
+ /* Tx descriptor address set */
+ sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
+ if (sh_eth_is_gether(mdp))
+ sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
+ }
+ }
+
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
+}
+
+/* Get skb and descriptor buffer */
+static int sh_eth_ring_init(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int rx_ringsize, tx_ringsize, ret = 0;
+
+ /*
+ * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ * card needs room to do 8 byte alignment, +2 so we can reserve
+ * the first 2 bytes, and +16 gets room for the status word from the
+ * card.
+ */
+ mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
+ (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
+ if (mdp->cd->rpadir)
+ mdp->rx_buf_sz += NET_IP_ALIGN;
+
+ /* Allocate RX and TX skb rings */
+ mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+ GFP_KERNEL);
+ if (!mdp->rx_skbuff) {
+ dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+ GFP_KERNEL);
+ if (!mdp->tx_skbuff) {
+ dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
+ ret = -ENOMEM;
+ goto skb_ring_free;
+ }
+
+ /* Allocate all Rx descriptors. */
+ rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
+ GFP_KERNEL);
+
+ if (!mdp->rx_ring) {
+ dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
+ rx_ringsize);
+ ret = -ENOMEM;
+ goto desc_ring_free;
+ }
+
+ mdp->dirty_rx = 0;
+
+ /* Allocate all Tx descriptors. */
+ tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
+ GFP_KERNEL);
+ if (!mdp->tx_ring) {
+ dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
+ tx_ringsize);
+ ret = -ENOMEM;
+ goto desc_ring_free;
+ }
+ return ret;
+
+desc_ring_free:
+ /* free DMA buffer */
+ dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
+
+skb_ring_free:
+ /* Free Rx and Tx skb ring buffer */
+ sh_eth_ring_free(ndev);
+
+ return ret;
+}
+
+static int sh_eth_dev_init(struct net_device *ndev)
+{
+ int ret = 0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u_int32_t rx_int_var, tx_int_var;
+ u32 val;
+
+ /* Soft Reset */
+ sh_eth_reset(ndev);
+
+ /* Descriptor format */
+ sh_eth_ring_format(ndev);
+ if (mdp->cd->rpadir)
+ sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
+
+ /* all sh_eth int mask */
+ sh_eth_write(ndev, 0, EESIPR);
+
+#if defined(__LITTLE_ENDIAN__)
+ if (mdp->cd->hw_swap)
+ sh_eth_write(ndev, EDMR_EL, EDMR);
+ else
+#endif
+ sh_eth_write(ndev, 0, EDMR);
+
+ /* FIFO size set */
+ sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
+ sh_eth_write(ndev, 0, TFTR);
+
+ /* Frame recv control */
+ sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
+
+ rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
+ tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
+ sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
+
+ if (mdp->cd->bculr)
+		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
+
+ sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
+
+ if (!mdp->cd->no_trimd)
+ sh_eth_write(ndev, 0, TRIMD);
+
+ /* Recv frame limit set register */
+ sh_eth_write(ndev, RFLR_VALUE, RFLR);
+
+ sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+
+ /* PAUSE Prohibition */
+ val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
+ ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
+
+ sh_eth_write(ndev, val, ECMR);
+
+ if (mdp->cd->set_rate)
+ mdp->cd->set_rate(ndev);
+
+ /* E-MAC Status Register clear */
+ sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
+
+ /* E-MAC Interrupt Enable register */
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+
+ /* Set MAC address */
+ update_mac_address(ndev);
+
+ /* mask reset */
+ if (mdp->cd->apr)
+ sh_eth_write(ndev, APR_AP, APR);
+ if (mdp->cd->mpr)
+ sh_eth_write(ndev, MPR_MP, MPR);
+ if (mdp->cd->tpauser)
+ sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
+
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+
+ netif_start_queue(ndev);
+
+ return ret;
+}
+
+/* free Tx skb function */
+static int sh_eth_txfree(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+ int entry = 0;
+
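+	/* Reclaim every Tx descriptor the DMAC has completed (TD_TACT
+	 * cleared), unmapping and freeing the attached skb.
+	 */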
+ for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+ entry = mdp->dirty_tx % TX_RING_SIZE;
+ txdesc = &mdp->tx_ring[entry];
+ if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
+ break;
+ /* Free the original skb. */
+ if (mdp->tx_skbuff[entry]) {
+ dma_unmap_single(&ndev->dev, txdesc->addr,
+ txdesc->buffer_length, DMA_TO_DEVICE);
+ dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+ mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+ }
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
+ if (entry >= TX_RING_SIZE - 1)
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
+
+ mdp->stats.tx_packets++;
+ mdp->stats.tx_bytes += txdesc->buffer_length;
+ }
+	return free_num;
+}
+
+/* Packet receive function */
+static int sh_eth_rx(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_rxdesc *rxdesc;
+
+ int entry = mdp->cur_rx % RX_RING_SIZE;
+ int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+ struct sk_buff *skb;
+ u16 pkt_len = 0;
+ u32 desc_status;
+
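+	/* Process received frames until a descriptor still owned by the
+	 * DMAC (RD_RACT set) is found or the work estimate runs out.
+	 */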
+ rxdesc = &mdp->rx_ring[entry];
+ while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ desc_status = edmac_to_cpu(mdp, rxdesc->status);
+ pkt_len = rxdesc->frame_length;
+
+ if (--boguscnt < 0)
+ break;
+
+ if (!(desc_status & RDFEND))
+ mdp->stats.rx_length_errors++;
+
+ if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
+ RD_RFS5 | RD_RFS6 | RD_RFS10)) {
+ mdp->stats.rx_errors++;
+ if (desc_status & RD_RFS1)
+ mdp->stats.rx_crc_errors++;
+ if (desc_status & RD_RFS2)
+ mdp->stats.rx_frame_errors++;
+ if (desc_status & RD_RFS3)
+ mdp->stats.rx_length_errors++;
+ if (desc_status & RD_RFS4)
+ mdp->stats.rx_length_errors++;
+ if (desc_status & RD_RFS6)
+ mdp->stats.rx_missed_errors++;
+ if (desc_status & RD_RFS10)
+ mdp->stats.rx_over_errors++;
+ } else {
+ if (!mdp->cd->hw_swap)
+ sh_eth_soft_swap(
+ phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ pkt_len + 2);
+ skb = mdp->rx_skbuff[entry];
+ mdp->rx_skbuff[entry] = NULL;
+ if (mdp->cd->rpadir)
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_rx(skb);
+ mdp->stats.rx_packets++;
+ mdp->stats.rx_bytes += pkt_len;
+ }
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
+ entry = (++mdp->cur_rx) % RX_RING_SIZE;
+ rxdesc = &mdp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
+ entry = mdp->dirty_rx % RX_RING_SIZE;
+ rxdesc = &mdp->rx_ring[entry];
+		/* The buffer size is aligned to a 16-byte boundary. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+
+ if (mdp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(mdp->rx_buf_sz);
+ mdp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ skb->dev = ndev;
+ sh_eth_set_receive_align(skb);
+
+ skb_checksum_none_assert(skb);
+ rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ }
+ if (entry >= RX_RING_SIZE - 1)
+ rxdesc->status |=
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
+ else
+ rxdesc->status |=
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP);
+ }
+
+ /* Restart Rx engine if stopped. */
+ /* If we don't need to check status, don't. -KDU */
+ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+
+ return 0;
+}
+
+static void sh_eth_rcv_snd_disable(struct net_device *ndev)
+{
+ /* disable tx and rx */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
+ ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(struct net_device *ndev)
+{
+ /* enable tx and rx */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
+ (ECMR_RE | ECMR_TE), ECMR);
+}
+
+/* error control function */
+static void sh_eth_error(struct net_device *ndev, int intr_status)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 felic_stat;
+ u32 link_stat;
+ u32 mask;
+
+ if (intr_status & EESR_ECI) {
+ felic_stat = sh_eth_read(ndev, ECSR);
+ sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
+ if (felic_stat & ECSR_ICD)
+ mdp->stats.tx_carrier_errors++;
+ if (felic_stat & ECSR_LCHNG) {
+ /* Link Changed */
+ if (mdp->cd->no_psr || mdp->no_ether_link) {
+ if (mdp->link == PHY_DOWN)
+ link_stat = 0;
+ else
+ link_stat = PHY_ST_LINK;
+ } else {
+ link_stat = (sh_eth_read(ndev, PSR));
+ if (mdp->ether_link_active_low)
+ link_stat = ~link_stat;
+ }
+ if (!(link_stat & PHY_ST_LINK))
+ sh_eth_rcv_snd_disable(ndev);
+ else {
+ /* Link Up */
+ sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
+ ~DMAC_M_ECI, EESIPR);
+ /*clear int */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
+ ECSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
+ DMAC_M_ECI, EESIPR);
+ /* enable tx and rx */
+ sh_eth_rcv_snd_enable(ndev);
+ }
+ }
+ }
+
+ if (intr_status & EESR_TWB) {
+		/* Write-back end (unused write-back complete interrupt) */
+ if (intr_status & EESR_TABT) /* Transmit Abort int */
+ mdp->stats.tx_aborted_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Abort\n");
+ }
+
+ if (intr_status & EESR_RABT) {
+ /* Receive Abort int */
+ if (intr_status & EESR_RFRMER) {
+ /* Receive Frame Overflow int */
+ mdp->stats.rx_frame_errors++;
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Abort\n");
+ }
+ }
+
+ if (intr_status & EESR_TDE) {
+ /* Transmit Descriptor Empty int */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+ }
+
+ if (intr_status & EESR_TFE) {
+ /* FIFO under flow */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
+ }
+
+ if (intr_status & EESR_RDE) {
+ /* Receive Descriptor Empty int */
+ mdp->stats.rx_over_errors++;
+
+ if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+ }
+
+ if (intr_status & EESR_RFE) {
+ /* Receive FIFO Overflow int */
+ mdp->stats.rx_fifo_errors++;
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ }
+
+ if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+ /* Address Error */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Address Error\n");
+ }
+
+ mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
+ if (mdp->cd->no_ade)
+ mask &= ~EESR_ADE;
+ if (intr_status & mask) {
+ /* Tx error */
+ u32 edtrr = sh_eth_read(ndev, EDTRR);
+ /* dmesg */
+ dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
+ intr_status, mdp->cur_tx);
+ dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ mdp->dirty_tx, (u32) ndev->state, edtrr);
+ /* dirty buffer free */
+ sh_eth_txfree(ndev);
+
+ /* SH7712 BUG */
+ if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
+ /* tx dma start */
+ sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
+ }
+ /* wakeup */
+ netif_wake_queue(ndev);
+ }
+}
+
+static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
+{
+ struct net_device *ndev = netdev;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_cpu_data *cd = mdp->cd;
+ irqreturn_t ret = IRQ_NONE;
+ u32 intr_status = 0;
+
+ spin_lock(&mdp->lock);
+
+	/* Get interrupt status */
+ intr_status = sh_eth_read(ndev, EESR);
+ /* Clear interrupt */
+ if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
+ EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
+ cd->tx_check | cd->eesr_err_check)) {
+ sh_eth_write(ndev, intr_status, EESR);
+ ret = IRQ_HANDLED;
+ } else
+ goto other_irq;
+
+ if (intr_status & (EESR_FRC | /* Frame recv*/
+ EESR_RMAF | /* Multi cast address recv*/
+ EESR_RRF | /* Bit frame recv */
+ EESR_RTLF | /* Long frame recv*/
+ EESR_RTSF | /* short frame recv */
+ EESR_PRE | /* PHY-LSI recv error */
+ EESR_CERF)){ /* recv frame CRC error */
+ sh_eth_rx(ndev);
+ }
+
+ /* Tx Check */
+ if (intr_status & cd->tx_check) {
+ sh_eth_txfree(ndev);
+ netif_wake_queue(ndev);
+ }
+
+ if (intr_status & cd->eesr_err_check)
+ sh_eth_error(ndev, intr_status);
+
+other_irq:
+ spin_unlock(&mdp->lock);
+
+ return ret;
+}
+
+static void sh_eth_timer(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ mod_timer(&mdp->timer, jiffies + (10 * HZ));
+}
+
+/* PHY state control function */
+static void sh_eth_adjust_link(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct phy_device *phydev = mdp->phydev;
+ int new_state = 0;
+
+ if (phydev->link != PHY_DOWN) {
+ if (phydev->duplex != mdp->duplex) {
+ new_state = 1;
+ mdp->duplex = phydev->duplex;
+ if (mdp->cd->set_duplex)
+ mdp->cd->set_duplex(ndev);
+ }
+
+ if (phydev->speed != mdp->speed) {
+ new_state = 1;
+ mdp->speed = phydev->speed;
+ if (mdp->cd->set_rate)
+ mdp->cd->set_rate(ndev);
+ }
+ if (mdp->link == PHY_DOWN) {
+ sh_eth_write(ndev,
+ (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
+ new_state = 1;
+ mdp->link = phydev->link;
+ }
+ } else if (mdp->link) {
+ new_state = 1;
+ mdp->link = PHY_DOWN;
+ mdp->speed = 0;
+ mdp->duplex = -1;
+ }
+
+ if (new_state && netif_msg_link(mdp))
+ phy_print_status(phydev);
+}
+
+/* PHY init function */
+static int sh_eth_phy_init(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ struct phy_device *phydev = NULL;
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ mdp->mii_bus->id , mdp->phy_id);
+
+ mdp->link = PHY_DOWN;
+ mdp->speed = 0;
+ mdp->duplex = -1;
+
+ /* Try connect to PHY */
+ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
+ 0, mdp->phy_interface);
+ if (IS_ERR(phydev)) {
+ dev_err(&ndev->dev, "phy_connect failed\n");
+ return PTR_ERR(phydev);
+ }
+
+ dev_info(&ndev->dev, "attached phy %i to driver %s\n",
+ phydev->addr, phydev->drv->name);
+
+ mdp->phydev = phydev;
+
+ return 0;
+}
+
+/* PHY control start function */
+static int sh_eth_phy_start(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ ret = sh_eth_phy_init(ndev);
+ if (ret)
+ return ret;
+
+ /* reset phy - this also wakes it from PDOWN */
+ phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
+ phy_start(mdp->phydev);
+
+ return 0;
+}
+
+static int sh_eth_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_ethtool_gset(mdp->phydev, ecmd);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* disable tx and rx */
+ sh_eth_rcv_snd_disable(ndev);
+
+ ret = phy_ethtool_sset(mdp->phydev, ecmd);
+ if (ret)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ mdp->duplex = 1;
+ else
+ mdp->duplex = 0;
+
+ if (mdp->cd->set_duplex)
+ mdp->cd->set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* enable tx and rx */
+ sh_eth_rcv_snd_enable(ndev);
+
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_start_aneg(mdp->phydev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_current", "tx_current",
+ "rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return SH_ETH_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i = 0;
+
+ /* device-specific stats */
+ data[i++] = mdp->cur_rx;
+ data[i++] = mdp->cur_tx;
+ data[i++] = mdp->dirty_rx;
+ data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *sh_eth_gstrings_stats,
+ sizeof(sh_eth_gstrings_stats));
+ break;
+ }
+}
+
+static const struct ethtool_ops sh_eth_ethtool_ops = {
+ .get_settings = sh_eth_get_settings,
+ .set_settings = sh_eth_set_settings,
+ .nway_reset = sh_eth_nway_reset,
+ .get_msglevel = sh_eth_get_msglevel,
+ .set_msglevel = sh_eth_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = sh_eth_get_strings,
+ .get_ethtool_stats = sh_eth_get_ethtool_stats,
+ .get_sset_count = sh_eth_get_sset_count,
+};
+
+/* network device open function */
+static int sh_eth_open(struct net_device *ndev)
+{
+ int ret = 0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ pm_runtime_get_sync(&mdp->pdev->dev);
+
+ ret = request_irq(ndev->irq, sh_eth_interrupt,
+#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7757)
+ IRQF_SHARED,
+#else
+ 0,
+#endif
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&ndev->dev, "Can not assign IRQ number\n");
+ return ret;
+ }
+
+ /* Descriptor set */
+ ret = sh_eth_ring_init(ndev);
+ if (ret)
+ goto out_free_irq;
+
+ /* device init */
+ ret = sh_eth_dev_init(ndev);
+ if (ret)
+ goto out_free_irq;
+
+ /* PHY control start*/
+ ret = sh_eth_phy_start(ndev);
+ if (ret)
+ goto out_free_irq;
+
+ /* Set the timer to check for link beat. */
+	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
+	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
+
+ return ret;
+
+out_free_irq:
+ free_irq(ndev->irq, ndev);
+ pm_runtime_put_sync(&mdp->pdev->dev);
+ return ret;
+}
+
+/* Timeout function */
+static void sh_eth_tx_timeout(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_rxdesc *rxdesc;
+ int i;
+
+ netif_stop_queue(ndev);
+
+ if (netif_msg_timer(mdp))
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
+ " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
+
+ /* tx_errors count up */
+ mdp->stats.tx_errors++;
+
+ /* timer off */
+ del_timer_sync(&mdp->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ rxdesc = &mdp->rx_ring[i];
+ rxdesc->status = 0;
+ rxdesc->addr = 0xBADF00D0;
+ if (mdp->rx_skbuff[i])
+ dev_kfree_skb(mdp->rx_skbuff[i]);
+ mdp->rx_skbuff[i] = NULL;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (mdp->tx_skbuff[i])
+ dev_kfree_skb(mdp->tx_skbuff[i]);
+ mdp->tx_skbuff[i] = NULL;
+ }
+
+ /* device init */
+ sh_eth_dev_init(ndev);
+
+ /* timer on */
+	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
+ add_timer(&mdp->timer);
+}
+
+/* Packet transmit function */
+static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_txdesc *txdesc;
+ u32 entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp->lock, flags);
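+	/* Keep four slack entries in the ring; try to reclaim completed
+	 * descriptors before declaring it full.
+	 */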
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+ if (!sh_eth_txfree(ndev)) {
+ if (netif_msg_tx_queued(mdp))
+ dev_warn(&ndev->dev, "TxFD exhausted.\n");
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ entry = mdp->cur_tx % TX_RING_SIZE;
+ mdp->tx_skbuff[entry] = skb;
+ txdesc = &mdp->tx_ring[entry];
+ /* soft swap. */
+ if (!mdp->cd->hw_swap)
+ sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
+ skb->len + 2);
+ txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (skb->len < ETHERSMALL)
+ txdesc->buffer_length = ETHERSMALL;
+ else
+ txdesc->buffer_length = skb->len;
+
+ if (entry >= TX_RING_SIZE - 1)
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
+ else
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
+
+ mdp->cur_tx++;
+
+ if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
+ sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
+
+ return NETDEV_TX_OK;
+}
+
+/* device close function */
+static int sh_eth_close(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ringsize;
+
+ netif_stop_queue(ndev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ sh_eth_write(ndev, 0x0000, EESIPR);
+
+ /* Stop the chip's Tx and Rx processes. */
+ sh_eth_write(ndev, 0, EDTRR);
+ sh_eth_write(ndev, 0, EDRRR);
+
+ /* PHY Disconnect */
+ if (mdp->phydev) {
+ phy_stop(mdp->phydev);
+ phy_disconnect(mdp->phydev);
+ }
+
+ free_irq(ndev->irq, ndev);
+
+ del_timer_sync(&mdp->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+
+ /* free DMA buffer */
+ ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
+
+ /* free DMA buffer */
+ ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
+
+ pm_runtime_put_sync(&mdp->pdev->dev);
+
+ return 0;
+}
+
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ pm_runtime_get_sync(&mdp->pdev->dev);
+
+ mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+ sh_eth_write(ndev, 0, TROCR); /* (write clear) */
+ mdp->stats.collisions += sh_eth_read(ndev, CDCR);
+ sh_eth_write(ndev, 0, CDCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+ sh_eth_write(ndev, 0, LCCR); /* (write clear) */
+ if (sh_eth_is_gether(mdp)) {
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+ sh_eth_write(ndev, 0, CERCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+ sh_eth_write(ndev, 0, CEECR); /* (write clear) */
+ } else {
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+ sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
+ }
+ pm_runtime_put_sync(&mdp->pdev->dev);
+
+ return &mdp->stats;
+}
+
+/* ioctl to device function */
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
+ int cmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct phy_device *phydev = mdp->phydev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+#if defined(SH_ETH_HAS_TSU)
+/* Multicast reception directions set */
+static void sh_eth_set_multicast_list(struct net_device *ndev)
+{
+ if (ndev->flags & IFF_PROMISC) {
+ /* Set promiscuous. */
+ sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
+ ECMR_PRM, ECMR);
+ } else {
+ /* Normal, unicast/broadcast-only mode. */
+ sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
+ ECMR_MCT, ECMR);
+ }
+}
+#endif /* SH_ETH_HAS_TSU */
+
+/* SuperH's TSU register init function */
+static void sh_eth_tsu_init(struct sh_eth_private *mdp)
+{
+ sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
+ sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
+ sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
+ sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
+ sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
+ sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
+ sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
+ sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
+ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
+ if (sh_eth_is_gether(mdp)) {
+ sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
+ } else {
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
+ }
+ sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
+ sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
+ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
+}
+
+/* MDIO bus release function */
+static int sh_mdio_release(struct net_device *ndev)
+{
+ struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
+
+ /* unregister mdio bus */
+ mdiobus_unregister(bus);
+
+ /* remove mdio bus info from net_device */
+ dev_set_drvdata(&ndev->dev, NULL);
+
+	/* free the PHY interrupt table */
+ kfree(bus->irq);
+
+ /* free bitbang info */
+ free_mdio_bitbang(bus);
+
+ return 0;
+}
+
+/* MDIO bus init function */
+static int sh_mdio_init(struct net_device *ndev, int id,
+ struct sh_eth_plat_data *pd)
+{
+ int ret, i;
+ struct bb_info *bitbang;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ /* create bit control struct for PHY */
+ bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+ if (!bitbang) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* bitbang init */
+ bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
+ bitbang->set_gate = pd->set_mdio_gate;
+ bitbang->mdi_msk = 0x08;
+ bitbang->mdo_msk = 0x04;
+ bitbang->mmd_msk = 0x02;/* MMD */
+ bitbang->mdc_msk = 0x01;
+ bitbang->ctrl.ops = &bb_ops;
+
+ /* MII controller setting */
+ mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
+ if (!mdp->mii_bus) {
+ ret = -ENOMEM;
+ goto out_free_bitbang;
+ }
+
+ /* Hook up MII support for ethtool */
+ mdp->mii_bus->name = "sh_mii";
+ mdp->mii_bus->parent = &ndev->dev;
+ snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
+
+ /* PHY IRQ */
+ mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!mdp->mii_bus->irq) {
+ ret = -ENOMEM;
+ goto out_free_bus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdp->mii_bus->irq[i] = PHY_POLL;
+
+	/* register the mdio bus */
+ ret = mdiobus_register(mdp->mii_bus);
+ if (ret)
+ goto out_free_irq;
+
+ dev_set_drvdata(&ndev->dev, mdp->mii_bus);
+
+ return 0;
+
+out_free_irq:
+ kfree(mdp->mii_bus->irq);
+
+out_free_bus:
+ free_mdio_bitbang(mdp->mii_bus);
+
+out_free_bitbang:
+ kfree(bitbang);
+
+out:
+ return ret;
+}
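+
+/* The error paths above unwind in strict reverse order of acquisition
+ * (irq table -> mdio bus -> bitbang struct), the usual kernel goto-ladder
+ * idiom: each label frees exactly what was allocated before the failing
+ * step, so no path leaks or double-frees.
+ */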
+
+static const u16 *sh_eth_get_register_offset(int register_type)
+{
+ const u16 *reg_offset = NULL;
+
+ switch (register_type) {
+ case SH_ETH_REG_GIGABIT:
+ reg_offset = sh_eth_offset_gigabit;
+ break;
+ case SH_ETH_REG_FAST_SH4:
+ reg_offset = sh_eth_offset_fast_sh4;
+ break;
+ case SH_ETH_REG_FAST_SH3_SH2:
+ reg_offset = sh_eth_offset_fast_sh3_sh2;
+ break;
+ default:
+ printk(KERN_ERR "Unknown register type (%d)\n", register_type);
+ break;
+ }
+
+ return reg_offset;
+}
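+
+/* Usage example (illustrative): a board whose platform data sets
+ * pd->register_type = SH_ETH_REG_FAST_SH4 gets
+ * mdp->reg_offset = sh_eth_offset_fast_sh4, and every subsequent
+ * sh_eth_read()/sh_eth_write() indexes that table to locate the register.
+ */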
+
+static const struct net_device_ops sh_eth_netdev_ops = {
+ .ndo_open = sh_eth_open,
+ .ndo_stop = sh_eth_close,
+ .ndo_start_xmit = sh_eth_start_xmit,
+ .ndo_get_stats = sh_eth_get_stats,
+#if defined(SH_ETH_HAS_TSU)
+ .ndo_set_rx_mode = sh_eth_set_multicast_list,
+#endif
+ .ndo_tx_timeout = sh_eth_tx_timeout,
+ .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int sh_eth_drv_probe(struct platform_device *pdev)
+{
+ int ret, devno = 0;
+ struct resource *res;
+ struct net_device *ndev = NULL;
+ struct sh_eth_private *mdp = NULL;
+ struct sh_eth_plat_data *pd;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct sh_eth_private));
+ if (!ndev) {
+ dev_err(&pdev->dev, "Could not allocate device.\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* The sh Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+ devno = pdev->id;
+ if (devno < 0)
+ devno = 0;
+
+ ndev->dma = -1;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto out_release;
+ }
+ ndev->irq = ret;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(ndev);
+
+ mdp = netdev_priv(ndev);
+ spin_lock_init(&mdp->lock);
+ mdp->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
+ /* get PHY ID */
+ mdp->phy_id = pd->phy;
+ mdp->phy_interface = pd->phy_interface;
+ /* EDMAC endian */
+ mdp->edmac_endian = pd->edmac_endian;
+ mdp->no_ether_link = pd->no_ether_link;
+ mdp->ether_link_active_low = pd->ether_link_active_low;
+ mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
+
+ /* set cpu data */
+#if defined(SH_ETH_HAS_BOTH_MODULES)
+ mdp->cd = sh_eth_get_cpu_data(mdp);
+#else
+ mdp->cd = &sh_eth_my_cpu_data;
+#endif
+ sh_eth_set_default_cpu_data(mdp->cd);
+
+ /* set function */
+ ndev->netdev_ops = &sh_eth_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ /* debug message level */
+ mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
+ mdp->post_rx = POST_RX >> (devno << 1);
+ mdp->post_fw = POST_FW >> (devno << 1);
+
+ /* read and set MAC address */
+ read_mac_address(ndev, pd->mac_addr);
+
+ /* First device only init */
+ if (!devno) {
+ if (mdp->cd->tsu) {
+ struct resource *rtsu;
+ rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rtsu) {
+				dev_err(&pdev->dev, "TSU resource not found\n");
+				ret = -ENODEV;
+				goto out_release;
+ }
+ mdp->tsu_addr = ioremap(rtsu->start,
+ resource_size(rtsu));
+ }
+ if (mdp->cd->chip_reset)
+ mdp->cd->chip_reset(ndev);
+
+ if (mdp->cd->tsu) {
+			/* TSU init (first device only) */
+ sh_eth_tsu_init(mdp);
+ }
+ }
+
+ /* network device register */
+ ret = register_netdev(ndev);
+ if (ret)
+ goto out_release;
+
+ /* mdio bus init */
+ ret = sh_mdio_init(ndev, pdev->id, pd);
+ if (ret)
+ goto out_unregister;
+
+ /* print device information */
+ pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+
+ platform_set_drvdata(pdev, ndev);
+
+ return ret;
+
+out_unregister:
+ unregister_netdev(ndev);
+
+out_release:
+ /* net_dev free */
+ if (mdp && mdp->tsu_addr)
+ iounmap(mdp->tsu_addr);
+ if (ndev)
+ free_netdev(ndev);
+
+out:
+ return ret;
+}
+
+static int sh_eth_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ iounmap(mdp->tsu_addr);
+ sh_mdio_release(ndev);
+ unregister_netdev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int sh_eth_runtime_nop(struct device *dev)
+{
+ /*
+ * Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static struct dev_pm_ops sh_eth_dev_pm_ops = {
+ .runtime_suspend = sh_eth_runtime_nop,
+ .runtime_resume = sh_eth_runtime_nop,
+};
+
+static struct platform_driver sh_eth_driver = {
+ .probe = sh_eth_drv_probe,
+ .remove = sh_eth_drv_remove,
+ .driver = {
+ .name = CARDNAME,
+ .pm = &sh_eth_dev_pm_ops,
+ },
+};
+
+static int __init sh_eth_init(void)
+{
+ return platform_driver_register(&sh_eth_driver);
+}
+
+static void __exit sh_eth_cleanup(void)
+{
+ platform_driver_unregister(&sh_eth_driver);
+}
+
+module_init(sh_eth_init);
+module_exit(sh_eth_cleanup);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/topology.h>
+#include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+
+#include "mcdi.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Type name strings
+ *
+ **************************************************************************
+ */
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
+ [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+};
+
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+/*
+ * Use separate channels for TX and RX events
+ *
+ * Set this to 1 to use separate channels for TX and RX. It allows us
+ * to control interrupt affinity separately for TX and RX.
+ *
+ * This is only used in MSI-X interrupt mode
+ */
+static unsigned int separate_tx_channels;
+module_param(separate_tx_channels, uint, 0444);
+MODULE_PARM_DESC(separate_tx_channels,
+ "Use separate channels for TX and RX");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor. On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* This controls whether or not the driver will initialise devices
+ * with invalid MAC addresses stored in the EEPROM or flash. If true,
+ * such devices will be initialised with a random locally-generated
+ * MAC address. This allows for loading the sfc_mtd driver to
+ * reprogram the flash, even if the flash contents (including the MAC
+ * address) have previously been erased.
+ */
+static unsigned int allow_bad_hwaddr;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * This default is chosen to ensure that a 10G link does not go idle
+ * while a TX queue is stopped after it has become full. A queue is
+ * restarted when it drops below half full. The time this takes (assuming
+ * worst case 3 descriptors per packet and 1024 descriptors) is
+ * 512 / 3 * 1.2 = 205 usec.
+ */
+static unsigned int tx_irq_mod_usec = 150;
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each package (level II cache)
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+static int phy_flash_cfg;
+module_param(phy_flash_cfg, int, 0644);
+MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+
+static unsigned irq_adapt_low_thresh = 10000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned irq_adapt_high_thresh = 20000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/**************************************************************************
+ *
+ * Utility functions and prototypes
+ *
+ *************************************************************************/
+
+static void efx_remove_channels(struct efx_nic *efx);
+static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
+static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
+static void efx_fini_struct(struct efx_nic *efx);
+static void efx_start_all(struct efx_nic *efx);
+static void efx_stop_all(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_RUNNING) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
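+
+/* The do { } while (0) wrapper makes the assertion expand to a single
+ * statement, so it composes safely with unbraced if/else bodies.
+ */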
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ if (unlikely(efx->reset_pending || !channel->enabled))
+ return 0;
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent == 0)
+ return 0;
+
+ /* Deliver last RX packet. */
+ if (channel->rx_pkt) {
+ __efx_rx_packet(channel, channel->rx_pkt,
+ channel->rx_pkt_csummed);
+ channel->rx_pkt = NULL;
+ }
+
+ efx_rx_strategy(channel);
+
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
+
+ return spent;
+}
+
+/* Mark channel as finished processing
+ *
+ * Note that since we will not receive further interrupts for this
+ * channel before we finish processing and call the eventq_read_ack()
+ * method, there is no need to use the interrupt hold-off timers.
+ */
+static inline void efx_channel_processed(struct efx_channel *channel)
+{
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we acknowledge the events we've seen. Make sure
+ * it's cleared before then. */
+ channel->work_pending = false;
+ smp_wmb();
+
+ efx_nic_eventq_read_ack(channel);
+}
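+
+/* Ordering note: the smp_wmb() above pairs with the interrupt handler,
+ * which may set work_pending again as soon as the eventq ack above is
+ * visible. A hedged sketch of that producer side (illustrative only;
+ * the real handler lives elsewhere in the driver):
+ *
+ *	channel->work_pending = true;
+ *	smp_wmb();
+ *	napi_schedule(&channel->napi_str);
+ */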
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ if (spent < budget) {
+ if (channel->channel < efx->n_rx_channels &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ if (unlikely(channel->irq_mod_score <
+ irq_adapt_low_thresh)) {
+ if (channel->irq_moderation > 1) {
+ channel->irq_moderation -= 1;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (unlikely(channel->irq_mod_score >
+ irq_adapt_high_thresh)) {
+ if (channel->irq_moderation <
+ efx->irq_rx_moderation) {
+ channel->irq_moderation += 1;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+ }
+
+ efx_filter_rfs_expire(channel);
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_channel_processed() will have no effect if
+ * interrupts have already been disabled.
+ */
+ napi_complete(napi);
+ efx_channel_processed(channel);
+ }
+
+ return spent;
+}
+
+/* Process the eventq of the specified channel immediately on this CPU
+ *
+ * Disable hardware generated interrupts, wait for any existing
+ * processing to finish, then directly poll (and ack) the eventq.
+ * Finally reenable NAPI and interrupts.
+ *
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
+ */
+void efx_process_channel_now(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ BUG_ON(channel->channel >= efx->n_channels);
+ BUG_ON(!channel->enabled);
+ BUG_ON(!efx->loopback_selftest);
+
+ /* Disable interrupts and wait for ISRs to complete */
+ efx_nic_disable_interrupts(efx);
+ if (efx->legacy_irq) {
+ synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ /* Wait for any NAPI processing to complete */
+ napi_disable(&channel->napi_str);
+
+ /* Poll the channel */
+ efx_process_channel(channel, channel->eventq_mask + 1);
+
+ /* Ack the eventq. This may cause an interrupt to be generated
+ * when they are reenabled */
+ efx_channel_processed(channel);
+
+ napi_enable(&channel->napi_str);
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
+ efx_nic_enable_interrupts(efx);
+}
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+static int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions. */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+ return efx_nic_probe_eventq(channel);
+}
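+
+/* Worked example: with 512-entry RX and TX queues, 512 + 512 + 128 = 1152
+ * rounds up to 2048 event entries, so eventq_mask = 2047; with 1024-entry
+ * queues, 1024 + 1024 + 128 = 2176 rounds up to 4096, giving a mask of
+ * 4095.
+ */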
+
+/* Prepare channel's event queue */
+static void efx_init_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ channel->eventq_read_ptr = 0;
+
+ efx_nic_init_eventq(channel);
+}
+
+static void efx_fini_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+}
+
+static void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure, optionally copying
+ * parameters (but not resources) from an old channel structure. */
+static struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int j;
+
+ if (old_channel) {
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+ } else {
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail1;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail2;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail3;
+ }
+
+ channel->n_rx_frm_trunc = 0;
+
+ return 0;
+
+ fail3:
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ fail2:
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ fail1:
+ return rc;
+}
+
+
+static void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ const char *type = "";
+ int number;
+
+ efx_for_each_channel(channel, efx) {
+ number = channel->channel;
+ if (efx->n_channels > efx->n_rx_channels) {
+ if (channel->channel < efx->n_rx_channels) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->n_rx_channels;
+ }
+ }
+ snprintf(efx->channel_name[channel->channel],
+ sizeof(efx->channel_name[0]),
+ "%s%s-%d", efx->name, type, number);
+ }
+}
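+
+/* Example: with separate_tx_channels and four channels (two RX, two TX)
+ * on "eth0", the per-IRQ names come out as "eth0-rx-0", "eth0-rx-1",
+ * "eth0-tx-0" and "eth0-tx-1"; with shared channels they are simply
+ * "eth0-0", "eth0-1", and so on.
+ */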
+
+static int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ efx_for_each_channel(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_init_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_hash_size +
+ efx->type->rx_buffer_padding);
+ efx->rx_buffer_order = get_order(efx->rx_buffer_len +
+ sizeof(struct efx_rx_page_state));
+
+ /* Initialise the channels */
+ efx_for_each_channel(channel, efx) {
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "init chan %d\n", channel->channel);
+
+ efx_init_eventq(channel);
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue(tx_queue);
+
+ /* The rx buffer allocation strategy is MTU dependent */
+ efx_rx_strategy(channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_init_rx_queue(rx_queue);
+
+ WARN_ON(channel->rx_pkt != NULL);
+ efx_rx_strategy(channel);
+ }
+}
+
+/* This enables event queue processing and packet transmission.
+ *
+ * Note that this function is not allowed to fail, since that would
+ * introduce too much complexity into the suspend/resume path.
+ */
+static void efx_start_channel(struct efx_channel *channel)
+{
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "starting chan %d\n", channel->channel);
+
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we enable it. Make sure it's cleared before
+ * then. Similarly, make sure it sees the enabled flag set. */
+ channel->work_pending = false;
+ channel->enabled = true;
+ smp_wmb();
+
+ /* Fill the queues before enabling NAPI */
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fast_push_rx_descriptors(rx_queue);
+
+ napi_enable(&channel->napi_str);
+}
+
+/* This disables event queue processing and packet transmission.
+ * This function does not guarantee that all queue processing
+ * (e.g. RX refill) is complete.
+ */
+static void efx_stop_channel(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
+ "stop chan %d\n", channel->channel);
+
+ channel->enabled = false;
+ napi_disable(&channel->napi_str);
+}
+
+static void efx_fini_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ rc = efx_nic_flush_queues(efx);
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset. */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "shut down chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fini_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_fini_tx_queue(tx_queue);
+ efx_fini_eventq(channel);
+ }
+}
+
+static void efx_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+}
+
+static void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+}
+
+int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ u32 old_rxq_entries, old_txq_entries;
+ unsigned i;
+ int rc;
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+ /* Clone channels */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx_alloc_channel(efx, i, efx->channel[i]);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto rollback;
+
+ efx_init_napi(efx);
+
+ /* Destroy old channels */
+ for (i = 0; i < efx->n_channels; i++) {
+ efx_fini_napi_channel(other_channel[i]);
+ efx_remove_channel(other_channel[i]);
+ }
+out:
+ /* Free unused channel structures */
+ for (i = 0; i < efx->n_channels; i++)
+ kfree(other_channel[i]);
+
+ efx_init_channels(efx);
+ efx_start_all(efx);
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
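+
+/* Note the symmetry above: entry counts and channel pointers are swapped
+ * into place before probing, and the rollback path runs the same swap
+ * again to restore the original configuration, after which "out" frees
+ * whichever set of channel structures ended up unused and re-initialises
+ * the survivors.
+ */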
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, which in turn governs
+ * whether the networking core keeps the port's TX queue running.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up) {
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu,
+ (efx->promiscuous ? " [PROMISC]" : ""));
+ } else {
+ netif_info(efx, link, efx->net_dev, "link down\n");
+ }
+}
+
+void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+{
+ efx->link_advertising = advertising;
+ if (advertising) {
+ if (advertising & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+ }
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+ efx->wanted_fc = wanted_fc;
+ if (efx->link_advertising) {
+ if (wanted_fc & EFX_FC_RX)
+ efx->link_advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else
+ efx->link_advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ if (wanted_fc & EFX_FC_TX)
+ efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ }
+}
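+
+/* The |= / ^= pairing above implements the IEEE 802.3 pause advertising
+ * encoding; enumerating the cases:
+ *
+ *	wanted_fc		advertised
+ *	RX and TX		Pause
+ *	RX only			Pause | Asym_Pause
+ *	TX only			Asym_Pause
+ *	neither			(none)
+ */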
+
+static void efx_fini_port(struct efx_nic *efx);
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Serialise the promiscuous flag with efx_set_multicast_list. */
+ if (efx_dev_registered(efx)) {
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+ }
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled. */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly. */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled) {
+ efx->type->push_multicast_hash(efx);
+ efx->mac_op->reconfigure(efx);
+ }
+ mutex_unlock(&efx->mac_lock);
+}
+
+static int efx_probe_port(struct efx_nic *efx)
+{
+ unsigned char *perm_addr;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "create port\n");
+
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
+ /* Connect up MAC/PHY operations table */
+ rc = efx->type->probe_port(efx);
+ if (rc)
+ return rc;
+
+ /* Sanity check MAC address */
+ perm_addr = efx->net_dev->perm_addr;
+ if (is_valid_ether_addr(perm_addr)) {
+ memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
+ } else {
+ netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
+ perm_addr);
+ if (!allow_bad_hwaddr) {
+ rc = -EINVAL;
+ goto err;
+ }
+ random_ether_addr(efx->net_dev->dev_addr);
+ netif_info(efx, probe, efx->net_dev,
+ "using locally-generated MAC %pM\n",
+ efx->net_dev->dev_addr);
+ }
+
+ return 0;
+
+ err:
+ efx->type->remove_port(efx);
+ return rc;
+}
+
+static int efx_init_port(struct efx_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "init port\n");
+
+ mutex_lock(&efx->mac_lock);
+
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail1;
+
+ efx->port_initialized = true;
+
+ /* Reconfigure the MAC before creating dma queues (required for
+ * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
+ efx->mac_op->reconfigure(efx);
+
+ /* Ensure the PHY advertises the correct flow control settings */
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc)
+ goto fail2;
+
+ mutex_unlock(&efx->mac_lock);
+ return 0;
+
+fail2:
+ efx->phy_op->fini(efx);
+fail1:
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* efx_mac_work() might have been scheduled after efx_stop_port(),
+ * and then cancelled by efx_flush_all() */
+ efx->type->push_multicast_hash(efx);
+ efx->mac_op->reconfigure(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Prevent efx_mac_work() and efx_monitor() from working */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ if (efx_dev_registered(efx)) {
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+ }
+}
+
+static void efx_fini_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
+
+ if (!efx->port_initialized)
+ return;
+
+ efx->phy_op->fini(efx);
+ efx->port_initialized = false;
+
+ efx->link_state.up = false;
+ efx_link_status_changed(efx);
+}
+
+static void efx_remove_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
+
+ efx->type->remove_port(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC handling
+ *
+ **************************************************************************/
+
+/* This configures the PCI device to enable I/O and DMA. */
+static int efx_init_io(struct efx_nic *efx)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ dma_addr_t dma_mask = efx->type->max_dma_mask;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+ * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+ * masks even though they reject 46 bit masks.
+ */
+ while (dma_mask > 0x7fffffffUL) {
+ if (pci_dma_supported(pci_dev, dma_mask) &&
+ ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
+ break;
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long) dma_mask);
+ rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+ if (rc) {
+ /* pci_set_consistent_dma_mask() is not *allowed* to
+ * fail with a mask that pci_set_dma_mask() accepted,
+ * but just in case...
+ */
+ netif_err(efx, probe, efx->net_dev,
+ "failed to set consistent DMA mask\n");
+ goto fail2;
+ }
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
+ rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+	efx->membase = ioremap_nocache(efx->membase_phys,
+				       efx->type->mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size, efx->membase);
+
+ return 0;
+
+ fail4:
+ pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ fail3:
+ efx->membase_phys = 0;
+ fail2:
+ pci_disable_device(efx->pci_dev);
+ fail1:
+ return rc;
+}
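+
+/* Example of the DMA mask walk in efx_init_io(): a NIC advertising a
+ * 46-bit max_dma_mask first offers 0x3fffffffffff; if the platform
+ * rejects it the mask is halved a bit at a time (45, 44, ...) until one
+ * is accepted, never dropping to or below the 31-bit floor in the loop
+ * condition.
+ */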
+
+static void efx_fini_io(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ efx->membase_phys = 0;
+ }
+
+ pci_disable_device(efx->pci_dev);
+}
+
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
+{
+ cpumask_var_t core_mask;
+ int count;
+ int cpu;
+
+ if (rss_cpus)
+ return rss_cpus;
+
+ if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+ printk(KERN_WARNING
+ "sfc: RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, core_mask)) {
+ ++count;
+ cpumask_or(core_mask, core_mask,
+ topology_core_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(core_mask);
+ return count;
+}
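+
+/* Example: on a two-socket box with 8 logical CPUs (2 packages, each
+ * with 2 hyperthreaded cores), topology_core_cpumask() covers all the
+ * CPUs of a package, so the loop counts 2 and two channels are requested
+ * unless rss_cpus overrides the result.
+ */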
+
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc;
+
+ efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+ if (!efx->net_dev->rx_cpu_rmap)
+ return -ENOMEM;
+ for (i = 0; i < efx->n_rx_channels; i++) {
+ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+ xentries[i].vector);
+ if (rc) {
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+ return rc;
+ }
+ }
+#endif
+ return 0;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+static int efx_probe_interrupts(struct efx_nic *efx)
+{
+ int max_channels =
+ min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
+ int rc, i;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ int n_channels;
+
+ n_channels = efx_wanted_channels();
+ if (separate_tx_channels)
+ n_channels *= 2;
+ n_channels = min(n_channels, max_channels);
+
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
+ if (rc > 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %d).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ EFX_BUG_ON_PARANOID(rc >= n_channels);
+ n_channels = rc;
+ rc = pci_enable_msix(efx->pci_dev, xentries,
+ n_channels);
+ }
+
+ if (rc == 0) {
+ efx->n_channels = n_channels;
+ if (separate_tx_channels) {
+ efx->n_tx_channels =
+ max(efx->n_channels / 2, 1U);
+ efx->n_rx_channels =
+ max(efx->n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = efx->n_channels;
+ efx->n_rx_channels = efx->n_channels;
+ }
+ rc = efx_init_rx_cpu_rmap(efx, xentries);
+ if (rc) {
+ pci_disable_msix(efx->pci_dev);
+ return rc;
+ }
+ for (i = 0; i < n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ } else {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ return 0;
+}
+
+static void efx_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+static void efx_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx->tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+
+ /* We need to adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EFX_TXQ_TYPES);
+ }
+}
+
+static int efx_probe_nic(struct efx_nic *efx)
+{
+ size_t i;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
+
+ /* Carry out hardware-type specific initialisation */
+ rc = efx->type->probe(efx);
+ if (rc)
+ return rc;
+
+ /* Determine the number of channels and queues by trying to hook
+ * in MSI-X interrupts. */
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ if (efx->n_channels > 1)
+ get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] = i % efx->n_rx_channels;
+
+ efx_set_channels(efx);
+ netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+
+ /* Initialise the interrupt moderation settings */
+ efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
+ true);
+
+ return 0;
+
+fail:
+ efx->type->remove(efx);
+ return rc;
+}
+
+static void efx_remove_nic(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
+
+ efx_remove_interrupts(efx);
+ efx->type->remove(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC startup/shutdown
+ *
+ *************************************************************************/
+
+static int efx_probe_all(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_probe_nic(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
+ goto fail1;
+ }
+
+ rc = efx_probe_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create port\n");
+ goto fail2;
+ }
+
+ efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto fail3;
+
+ rc = efx_probe_filters(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create filter tables\n");
+ goto fail4;
+ }
+
+ return 0;
+
+ fail4:
+ efx_remove_channels(efx);
+ fail3:
+ efx_remove_port(efx);
+ fail2:
+ efx_remove_nic(efx);
+ fail1:
+ return rc;
+}
+
+/* Called after previous invocation(s) of efx_stop_all, restarts the
+ * port, kernel transmit queue, NAPI processing and hardware interrupts,
+ * and ensures that the port is scheduled to be reconfigured.
+ * This function is safe to call multiple times when the NIC is in any
+ * state. */
+static void efx_start_all(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock */
+ if (efx->port_enabled)
+ return;
+ if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
+ return;
+ if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
+ return;
+
+ /* Mark the port as enabled so port reconfigurations can start, then
+ * restart the transmit interface early so the watchdog timer stops */
+ efx_start_port(efx);
+
+ if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+
+ efx_for_each_channel(channel, efx)
+ efx_start_channel(channel);
+
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
+ efx_nic_enable_interrupts(efx);
+
+ /* Switch to event based MCDI completions after enabling interrupts.
+ * If a reset has been scheduled, then we need to stay in polled mode.
+ * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
+ * reset_pending [modified from an atomic context], we instead guarantee
+ * that efx_mcdi_mode_poll() isn't reverted erroneously */
+ efx_mcdi_mode_event(efx);
+ if (efx->reset_pending)
+ efx_mcdi_mode_poll(efx);
+
+ /* Start the hardware monitor if there is one. Otherwise (we're link
+ * event driven), we have to poll the PHY because after an event queue
+ * flush, we could have missed a link state change */
+ if (efx->type->monitor != NULL) {
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+ } else {
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx->type->start_stats(efx);
+}
+
+/* Flush all delayed work. Should only be called when no more delayed work
+ * will be scheduled. This doesn't flush pending online resets (efx_reset),
+ * since we're holding the rtnl_lock at this point. */
+static void efx_flush_all(struct efx_nic *efx)
+{
+ /* Make sure the hardware monitor is stopped */
+ cancel_delayed_work_sync(&efx->monitor_work);
+ /* Stop scheduled port reconfigurations */
+ cancel_work_sync(&efx->mac_work);
+}
+
+/* Quiesce hardware and software without bringing the link down.
+ * Safe to call multiple times, when the NIC and interface are in any
+ * state. The caller is guaranteed to subsequently be in a position
+ * to modify any hardware and software state they see fit without
+ * taking locks. */
+static void efx_stop_all(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ efx->type->stop_stats(efx);
+
+ /* Switch to MCDI polling on Siena before disabling interrupts */
+ efx_mcdi_mode_poll(efx);
+
+ /* Disable interrupts and wait for ISR to complete */
+ efx_nic_disable_interrupts(efx);
+ if (efx->legacy_irq) {
+ synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+ }
+
+ /* Stop all NAPI processing and synchronous rx refills */
+ efx_for_each_channel(channel, efx)
+ efx_stop_channel(channel);
+
+ /* Stop all asynchronous port reconfigurations. Since all
+ * event processing has already been stopped, there is no
+ * window to lose phy events */
+ efx_stop_port(efx);
+
+ /* Flush efx_mac_work(), refill_workqueue, monitor_work */
+ efx_flush_all(efx);
+
+ /* Stop the kernel transmit interface late, so the watchdog
+ * timer isn't ticking over the flush */
+ if (efx_dev_registered(efx)) {
+ netif_tx_stop_all_queues(efx->net_dev);
+ netif_tx_lock_bh(efx->net_dev);
+ netif_tx_unlock_bh(efx->net_dev);
+ }
+}
+
+static void efx_remove_all(struct efx_nic *efx)
+{
+ efx_remove_filters(efx);
+ efx_remove_channels(efx);
+ efx_remove_port(efx);
+ efx_remove_nic(efx);
+}
+
+/**************************************************************************
+ *
+ * Interrupt moderation
+ *
+ **************************************************************************/
+
+static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution)
+{
+ if (usecs == 0)
+ return 0;
+ if (usecs < resolution)
+ return 1; /* never round down to 0 */
+ return usecs / resolution;
+}
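+
+/* Worked example (assuming a 5 usec hardware resolution, i.e.
+ * EFX_IRQ_MOD_RESOLUTION == 5): the default rx_irq_mod_usec = 60 maps to
+ * 12 ticks and tx_irq_mod_usec = 150 to 30 ticks, while any non-zero
+ * request under 5 usec still rounds up to 1 tick rather than down to
+ * "no moderation".
+ */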
+
+/* Set interrupt moderation parameters */
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx)
+{
+ struct efx_channel *channel;
+ unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
+ unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX)
+ return -EINVAL;
+
+ if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
+ !rx_may_override_tx) {
+ netif_err(efx, drv, efx->net_dev, "Channels are shared. "
+ "RX and TX IRQ moderation must be equal\n");
+ return -EINVAL;
+ }
+
+ efx->irq_rx_adaptive = rx_adaptive;
+ efx->irq_rx_moderation = rx_ticks;
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel))
+ channel->irq_moderation = rx_ticks;
+ else if (efx_channel_has_tx_queues(channel))
+ channel->irq_moderation = tx_ticks;
+ }
+
+ return 0;
+}
+
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive)
+{
+ *rx_adaptive = efx->irq_rx_adaptive;
+ *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION;
+
+ /* If channels are shared between RX and TX, so is IRQ
+ * moderation. Otherwise, IRQ moderation is the same for all
+ * TX channels and is not adaptive.
+ */
+ if (efx->tx_channel_offset == 0)
+ *tx_usecs = *rx_usecs;
+ else
+ *tx_usecs =
+ efx->channel[efx->tx_channel_offset]->irq_moderation *
+ EFX_IRQ_MOD_RESOLUTION;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+ /* If the mac_lock is already held then it is likely a port
+ * reconfiguration is already in place, which will likely do
+ * most of the work of monitor() anyway. */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * ioctls
+ *
+ *************************************************************************/
+
+/* Net device ioctl
+ * Context: process, rtnl_lock() held.
+ */
+static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* Convert phy_id from older PRTAD/DEVAD format */
+ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+ (data->phy_id & 0xfc00) == 0x0400)
+ data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+ return mdio_mii_ioctl(&efx->mdio, data, cmd);
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ **************************************************************************/
+
+static void efx_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+ }
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+ channel->napi_dev = NULL;
+}
+
+static void efx_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
+
+/**************************************************************************
+ *
+ * Kernel netpoll interface
+ *
+ *************************************************************************/
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/* Although in the common case interrupts will be disabled, this is not
+ * guaranteed. However, all our work happens inside the NAPI callback,
+ * so no locking is required.
+ */
+static void efx_netpoll(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_schedule_channel(channel);
+}
+
+#endif
+
+/**************************************************************************
+ *
+ * Kernel net device interface
+ *
+ *************************************************************************/
+
+/* Context: process, rtnl_lock() held. */
+static int efx_net_open(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
+
+ if (efx->state == STATE_DISABLED)
+ return -EIO;
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ return -EBUSY;
+ if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
+ return -EIO;
+
+ /* Notify the kernel of the link state polled during driver load,
+ * before the monitor starts running */
+ efx_link_status_changed(efx);
+
+ efx_start_all(efx);
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really return void.
+ */
+static int efx_net_stop(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
+
+ if (efx->state != STATE_DISABLED) {
+ /* Stop the device and flush all the channels */
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+ efx_init_channels(efx);
+ }
+
+ return 0;
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_mac_stats *mac_stats = &efx->mac_stats;
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx);
+ spin_unlock_bh(&efx->stats_lock);
+
+ stats->rx_packets = mac_stats->rx_packets;
+ stats->tx_packets = mac_stats->tx_packets;
+ stats->rx_bytes = mac_stats->rx_bytes;
+ stats->tx_bytes = mac_stats->tx_bytes;
+ stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
+ stats->multicast = mac_stats->rx_multicast;
+ stats->collisions = mac_stats->tx_collision;
+ stats->rx_length_errors = (mac_stats->rx_gtjumbo +
+ mac_stats->rx_length_error);
+ stats->rx_crc_errors = mac_stats->rx_bad;
+ stats->rx_frame_errors = mac_stats->rx_align_error;
+ stats->rx_fifo_errors = mac_stats->rx_overflow;
+ stats->rx_missed_errors = mac_stats->rx_missed;
+ stats->tx_window_errors = mac_stats->tx_late_collision;
+
+ stats->rx_errors = (stats->rx_length_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors +
+ mac_stats->rx_symbol_error);
+ stats->tx_errors = (stats->tx_window_errors +
+ mac_stats->tx_bad);
+
+ return stats;
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+static void efx_watchdog(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
+
+ efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc = 0;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (new_mtu > EFX_MAX_MTU)
+ return -EINVAL;
+
+ efx_stop_all(efx);
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ efx_fini_channels(efx);
+
+ mutex_lock(&efx->mac_lock);
+ /* Reconfigure the MAC before enabling the dma queues so that
+ * the RX buffers don't overflow */
+ net_dev->mtu = new_mtu;
+ efx->mac_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_init_channels(efx);
+
+ efx_start_all(efx);
+ return rc;
+}
+
+static int efx_set_mac_address(struct net_device *net_dev, void *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct sockaddr *addr = data;
+ char *new_addr = addr->sa_data;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (!is_valid_ether_addr(new_addr)) {
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
+ return -EINVAL;
+ }
+
+ memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+
+ /* Reconfigure the MAC */
+ mutex_lock(&efx->mac_lock);
+ efx->mac_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+static void efx_set_multicast_list(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct netdev_hw_addr *ha;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ set_bit_le(bit, mc_hash->byte);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ set_bit_le(0xff, mc_hash->byte);
+ }
+
+ if (efx->port_enabled)
+ queue_work(efx->workqueue, &efx->mac_work);
+ /* Otherwise efx_start_port() will do this */
+}
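+
+/* A worked example of the hash above, assuming EFX_MCAST_HASH_ENTRIES
+ * is 256 so that the low 8 bits of the little-endian CRC select a bit:
+ * ether_crc_le(ETH_ALEN, ff:ff:ff:ff:ff:ff) == 0xbe2612ff, and
+ * 0xbe2612ff & (256 - 1) == 0xff, which is why bit 0xff is forced on
+ * so that broadcast frames always pass the filter.
+ */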
+
+static int efx_set_features(struct net_device *net_dev, u32 data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE)
+ efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+
+ return 0;
+}
+
+static const struct net_device_ops efx_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_multicast_list,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+ .ndo_setup_tc = efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
+static void efx_update_name(struct efx_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+ efx_mtd_rename(efx);
+ efx_set_channel_names(efx);
+}
+
+static int efx_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *net_dev = ptr;
+
+ if (net_dev->netdev_ops == &efx_netdev_ops &&
+ event == NETDEV_CHANGENAME)
+ efx_update_name(netdev_priv(net_dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efx_netdev_notifier = {
+ .notifier_call = efx_netdev_event,
+};
+
+static ssize_t
+show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", efx->phy_type);
+}
+static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+
+static int efx_register_netdev(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_channel *channel;
+ int rc;
+
+ net_dev->watchdog_timeo = 5 * HZ;
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+
+ /* Clear MAC statistics */
+ efx->mac_op->update_stats(efx);
+ memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
+
+ rtnl_lock();
+
+ rc = dev_alloc_name(net_dev, net_dev->name);
+ if (rc < 0)
+ goto fail_locked;
+ efx_update_name(efx);
+
+ rc = register_netdevice(net_dev);
+ if (rc)
+ goto fail_locked;
+
+ efx_for_each_channel(channel, efx) {
+ struct efx_tx_queue *tx_queue;
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(efx->net_dev);
+
+ rtnl_unlock();
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_registered;
+ }
+
+ return 0;
+
+fail_locked:
+ rtnl_unlock();
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+ return rc;
+
+fail_registered:
+ unregister_netdev(net_dev);
+ return rc;
+}
+
+static void efx_unregister_netdev(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ if (!efx->net_dev)
+ return;
+
+ BUG_ON(netdev_priv(efx->net_dev) != efx);
+
+ /* Free up any skbs still remaining. This must happen before
+ * we try to unregister the netdev, since running their
+ * destructors may be needed to get the device ref. count to 0. */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_release_tx_buffers(tx_queue);
+ }
+
+ if (efx_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset. */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ efx_stop_all(efx);
+ mutex_lock(&efx->mac_lock);
+
+ efx_fini_channels(efx);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE. */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ if (efx->phy_op->reconfigure(efx))
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ efx->mac_op->reconfigure(efx);
+
+ efx_init_channels(efx);
+ efx_restore_filters(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc, rc2;
+ bool disabled;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ netif_device_detach(efx->net_dev);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
+ */
+ efx->reset_pending &= -(1 << (method + 1));
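+ /* For example, with method == 2 on a 32-bit long:
+ * -(1 << 3) == 0xfffffff8, so bits 0..2 (the scopes this reset
+ * covered) are cleared while any higher-numbered, larger-scope
+ * requests stay pending. */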
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+ * RESET_TYPE_DISABLE since this driver generally assumes the hardware
+ * can respond to requests. */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc || method == RESET_TYPE_DISABLE;
+ rc2 = efx_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ netif_device_attach(efx->net_dev);
+ }
+ return rc;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending = ACCESS_ONCE(efx->reset_pending);
+
+ if (!pending)
+ return;
+
+ /* If we're not RUNNING then don't reset. Leave the reset_pending
+ * flags set so that efx_pci_probe_main will be retried */
+ if (efx->state != STATE_RUNNING) {
+ netif_info(efx, drv, efx->net_dev,
+ "scheduled reset quenched. NIC not RUNNING\n");
+ return;
+ }
+
+ rtnl_lock();
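+ /* fls() is 1-based, so fls(pending) - 1 is the highest-numbered
+ * (largest-scope) reset type requested; e.g. pending == 0x5 gives
+ * fls() == 3 and hence method 2, which subsumes the type-0 request. */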
+ (void)efx_reset(efx, fls(pending) - 1);
+ rtnl_unlock();
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+
+ /* efx_process_channel() will no longer read events once a
+ * reset is scheduled. So switch back to polled MCDI completions. */
+ efx_mcdi_mode_poll(efx);
+
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+/**************************************************************************
+ *
+ * List of NICs we support
+ *
+ **************************************************************************/
+
+/* PCI device ID table */
+static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
+ {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
+ .driver_data = (unsigned long) &falcon_a1_nic_type},
+ {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
+ .driver_data = (unsigned long) &falcon_b0_nic_type},
+ {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
+ .driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
+ .driver_data = (unsigned long) &siena_a0_nic_type},
+ {0} /* end of list */
+};
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+ return false;
+}
+
+static const struct efx_phy_operations efx_dummy_phy_operations = {
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_poll,
+ .fini = efx_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int i;
+
+ /* Initialise common structures */
+ memset(efx, 0, sizeof(*efx));
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_INIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ spin_lock_init(&efx->stats_lock);
+ mutex_init(&efx->mac_lock);
+ efx->mac_op = type->default_mac_ops;
+ efx->phy_op = &efx_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ goto fail;
+ }
+
+ efx->type = type;
+
+ EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
+
+ /* Higher numbered interrupt modes are less capable! */
+ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+ interrupt_mode);
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue)
+ goto fail;
+
+ return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return -ENOMEM;
+}
+
+static void efx_fini_struct(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ kfree(efx->channel[i]);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+/**************************************************************************
+ *
+ * PCI interface
+ *
+ **************************************************************************/
+
+/* Main body of final NIC shutdown code
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove_main(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+ efx_nic_fini_interrupt(efx);
+ efx_fini_channels(efx);
+ efx_fini_port(efx);
+ efx->type->fini(efx);
+ efx_fini_napi(efx);
+ efx_remove_all(efx);
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx;
+
+ efx = pci_get_drvdata(pci_dev);
+ if (!efx)
+ return;
+
+ /* Mark the NIC as fini, then stop the interface */
+ rtnl_lock();
+ efx->state = STATE_FINI;
+ dev_close(efx->net_dev);
+
+ /* Allow any queued efx_resets() to complete */
+ rtnl_unlock();
+
+ efx_unregister_netdev(efx);
+
+ efx_mtd_remove(efx);
+
+ /* Wait for any scheduled resets to complete. No more will be
+ * scheduled from this point because efx_stop_all() has been
+ * called, we are no longer registered with driverlink, and
+ * the net_device has been removed. */
+ cancel_work_sync(&efx->reset_work);
+
+ efx_pci_remove_main(efx);
+
+ efx_fini_io(efx);
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ free_netdev(efx->net_dev);
+};
+
+/* Main body of NIC initialisation
+ * This is called at module load (or hotplug insertion, theoretically).
+ */
+static int efx_pci_probe_main(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Do start-of-day initialisation */
+ rc = efx_probe_all(efx);
+ if (rc)
+ goto fail1;
+
+ efx_init_napi(efx);
+
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise NIC\n");
+ goto fail3;
+ }
+
+ rc = efx_init_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise port\n");
+ goto fail4;
+ }
+
+ efx_init_channels(efx);
+
+ rc = efx_nic_init_interrupt(efx);
+ if (rc)
+ goto fail5;
+
+ return 0;
+
+ fail5:
+ efx_fini_channels(efx);
+ efx_fini_port(efx);
+ fail4:
+ efx->type->fini(efx);
+ fail3:
+ efx_fini_napi(efx);
+ efx_remove_all(efx);
+ fail1:
+ return rc;
+}
+
+/* NIC initialisation
+ *
+ * This is called at module load (or hotplug insertion,
+ * theoretically). It sets up PCI mappings, tests and resets the NIC,
+ * sets up and registers the network devices with the kernel and hooks
+ * the interrupt service routine. It does not prepare the device for
+ * transmission; this is left to the first time one of the network
+ * interfaces is brought up (i.e. efx_net_open).
+ */
+static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
+{
+ const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
+ struct net_device *net_dev;
+ struct efx_nic *efx;
+ int i, rc;
+
+ /* Allocate and initialise a struct net_device and struct efx_nic */
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+ EFX_MAX_RX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ net_dev->features |= (type->offload_features | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_TSO |
+ NETIF_F_RXCSUM);
+ if (type->offload_features & NETIF_F_V6_CSUM)
+ net_dev->features |= NETIF_F_TSO6;
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+ /* All offloads can be toggled */
+ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
+ efx = netdev_priv(net_dev);
+ pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+ rc = efx_init_struct(efx, type, pci_dev, net_dev);
+ if (rc)
+ goto fail1;
+
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare NIC detected\n");
+
+ /* Set up basic I/O (BAR mappings etc) */
+ rc = efx_init_io(efx);
+ if (rc)
+ goto fail2;
+
+ /* No serialisation is required with the reset path because
+ * we're in STATE_INIT. */
+ for (i = 0; i < 5; i++) {
+ rc = efx_pci_probe_main(efx);
+
+ /* Serialise against efx_reset(). No more resets will be
+ * scheduled since efx_stop_all() has been called, and we
+ * have never been registered with either the rtnetlink
+ * or driverlink layers. */
+ cancel_work_sync(&efx->reset_work);
+
+ if (rc == 0) {
+ if (efx->reset_pending) {
+ /* If there was a scheduled reset during
+ * probe, the NIC is probably hosed anyway */
+ efx_pci_remove_main(efx);
+ rc = -EIO;
+ } else {
+ break;
+ }
+ }
+
+ /* Retry if a recoverable reset event has been scheduled */
+ if (efx->reset_pending &
+ ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
+ !efx->reset_pending)
+ goto fail3;
+
+ efx->reset_pending = 0;
+ }
+
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
+ goto fail4;
+ }
+
+ /* Switch to the running state before we expose the device to the OS,
+ * so that dev_open()|efx_start_all() will actually start the device */
+ efx->state = STATE_RUNNING;
+
+ rc = efx_register_netdev(efx);
+ if (rc)
+ goto fail5;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+ rtnl_lock();
+ efx_mtd_probe(efx); /* allowed to fail */
+ rtnl_unlock();
+ return 0;
+
+ fail5:
+ efx_pci_remove_main(efx);
+ fail4:
+ fail3:
+ efx_fini_io(efx);
+ fail2:
+ efx_fini_struct(efx);
+ fail1:
+ WARN_ON(rc > 0);
+ netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
+ free_netdev(net_dev);
+ return rc;
+}
+
+static int efx_pm_freeze(struct device *dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ efx->state = STATE_FINI;
+
+ netif_device_detach(efx->net_dev);
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+ return 0;
+}
+
+static int efx_pm_thaw(struct device *dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ efx->state = STATE_INIT;
+
+ efx_init_channels(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ netif_device_attach(efx->net_dev);
+
+ efx->state = STATE_RUNNING;
+
+ efx->type->resume_wol(efx);
+
+ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+ queue_work(reset_workqueue, &efx->reset_work);
+
+ return 0;
+}
+
+static int efx_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ efx->type->fini(efx);
+
+ efx->reset_pending = 0;
+
+ pci_save_state(pci_dev);
+ return pci_set_power_state(pci_dev, PCI_D3hot);
+}
+
+/* Used for both resume and restore */
+static int efx_pm_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ int rc;
+
+ rc = pci_set_power_state(pci_dev, PCI_D0);
+ if (rc)
+ return rc;
+ pci_restore_state(pci_dev);
+ rc = pci_enable_device(pci_dev);
+ if (rc)
+ return rc;
+ pci_set_master(efx->pci_dev);
+ rc = efx->type->reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ return rc;
+ rc = efx->type->init(efx);
+ if (rc)
+ return rc;
+ efx_pm_thaw(dev);
+ return 0;
+}
+
+static int efx_pm_suspend(struct device *dev)
+{
+ int rc;
+
+ efx_pm_freeze(dev);
+ rc = efx_pm_poweroff(dev);
+ if (rc)
+ efx_pm_resume(dev);
+ return rc;
+}
+
+static struct dev_pm_ops efx_pm_ops = {
+ .suspend = efx_pm_suspend,
+ .resume = efx_pm_resume,
+ .freeze = efx_pm_freeze,
+ .thaw = efx_pm_thaw,
+ .poweroff = efx_pm_poweroff,
+ .restore = efx_pm_resume,
+};
+
+static struct pci_driver efx_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = efx_pci_table,
+ .probe = efx_pci_probe,
+ .remove = efx_pci_remove,
+ .driver.pm = &efx_pm_ops,
+};
+
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+static int __init efx_init_module(void)
+{
+ int rc;
+
+ printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+
+ rc = register_netdevice_notifier(&efx_netdev_notifier);
+ if (rc)
+ goto err_notifier;
+
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ rc = -ENOMEM;
+ goto err_reset;
+ }
+
+ rc = pci_register_driver(&efx_pci_driver);
+ if (rc < 0)
+ goto err_pci;
+
+ return 0;
+
+ err_pci:
+ destroy_workqueue(reset_workqueue);
+ err_reset:
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+ err_notifier:
+ return rc;
+}
+
+static void __exit efx_exit_module(void)
+{
+ printk(KERN_INFO "Solarflare NET driver unloading\n");
+
+ pci_unregister_driver(&efx_pci_driver);
+ destroy_workqueue(reset_workqueue);
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+}
+
+module_init(efx_init_module);
+module_exit(efx_exit_module);
+
+MODULE_AUTHOR("Solarflare Communications and "
+ "Michael Brown <mbrown@fensystems.co.uk>");
+MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efx_pci_table);
--- /dev/null
- wmb();
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy:
+ *
+ * Most CSRs are 128-bit (oword) and therefore cannot be read or
+ * written atomically. Access from the host is buffered by the Bus
+ * Interface Unit (BIU). Whenever the host reads from the lowest
+ * address of such a register, or from the address of a different such
+ * register, the BIU latches the register's value. Subsequent reads
+ * from higher addresses of the same register will read the latched
+ * value. Whenever the host writes part of such a register, the BIU
+ * collects the written value and does not write to the underlying
+ * register until all 4 dwords have been written. A similar buffering
+ * scheme applies to host access to the NIC's 64-bit SRAM.
+ *
+ * Access to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes or lost
+ * information from read-to-clear fields. We use efx_nic::biu_lock
+ * for this. (We could use separate locks for read and write, but
+ * this is not normally a performance bottleneck.)
+ *
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
+ *
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ * replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ * (i.e. the high 32 bits) the underlying register will always be
+ * written. If the collector and the current write together do not
+ * provide values for all 128 bits of the register, the low 96 bits
+ * will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ * register while the collector already holds values for some other
+ * register, the write is discarded and the collector maintains its
+ * current state.
+ */
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+ unsigned int reg)
+{
+ __raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+ unsigned int reg)
+{
+ __raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
- wmb();
+ mmiowb();
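+ /* mmiowb() keeps the MMIO writes above ordered ahead of the
+ * following spin_unlock, so that writes issued by two CPUs
+ * cannot interleave inside the BIU collector. */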
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+ efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+ addr, EFX_QWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ __raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+ __raw_writel((__force u32)value->u32[0], membase + addr);
+ __raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
- wmb();
+ mmiowb();
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
+static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg)
+{
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+
+ /* No lock required */
+ _efx_writed(efx, value->u32[0], reg);
- rmb();
+}
+
+/* Read a 128-bit CSR, locking as appropriate. */
+static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ value->u32[0] = _efx_readd(efx, reg + 0);
- rmb();
+ value->u32[1] = _efx_readd(efx, reg + 4);
+ value->u32[2] = _efx_readd(efx, reg + 8);
+ value->u32[3] = _efx_readd(efx, reg + 12);
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+}
+
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
+ efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+ value->u32[0] = (__force __le32)__raw_readl(membase + addr);
- wmb();
+ value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+ addr, EFX_QWORD_VAL(*value));
+}
+
+/* Read a 32-bit CSR or SRAM */
+static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg)
+{
+ value->u32[0] = _efx_readd(efx, reg);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write a 128-bit CSR forming part of a table */
+static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read a 128-bit CSR forming part of a table */
+static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
+static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
+static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
+}
+
+/* Page-mapped register block size */
+#define EFX_PAGE_BLOCK_SIZE 0x2000
+
+/* Calculate offset to page-mapped register block */
+#define EFX_PAGED_REG(page, reg) \
+ ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
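+
+/* For instance, EFX_PAGED_REG(2, 0x400) == 2 * 0x2000 + 0x400 == 0x4400;
+ * each 8KB page exposes another copy of the same register block.
+ */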
+
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ reg = EFX_PAGED_REG(page, reg);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+}
+#define efx_writeo_page(efx, value, reg, page) \
+ _efx_writeo_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+ page)
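+
+/* The BUILD_BUG_ON_ZERO() term above is a compile-time guard: it
+ * evaluates to 0 (leaving reg unchanged) when the condition is false
+ * and breaks the build otherwise, so efx_writeo_page() can only be
+ * used with the RX_DESC_UPD (0x830) and TX_DESC_UPD (0xa10) offsets
+ * that the BIU special-cases.
+ */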
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
+ * RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+}
+#define efx_writed_page(efx, value, reg, page) \
+ _efx_writed_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
+ && (reg) != 0xa1c), \
+ page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+ efx_dword_t *value,
+ unsigned int reg,
+ unsigned int page)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ if (page == 0) {
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+ } else {
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ }
+}
+#define efx_writed_page_locked(efx, value, reg, page) \
+ _efx_writed_page_locked(efx, value, \
+ reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+ page)
+
+#endif /* EFX_IO_H */
--- /dev/null
- static inline void
- efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
- {
- struct siena_nic_data *nic_data = efx->nic_data;
- value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
- }
-
- static inline void
- efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
- {
- struct siena_nic_data *nic_data = efx->nic_data;
- __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
- }
-
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2008-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "io.h"
+#include "regs.h"
+#include "mcdi_pcol.h"
+#include "phy.h"
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+/* Software-defined layout of the shared memory */
+#define CMD_NOTIFY_PORT0 0
+#define CMD_NOTIFY_PORT1 4
+#define CMD_PDU_PORT0 0x008
+#define CMD_PDU_PORT1 0x108
+#define REBOOT_FLAG_PORT0 0x3f8
+#define REBOOT_FLAG_PORT1 0x3fc
+
+#define MCDI_RPC_TIMEOUT 10 /* seconds */
+
+#define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
+#define MCDI_DOORBELL(efx) \
+ (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
+#define MCDI_REBOOT_FLAG(efx) \
+ (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)
+
+#define SEQ_MASK \
+ EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
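+
+/* SEQ_MASK is derived from the width of the header's SEQ field
+ * (assumed here to be the 4-bit field defined in mcdi_pcol.h, giving
+ * a mask of 0xf), so sequence numbers wrap modulo 16 and are compared
+ * as (tx_seq ^ rx_seq) & SEQ_MASK rather than by equality.
+ */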
+
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data;
+ EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+ nic_data = efx->nic_data;
+ return &nic_data->mcdi;
+}
+
- unsigned pdu = MCDI_PDU(efx);
- unsigned doorbell = MCDI_DOORBELL(efx);
+void efx_mcdi_init(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ init_waitqueue_head(&mcdi->wq);
+ spin_lock_init(&mcdi->iface_lock);
+ atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ mcdi->mode = MCDI_MODE_POLL;
+
+ (void) efx_mcdi_poll_reboot(efx);
+}
+
+static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
+ const u8 *inbuf, size_t inlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- efx_mcdi_writed(efx, &hdr, pdu);
++ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
++ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned int i;
+ efx_dword_t hdr;
+ u32 xflags, seqno;
+
+ BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
+ BUG_ON(inlen & 3 || inlen >= 0x100);
+
+ seqno = mcdi->seqno & SEQ_MASK;
+ xflags = 0;
+ if (mcdi->mode == MCDI_MODE_EVENTS)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ EFX_POPULATE_DWORD_6(hdr,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags);
+
- efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
- pdu + 4 + i);
++ efx_writed(efx, &hdr, pdu);
+
+ for (i = 0; i < inlen; i += 4)
- EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
- efx_mcdi_writed(efx, &hdr, doorbell);
++ _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
++
++ /* Ensure the header and payload are written out before the doorbell */
++ wmb();
+
+ /* ring the doorbell with a distinctive value */
- unsigned int pdu = MCDI_PDU(efx);
++ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
++ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ int i;
+
+ BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
+ BUG_ON(outlen & 3 || outlen >= 0x100);
+
+ for (i = 0; i < outlen; i += 4)
- unsigned int pdu = MCDI_PDU(efx);
++ *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+}
+
+static int efx_mcdi_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned int time, finish;
+ unsigned int respseq, respcmd, error;
- efx_mcdi_readd(efx, ®, pdu);
++ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int rc, spins;
+ efx_dword_t reg;
+
+ /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
+ rc = -efx_mcdi_poll_reboot(efx);
+ if (rc)
+ goto out;
+
+ /* Poll for completion. Poll quickly (once per microsecond) for
+ * the first jiffy, because MCDI responses are generally fast.
+ * After that, back off and poll approximately once per jiffy.
+ */
+ spins = TICK_USEC;
+ finish = get_seconds() + MCDI_RPC_TIMEOUT;
+
+ while (1) {
+ if (spins != 0) {
+ --spins;
+ udelay(1);
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ time = get_seconds();
+
- efx_mcdi_readd(efx, ®, pdu + 4);
++ rmb();
++ efx_readd(efx, ®, pdu);
+
+ /* All 1's indicates that shared memory is in reset (and is
+ * not a valid header). Wait for it to come out of reset before
+ * completing the command */
+ if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
+ EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ break;
+
+ if (time >= finish)
+ return -ETIMEDOUT;
+ }
+
+ mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
+ respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
+
+ if (error && mcdi->resplen == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ rc = EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ rc = EIO;
+ } else if (error) {
- unsigned int addr = MCDI_REBOOT_FLAG(efx);
++ efx_readd(efx, ®, pdu + 4);
+ switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ rc = name; \
+ break
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+#undef TRANSLATE_ERROR
+ default:
+ rc = EIO;
+ break;
+ }
+ } else
+ rc = 0;
+
+out:
+ mcdi->resprc = rc;
+ if (rc)
+ mcdi->resplen = 0;
+
+ /* Return rc=0 like wait_event_timeout() */
+ return 0;
+}
+
+/* Test and clear MC-rebooted flag for this port/function */
+int efx_mcdi_poll_reboot(struct efx_nic *efx)
+{
- efx_mcdi_readd(efx, ®, addr);
++ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
+ efx_dword_t reg;
+ uint32_t value;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return false;
+
- efx_mcdi_writed(efx, ®, addr);
++ efx_readd(efx, ®, addr);
+ value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+ if (value == 0)
+ return 0;
+
+ EFX_ZERO_DWORD(reg);
++ efx_writed(efx, ®, addr);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return -EINTR;
+ else
+ return -EIO;
+}
+
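+/* A sketch of the interface state machine driven by the helpers below:
+ *
+ * QUIESCENT --acquire--> RUNNING --complete--> COMPLETED
+ *     ^                                            |
+ *     +------------------release-------------------+
+ *
+ * acquire() sleeps until it wins the QUIESCENT->RUNNING cmpxchg; a
+ * duplicate completion finds the state != RUNNING and is ignored.
+ */
+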
+static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+{
+ /* Wait until the interface becomes QUIESCENT and we win the race
+ * to mark it RUNNING. */
+ wait_event(mcdi->wq,
+ atomic_cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT,
+ MCDI_STATE_RUNNING)
+ == MCDI_STATE_QUIESCENT);
+}
+
+static int efx_mcdi_await_completion(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ if (wait_event_timeout(
+ mcdi->wq,
+ atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
+ msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
+ return -ETIMEDOUT;
+
+ /* Check if efx_mcdi_set_mode() switched us back to polled completions.
+ * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
+ * completed the request first, then we'll just end up completing the
+ * request again, which is safe.
+ *
+ * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
+ * wait_event_timeout() implicitly provides.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL)
+ return efx_mcdi_poll(efx);
+
+ return 0;
+}
+
+static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+{
+ /* If the interface is RUNNING, then move to COMPLETED and wake any
+ * waiters. If the interface isn't in RUNNING then we've received a
+ * duplicate completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment seqno, so would
+ * have failed the seqno check].
+ */
+ if (atomic_cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+ wake_up(&mcdi->wq);
+ return true;
+ }
+
+ return false;
+}
+
+static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
+{
+ atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ wake_up(&mcdi->wq);
+}
+
+static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
+ unsigned int datalen, unsigned int errno)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool wake = false;
+
+ spin_lock(&mcdi->iface_lock);
+
+ if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
+ if (mcdi->credits)
+ /* The request has been cancelled */
+ --mcdi->credits;
+ else
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx "
+ "seq 0x%x\n", seqno, mcdi->seqno);
+ } else {
+ mcdi->resprc = errno;
+ mcdi->resplen = datalen;
+
+ wake = true;
+ }
+
+ spin_unlock(&mcdi->iface_lock);
+
+ if (wake)
+ efx_mcdi_complete(mcdi);
+}
+
+/* Issue the given command by writing the data into the shared memory PDU,
+ * ring the doorbell and wait for completion. Copyout the result. */
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+
+ efx_mcdi_acquire(mcdi);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+ } else {
+ size_t resplen;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = -mcdi->resprc;
+ resplen = mcdi->resplen;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ if (rc == 0) {
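+ /* Round the response length up to a whole number of
+ * dwords, capped at the caller's buffer: e.g. resplen == 6
+ * with outlen == 16 copies min(16, 9) & ~3 == 8 bytes. */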
+ efx_mcdi_copyout(efx, outbuf,
+ min(outlen, mcdi->resplen + 3) & ~0x3);
+ if (outlen_actual != NULL)
+ *outlen_actual = resplen;
+ } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
+ ; /* Don't reset if MC_CMD_REBOOT returns EIO */
+ else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d\n",
+ cmd, (int)inlen, -rc);
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+void efx_mcdi_mode_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ if (mcdi->mode == MCDI_MODE_POLL)
+ return;
+
+ /* We can switch from event completion to polled completion, because
+ * mcdi requests are always completed in shared memory. We do this by
+ * switching the mode to POLL and then completing the request.
+ * efx_mcdi_await_completion() will then call efx_mcdi_poll().
+ *
+ * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
+ * which efx_mcdi_complete() provides for us.
+ */
+ mcdi->mode = MCDI_MODE_POLL;
+
+ efx_mcdi_complete(mcdi);
+}
+
+void efx_mcdi_mode_event(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS)
+ return;
+
+ /* We can't switch from polled to event completion in the middle of a
+ * request, because the completion method is specified in the request.
+ * So acquire the interface to serialise the requestors. We don't need
+ * to acquire the iface_lock to change the mode here, but we do need a
+ * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
+ * efx_mcdi_acquire() provides.
+ */
+ efx_mcdi_acquire(mcdi);
+ mcdi->mode = MCDI_MODE_EVENTS;
+ efx_mcdi_release(mcdi);
+}
+
+static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ /* If there is an outstanding MCDI request, it has been terminated
+ * either by a BADASSERT or REBOOT event. If the mcdi interface is
+ * in polled mode, then do nothing because the MC reboot handler will
+ * set the header correctly. However, if the mcdi interface is waiting
+ * for a CMDDONE event it won't receive it [and since all MCDI events
+ * are sent to the same queue, we can't be racing with
+ * efx_mcdi_ev_cpl()]
+ *
+ * There's a race here with efx_mcdi_rpc(), because we might receive
+ * a REBOOT event *before* the request has been copied out. In polled
+ * mode (during startup) this is irrelevant, because efx_mcdi_complete()
+ * is ignored. In event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI request. Did the MC
+ * reboot before or after the copyout? The best we can do is
+ * always just return failure.
+ */
+ spin_lock(&mcdi->iface_lock);
+ if (efx_mcdi_complete(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = rc;
+ mcdi->resplen = 0;
+ ++mcdi->credits;
+ }
+ } else
+ /* Nobody was waiting for an MCDI request, so trigger a reset */
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+
+ spin_unlock(&mcdi->iface_lock);
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+};
+
+static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
+
+static const char *sensor_names[] = {
+ [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
+ [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
+ [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
+ [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
+ [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
+ [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
+ [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
+ [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
+ [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
+ [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
+ [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
+ [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
+ [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
+};
+
+static const char *sensor_status_names[] = {
+ [MC_CMD_SENSOR_STATE_OK] = "OK",
+ [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
+ [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
+ [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+};
+
+static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ unsigned int monitor, state, value;
+ const char *name, *state_txt;
+ monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
+ state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
+ value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
+ /* Deal gracefully with the board having more sensors than we
+ * know about, but do not expect new sensor states. */
+ name = (monitor >= ARRAY_SIZE(sensor_names))
+ ? "No sensor name available" :
+ sensor_names[monitor];
+ EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+ state_txt = sensor_status_names[state];
+
+ netif_err(efx, hw, efx->net_dev,
+ "Sensor %d (%s) reports condition '%s' for raw value %d\n",
+ monitor, name, state_txt, value);
+}
+
+/* Called from falcon_process_eventq for MCDI events */
+void efx_mcdi_process_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+ u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
+
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ netif_err(efx, hw, efx->net_dev,
+ "MC watchdog or assertion failure at 0x%x\n", data);
+ efx_mcdi_ev_death(efx, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_PMNOTICE:
+ netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(efx,
+ MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
+ MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
+ MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE:
+ efx_mcdi_process_link_change(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SENSOREVT:
+ efx_mcdi_sensor_event(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SCHEDERR:
+ netif_info(efx, hw, efx->net_dev,
+ "MC Scheduler error address=0x%x\n", data);
+ break;
+ case MCDI_EVENT_CODE_REBOOT:
+ netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
+ efx_mcdi_ev_death(efx, EIO);
+ break;
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+ /* MAC stats are gathered lazily. We can ignore this. */
+ break;
+
+ default:
+ netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
+ code);
+ }
+}
+
+/**************************************************************************
+ *
+ * Specific request functions
+ *
+ **************************************************************************
+ */
+
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
+{
+ u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
+ size_t outlength;
+ const __le16 *ver_words;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc)
+ goto fail;
+
+ if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
+ snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ return;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ buf[0] = 0;
+}
+
+int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
+{
+ u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
+ u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
+ driver_operating ? 1 : 0);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (was_attached != NULL)
+ *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
+ return 0;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list)
+{
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
+ size_t outlen;
+ int port_num = efx_port_num(efx);
+ int offset;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ offset = (port_num)
+ ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
+ : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
+ if (mac_address)
+ memcpy(mac_address, outbuf + offset, ETH_ALEN);
+ if (fw_subtype_list)
+ memcpy(fw_subtype_list,
+ outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+ __func__, rc, (int)outlen);
+
+ return rc;
+}
+
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
+{
+ u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
+ u32 dest = 0;
+ int rc;
+
+ if (uart)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
+ if (evq)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
+
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
+
+ BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
+{
+ u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out)
+{
+ u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+ *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
+ *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_PROTECTED_LBN));
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
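+ /* The MCDI transport carries dword-sized payloads, hence the
+ * variable-length request is rounded up to a 4-byte boundary. */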
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ goto fail1;
+
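+ /* nvram_types is a bitmask: bit n set means NVRAM partition type n
+ * exists, so walk the mask and test each partition present. */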
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ goto fail2;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+
+fail2:
+ netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+ __func__, type);
+fail1:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
+ u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
+ unsigned int flags, index, ofst;
+ const char *reason;
+ size_t outlen;
+ int retry;
+ int rc;
+
+ /* Attempt to read any stored assertion state before we reboot
+ * the mcfw out of the assertion handler. Retry twice, once
+ * because a boot-time assertion might cause this command to fail
+ * with EINTR. And once again because GET_ASSERTS can race with
+ * MC_CMD_REBOOT running on the other port. */
+ retry = 2;
+ do {
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
+ } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
+
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
+ return -EIO;
+
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return 0;
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : "unknown assertion";
+ netif_err(efx, hw, efx->net_dev,
+ "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1; index < 32; index++) {
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
+ MCDI_DWORD2(outbuf, ofst));
+ ofst += sizeof(efx_dword_t);
+ }
+
+ return 0;
+}
+
+static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+
+ /* Atomically reboot the mcfw out of the assertion handler */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc)
+ return rc;
+
+ efx_mcdi_exit_assertion(efx);
+
+ return 0;
+}
+
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
+ int rc;
+
+ BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
+ BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
+ BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
+
+ BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+}
+
+int efx_mcdi_reset_port(struct efx_nic *efx)
+{
+ int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
+ if (rc)
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_reset_mc(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ /* White is black, and up is down: rebooting the MC tears down the
+ * MCDI port, so -EIO here is the expected sign of a successful
+ * reboot, while a clean completion means the reboot did not happen. */
+ if (rc == -EIO)
+ return 0;
+ if (rc == 0)
+ rc = -EIO;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
+ const u8 *mac, int *id_out)
+{
+ u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
+ u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int
+efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
+{
+ return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
+}
+
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
+{
+ u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
+{
+ u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
--- /dev/null
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* We poll for flush-completion events once every EFX_FLUSH_INTERVAL ms,
+ * up to EFX_FLUSH_POLL_COUNT times.
+ */
+#define EFX_FLUSH_INTERVAL 10
+#define EFX_FLUSH_POLL_COUNT 100
+
+/* Size and alignment of special buffers (4KB) */
+#define EFX_BUF_SIZE 4096
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ (0x00010100 + (_channel)->channel)
+
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel) \
+ (0x00010200 + (_channel)->channel)
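+/* Each magic value encodes the originating channel number, letting the
+ * event handler verify that a generated event arrived on the queue it
+ * was sent to. */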
+
+/**************************************************************************
+ *
+ * Solarstorm hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_nic_test_registers(struct efx_nic *efx,
+ const struct efx_nic_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ /* Falcon should be in loopback to isolate the XMAC from the PHY */
+ WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* Bit sweep on and off: an oword is 128 bits, so walk all 128
+ * bit positions, skipping any the mask marks as untestable */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_qword_t buf_desc;
+ int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EFX_BUG_ON_PARANOID(!buffer->addr);
+
+ /* Write buffer descriptors to NIC */
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->dma_addr + (i * 4096);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
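+ /* The buffer table addresses 4KB regions, so store the 4KB
+ * page number of each fragment (dma_addr >> 12). */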
+ EFX_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EFX_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->addr)
+ return -ENOMEM;
+ buffer->len = len;
+ buffer->entries = len / EFX_BUF_SIZE;
+ BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* All zeros is a potentially valid event so memset to 0xff */
+ memset(buffer->addr, 0xff, len);
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
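+ /* Buffer IDs come from a simple bump allocator;
+ * efx_free_special_buffer() never returns them to the pool. */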
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
+
+ return 0;
+}
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, buffer->len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
+
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
+ buffer->dma_addr);
+ buffer->addr = NULL;
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status and MAC stats
+ *
+ **************************************************************************/
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len)
+{
+ buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
+ &buffer->dma_addr);
+ if (!buffer->addr)
+ return -ENOMEM;
+ buffer->len = len;
+ memset(buffer->addr, 0, len);
+ return 0;
+}
+
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+ if (buffer->addr) {
+ pci_free_consistent(efx->pci_dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
+ buffer->addr = NULL;
+ }
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
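+/* TX push is only safe if the queue was empty the last time the read
+ * pointer caught up with the write pointer: empty_read_count then holds
+ * that write_count with EFX_EMPTY_COUNT_VALID set, so the XOR below is
+ * zero exactly when nothing has been queued since the queue went empty.
+ */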
+static inline bool
+efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ /* Create TX descriptor ring entry */
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT, buffer->continuation,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_notify_tx_desc(tx_queue);
+ }
+}
+
+/* Allocate hardware resources for a TX queue */
+int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ tx_queue->flushed = FLUSH_NONE;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
+
+ efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ clear_bit_le(tx_queue->queue, (void *)&reg);
+ else
+ set_bit_le(tx_queue->queue, (void *)&reg);
+ efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
+ tx_queue->flushed = FLUSH_PENDING;
+
+ /* Post a flush command */
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_desc_ptr;
+
+ /* The queue should have been flushed */
+ WARN_ON(tx_queue->flushed != FLUSH_DONE);
+
+ /* Remove TX descriptor ring from card */
+ EFX_ZERO_OWORD(tx_desc_ptr);
+ efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ efx_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb();
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
+}
+
+int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+ bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->flushed = FLUSH_NONE;
+
+ /* Pin RX descriptor ring */
+ efx_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ efx_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ /* For >=B0 this is scatter so disable */
+ FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_oword_t rx_flush_descq;
+
+ rx_queue->flushed = FLUSH_PENDING;
+
+ /* Post a flush command */
+ EFX_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
+ efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* The queue should already have been flushed */
+ WARN_ON(rx_queue->flushed != FLUSH_DONE);
+
+ /* Remove RX descriptor ring from card */
+ EFX_ZERO_OWORD(rx_desc_ptr);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+ efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
+ channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ efx_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
+ efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct efx_tx_queue *tx_queue;
+ struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
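+ /* Completion indices wrap modulo the ring size, e.g. with
+ * ptr_mask 1023, read_count 1020 and tx_ev_desc_ptr 4 the
+ * NIC has completed (4 - 1020) & 1023 = 8 descriptors. */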
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ channel->irq_mod_score += tx_packets;
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+
+ if (efx_dev_registered(efx))
+ netif_tx_lock(efx->net_dev);
+ efx_notify_tx_desc(tx_queue);
+ if (efx_dev_registered(efx))
+ netif_tx_unlock(efx->net_dev);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
+ EFX_WORKAROUND_10727(efx)) {
+ efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
+
+/* Detect errors included in the rx_ev_pkt_ok bit. */
+static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event,
+ bool *rx_ev_pkt_ok,
+ bool *discard)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+ 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* The frame must be discarded if any of these are true. */
+ *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm);
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef EFX_ENABLE_DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+}
+
+/* Handle receive events that are not in-order. */
+static void
+efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+}
+
+/* Handle a packet received event
+ *
+ * The NIC sets a "discard" flag if it receives a unicast packet with the
+ * wrong destination address.
+ * The "is multicast" and "matches multicast filter" flags can likewise be
+ * used to discard non-matching multicast packets.
+ */
+static void
+efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, discard = false, checksummed;
+ struct efx_rx_queue *rx_queue;
+
+ /* Basic packet information */
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
+ if (unlikely(rx_ev_desc_ptr != expected_ptr))
+ efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP, then we can rely on the hardware checksum.
+ */
+ checksummed =
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
+ } else {
+ efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
+ checksummed = false;
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ discard = true;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
+ checksummed, discard);
+}
+
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned code;
+
+ code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+ ; /* ignore */
+ else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+ /* The queue must be empty, so we won't receive any rx
+ * events and efx_process_channel() therefore won't refill
+ * the queue. Refill it here. */
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
+ else
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+}
+
+static void
+efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx,
+ EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int efx_nic_process_eventq(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ efx_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ efx_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += efx_handle_tx_event(channel, &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ efx_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ efx_handle_driver_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
+
+/* Allocate buffer table entries for event queue */
+int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return efx_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_eventq(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ }
+
+ /* Pin event queue buffer */
+ efx_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.addr, 0xff, channel->eventq.len);
+
+ /* Push event queue to card */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ efx->type->push_irq_moderation(channel);
+}
+
+void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EFX_ZERO_OWORD(reg);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+ /* Unpin event queue */
+ efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+void efx_nic_generate_test_event(struct efx_channel *channel)
+{
+ unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+ efx_qword_t test_event;
+
+ EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+ unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
+ efx_qword_t test_event;
+
+ EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel, &test_event);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+static void efx_poll_flush_events(struct efx_nic *efx)
+{
+ struct efx_channel *channel = efx_get_channel(efx, 0);
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ unsigned int read_ptr = channel->eventq_read_ptr;
+ unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
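+ /* The pointers are free-running and masked only when used as ring
+ * indices, so this bounds the scan to just under one ring's worth
+ * of events. */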
+
+ do {
+ efx_qword_t *event = efx_event(channel, read_ptr);
+ int ev_code, ev_sub_code, ev_queue;
+ bool ev_failed;
+
+ if (!efx_event_present(event))
+ break;
+
+ ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
+ ev_sub_code = EFX_QWORD_FIELD(*event,
+ FSF_AZ_DRIVER_EV_SUBCODE);
+ if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+ ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
+ ev_queue = EFX_QWORD_FIELD(*event,
+ FSF_AZ_DRIVER_EV_SUBDATA);
+ if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(
+ efx, ev_queue / EFX_TXQ_TYPES,
+ ev_queue % EFX_TXQ_TYPES);
+ tx_queue->flushed = FLUSH_DONE;
+ }
+ } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+ ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
+ ev_queue = EFX_QWORD_FIELD(
+ *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ ev_failed = EFX_QWORD_FIELD(
+ *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (ev_queue < efx->n_rx_channels) {
+ rx_queue = efx_get_rx_queue(efx, ev_queue);
+ rx_queue->flushed =
+ ev_failed ? FLUSH_FAILED : FLUSH_DONE;
+ }
+ }
+
+ /* We're about to destroy the queue anyway, so
+ * it's ok to throw away every non-flush event */
+ EFX_SET_QWORD(*event);
+
+ ++read_ptr;
+ } while (read_ptr != end_ptr);
+
+ channel->eventq_read_ptr = read_ptr;
+}
+
+/* Handle tx and rx flushes at the same time, since they run in
+ * parallel in the hardware and there's no reason for us to
+ * serialise them */
+int efx_nic_flush_queues(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int i, tx_pending, rx_pending;
+
+ /* If necessary prepare the hardware for flushing */
+ efx->type->prepare_flush(efx);
+
+ /* Flush all tx queues in parallel */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised)
+ efx_flush_tx_queue(tx_queue);
+ }
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of which may
+ * need to be retried if there is an outstanding descriptor fetch */
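+ /* Poll every EFX_FLUSH_INTERVAL (10) ms, up to EFX_FLUSH_POLL_COUNT
+ * (100) attempts, i.e. roughly one second in total. */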
+ for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
+ rx_pending = tx_pending = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed == FLUSH_PENDING)
+ ++rx_pending;
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_pending == EFX_RX_FLUSH_COUNT)
+ break;
+ if (rx_queue->flushed == FLUSH_FAILED ||
+ rx_queue->flushed == FLUSH_NONE) {
+ efx_flush_rx_queue(rx_queue);
+ ++rx_pending;
+ }
+ }
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
+ ++tx_pending;
+ }
+ }
+
+ if (rx_pending == 0 && tx_pending == 0)
+ return 0;
+
+ msleep(EFX_FLUSH_INTERVAL);
+ efx_poll_flush_events(efx);
+ }
+
+ /* Mark the queues as all flushed. We're going to return failure
+ * leading to a reset, or fake up success anyway */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "tx queue %d flush command timed out\n",
+ tx_queue->queue);
+ tx_queue->flushed = FLUSH_DONE;
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "rx queue %d flush command timed out\n",
+ efx_rx_queue_index(rx_queue));
+ rx_queue->flushed = FLUSH_DONE;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_nic_interrupts(struct efx_nic *efx,
+ bool enabled, bool force)
+{
+ efx_oword_t int_en_reg_ker;
+
+ EFX_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_nic_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ /* Enable interrupts */
+ efx_nic_interrupts(efx, true, false);
+
+ /* Force processing of all the channels to get the EVQ RPTRs up to
+ date */
+ efx_for_each_channel(channel, efx)
+ efx_schedule_channel(channel);
+}
+
+void efx_nic_disable_interrupts(struct efx_nic *efx)
+{
+ /* Disable interrupts */
+ efx_nic_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_nic_generate_interrupt(struct efx_nic *efx)
+{
+ efx_nic_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ efx_oword_t fatal_intr;
+ int error, mem_perr;
+
+ efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+ /* If this is a memory parity error, dump which blocks are offending */
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (efx_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ efx_nic_disable_interrupts(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen. "
+ "NIC will be disabled\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Could this be ours? If interrupts are disabled then the
+ * channel state may not be valid.
+ */
+ if (!efx->legacy_irq_enabled)
+ return result;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
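+ /* Each ISR bit flags an event queue with an outstanding interrupt;
+ * the bit at efx->fatal_irq_level doubles as the fatal error
+ * indicator tested below. */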
+
+ /* Check to see if we have a serious error condition */
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
+ if (queues != 0) {
+ if (EFX_WORKAROUND_15783(efx))
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel(channel);
+ queues >>= 1;
+ }
+ result = IRQ_HANDLED;
+
+ } else if (EFX_WORKAROUND_15783(efx)) {
+ efx_qword_t *event;
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel, channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
+ }
+ }
+
+ if (result == IRQ_HANDLED) {
+ efx->last_irq_cpu = raw_smp_processor_id();
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ }
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_channel *channel = *(struct efx_channel **)dev_id;
+ struct efx_nic *efx = channel->efx;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ efx->last_irq_cpu = raw_smp_processor_id();
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ /* Check to see if we have a serious error condition */
+ if (channel->channel == efx->fatal_irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel(channel);
+
+ return IRQ_HANDLED;
+}
+
+
+/* Set up the RSS indirection table.
+ * This maps the packet's hash value to an RX queue.
+ */
+void efx_nic_push_rx_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
+ }
+}
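+/* Conceptually the hardware then computes, for each received packet,
+ *   rxq = rx_indir_table[hash & (FR_BZ_RX_INDIRECTION_TBL_ROWS - 1)]
+ * (a sketch; exactly which hash bits index the table is a hardware
+ * detail).
+ */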
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int efx_nic_init_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ if (!EFX_INT_MODE_USE_MSI(efx)) {
+ irq_handler_t handler;
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ handler = efx_legacy_interrupt;
+ else
+ handler = falcon_legacy_interrupt_a1;
+
+ rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+ efx->name, efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook legacy IRQ %d\n",
+ efx->pci_dev->irq);
+ goto fail1;
+ }
+ return 0;
+ }
+
+ /* Hook MSI or MSI-X interrupt */
+ efx_for_each_channel(channel, efx) {
+ rc = request_irq(channel->irq, efx_msi_interrupt,
+ IRQF_PROBE_SHARED, /* Not shared */
+ efx->channel_name[channel->channel],
+ &efx->channel[channel->channel]);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook IRQ %d\n", channel->irq);
+ goto fail2;
+ }
+ }
+
+ return 0;
+
+ fail2:
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq, &efx->channel[channel->channel]);
+ fail1:
+ return rc;
+}
+
+void efx_nic_fini_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ efx_oword_t reg;
+
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ free_irq(channel->irq, &efx->channel[channel->channel]);
+ }
+
+ /* ACK legacy interrupt */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ efx_reado(efx, &reg, FR_BZ_INT_ISR0);
+ else
+ falcon_irq_ack_a1(efx);
+
+ /* Disable legacy interrupt */
+ if (efx->legacy_irq)
+ free_irq(efx->legacy_irq, efx);
+}
+
+u32 efx_nic_fpga_ver(struct efx_nic *efx)
+{
+ efx_oword_t altera_build;
+ efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_nic_init_common(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
+ efx->type->tx_dc_base / 8);
+ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
+ efx->type->rx_dc_base / 8);
+ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EFX_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EFX_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->fatal_irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->fatal_irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * falcon_interrupts()).
+ *
+ * Note: All other fatal interrupts are enabled
+ */
+ EFX_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_INVERT_OWORD(temp);
+ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ efx_nic_push_rx_indir_table(efx);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/* Register dump */
+
+#define REGISTER_REVISION_A 1
+#define REGISTER_REVISION_B 2
+#define REGISTER_REVISION_C 3
+#define REGISTER_REVISION_Z 3 /* latest revision */
+
+struct efx_nic_reg {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+};
+
+#define REGISTER(name, min_rev, max_rev) { \
+ FR_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
+}
+#define REGISTER_AA(name) REGISTER(name, A, A)
+#define REGISTER_AB(name) REGISTER(name, A, B)
+#define REGISTER_AZ(name) REGISTER(name, A, Z)
+#define REGISTER_BB(name) REGISTER(name, B, B)
+#define REGISTER_BZ(name) REGISTER(name, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, C, Z)
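+/* For example, REGISTER_AB(NIC_STAT) expands to
+ *   { FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B },
+ * i.e. the revision-qualified register offset plus the first and last
+ * hardware revisions on which the register exists.
+ */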
+
+static const struct efx_nic_reg efx_nic_regs[] = {
+ REGISTER_AZ(ADR_REGION),
+ REGISTER_AZ(INT_EN_KER),
+ REGISTER_BZ(INT_EN_CHAR),
+ REGISTER_AZ(INT_ADR_KER),
+ REGISTER_BZ(INT_ADR_CHAR),
+ /* INT_ACK_KER is WO */
+ /* INT_ISR0 is RC */
+ REGISTER_AZ(HW_INIT),
+ REGISTER_CZ(USR_EV_CFG),
+ REGISTER_AB(EE_SPI_HCMD),
+ REGISTER_AB(EE_SPI_HADR),
+ REGISTER_AB(EE_SPI_HDATA),
+ REGISTER_AB(EE_BASE_PAGE),
+ REGISTER_AB(EE_VPD_CFG0),
+ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+ /* PCIE_CORE_INDIRECT is indirect */
+ REGISTER_AB(NIC_STAT),
+ REGISTER_AB(GPIO_CTL),
+ REGISTER_AB(GLB_CTL),
+ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+ REGISTER_BZ(DP_CTRL),
+ REGISTER_AZ(MEM_STAT),
+ REGISTER_AZ(CS_DEBUG),
+ REGISTER_AZ(ALTERA_BUILD),
+ REGISTER_AZ(CSR_SPARE),
+ REGISTER_AB(PCIE_SD_CTL0123),
+ REGISTER_AB(PCIE_SD_CTL45),
+ REGISTER_AB(PCIE_PCS_CTL_STAT),
+ /* DEBUG_DATA_OUT is not used */
+ /* DRV_EV is WO */
+ REGISTER_AZ(EVQ_CTL),
+ REGISTER_AZ(EVQ_CNT1),
+ REGISTER_AZ(EVQ_CNT2),
+ REGISTER_AZ(BUF_TBL_CFG),
+ REGISTER_AZ(SRM_RX_DC_CFG),
+ REGISTER_AZ(SRM_TX_DC_CFG),
+ REGISTER_AZ(SRM_CFG),
+ /* BUF_TBL_UPD is WO */
+ REGISTER_AZ(SRM_UPD_EVQ),
+ REGISTER_AZ(SRAM_PARITY),
+ REGISTER_AZ(RX_CFG),
+ REGISTER_BZ(RX_FILTER_CTL),
+ /* RX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(RX_DC_CFG),
+ REGISTER_AZ(RX_DC_PF_WM),
+ REGISTER_BZ(RX_RSS_TKEY),
+ /* RX_NODESC_DROP is RC */
+ REGISTER_AA(RX_SELF_RST),
+ /* RX_DEBUG, RX_PUSH_DROP are not used */
+ REGISTER_CZ(RX_RSS_IPV6_REG1),
+ REGISTER_CZ(RX_RSS_IPV6_REG2),
+ REGISTER_CZ(RX_RSS_IPV6_REG3),
+ /* TX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(TX_DC_CFG),
+ REGISTER_AA(TX_CHKSM_CFG),
+ REGISTER_AZ(TX_CFG),
+ /* TX_PUSH_DROP is not used */
+ REGISTER_AZ(TX_RESERVED),
+ REGISTER_BZ(TX_PACE),
+ /* TX_PACE_DROP_QID is RC */
+ REGISTER_BB(TX_VLAN),
+ REGISTER_BZ(TX_IPFIL_PORTEN),
+ REGISTER_AB(MD_TXD),
+ REGISTER_AB(MD_RXD),
+ REGISTER_AB(MD_CS),
+ REGISTER_AB(MD_PHY_ADR),
+ REGISTER_AB(MD_ID),
+ /* MD_STAT is RC */
+ REGISTER_AB(MAC_STAT_DMA),
+ REGISTER_AB(MAC_CTRL),
+ REGISTER_BB(GEN_MODE),
+ REGISTER_AB(MAC_MC_HASH_REG0),
+ REGISTER_AB(MAC_MC_HASH_REG1),
+ REGISTER_AB(GM_CFG1),
+ REGISTER_AB(GM_CFG2),
+ /* GM_IPG and GM_HD are not used */
+ REGISTER_AB(GM_MAX_FLEN),
+ /* GM_TEST is not used */
+ REGISTER_AB(GM_ADR1),
+ REGISTER_AB(GM_ADR2),
+ REGISTER_AB(GMF_CFG0),
+ REGISTER_AB(GMF_CFG1),
+ REGISTER_AB(GMF_CFG2),
+ REGISTER_AB(GMF_CFG3),
+ REGISTER_AB(GMF_CFG4),
+ REGISTER_AB(GMF_CFG5),
+ REGISTER_BB(TX_SRC_MAC_CTL),
+ REGISTER_AB(XM_ADR_LO),
+ REGISTER_AB(XM_ADR_HI),
+ REGISTER_AB(XM_GLB_CFG),
+ REGISTER_AB(XM_TX_CFG),
+ REGISTER_AB(XM_RX_CFG),
+ REGISTER_AB(XM_MGT_INT_MASK),
+ REGISTER_AB(XM_FC),
+ REGISTER_AB(XM_PAUSE_TIME),
+ REGISTER_AB(XM_TX_PARAM),
+ REGISTER_AB(XM_RX_PARAM),
+ /* XM_MGT_INT_MSK (note no 'A') is RC */
+ REGISTER_AB(XX_PWR_RST),
+ REGISTER_AB(XX_SD_CTL),
+ REGISTER_AB(XX_TXDRV_CTL),
+ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+ /* XX_CORE_STAT is partly RC */
+};
+
+struct efx_nic_reg_table {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+ u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+ offset, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
+ step, rows \
+}
+#define REGISTER_TABLE(name, min_rev, max_rev) \
+ REGISTER_TABLE_DIMENSIONS( \
+ name, FR_ ## min_rev ## max_rev ## _ ## name, \
+ min_rev, max_rev, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+#define REGISTER_TABLE_BB_CZ(name) \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_BB_ ## name ## _ROWS), \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
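+/* For example, REGISTER_TABLE_BZ(RX_INDIRECTION_TBL) expands to
+ *   { FR_BZ_RX_INDIRECTION_TBL, REGISTER_REVISION_B, REGISTER_REVISION_Z,
+ *     FR_BZ_RX_INDIRECTION_TBL_STEP, FR_BZ_RX_INDIRECTION_TBL_ROWS },
+ * giving the dump code the table's base offset, row stride and row count.
+ */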
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+ /* DRIVER is not used */
+ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+ REGISTER_TABLE_BB(TX_IPFIL_TBL),
+ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+ /* We can't reasonably read all of the buffer table (up to 8MB!).
+ * However this driver will only use a few entries. Reading
+ * 1K entries allows for some expansion of queue count and
+ * size before we need to change the version. */
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+ A, A, 8, 1024),
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+ B, Z, 8, 1024),
+ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_BB_CZ(TIMER_TBL),
+ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+ /* TX_FILTER_TBL0 is huge and not used by this driver */
+ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_CZ(MC_TREG_SMEM),
+ /* MSIX_PBA_TABLE is not mapped */
+ /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+ REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+};
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+ size_t len = 0;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++)
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision)
+ len += sizeof(efx_oword_t);
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++)
+ if (efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision)
+ len += table->rows * min_t(size_t, table->step, 16);
+
+ return len;
+}
+
+void efx_nic_get_regs(struct efx_nic *efx, void *buf)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++) {
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision) {
+ efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+ buf += sizeof(efx_oword_t);
+ }
+ }
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++) {
+ size_t size, i;
+
+ if (!(efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision))
+ continue;
+
+ size = min_t(size_t, table->step, 16);
+
+ for (i = 0; i < table->rows; i++) {
+ switch (table->step) {
+ case 4: /* 32-bit register or SRAM */
+ efx_readd_table(efx, buf, table->offset, i);
+ break;
+ case 8: /* 64-bit SRAM */
+ efx_sram_readq(efx,
+ efx->membase + table->offset,
+ buf, i);
+ break;
+ case 16: /* 128-bit register */
+ efx_reado_table(efx, buf, table->offset, i);
+ break;
+ case 32: /* 128-bit register, interleaved */
+ efx_reado_table(efx, buf, table->offset, 2 * i);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ buf += size;
+ }
+ }
+}
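+/* Note: callers must supply a buffer of at least efx_nic_get_regs_len()
+ * bytes; both functions iterate the same register tables, so the computed
+ * length and the bytes written always agree.
+ */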
--- /dev/null
- * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_NIC_H
+#define EFX_NIC_H
+
+#include <linux/i2c-algo-bit.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "spi.h"
+
+/*
+ * Falcon hardware control
+ */
+
+enum {
+ EFX_REV_FALCON_A0 = 0,
+ EFX_REV_FALCON_A1 = 1,
+ EFX_REV_FALCON_B0 = 2,
+ EFX_REV_SIENA_A0 = 3,
+};
+
+static inline int efx_nic_rev(struct efx_nic *efx)
+{
+ return efx->type->revision;
+}
+
+extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
+
+static inline bool efx_nic_has_mc(struct efx_nic *efx)
+{
+ return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
+}
+/* NIC has two interlinked PCI functions for the same port. */
+static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
+{
+ return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
+}
+
+enum {
+ PHY_TYPE_NONE = 0,
+ PHY_TYPE_TXC43128 = 1,
+ PHY_TYPE_88E1111 = 2,
+ PHY_TYPE_SFX7101 = 3,
+ PHY_TYPE_QT2022C2 = 4,
+ PHY_TYPE_PM8358 = 6,
+ PHY_TYPE_SFT9001A = 8,
+ PHY_TYPE_QT2025C = 9,
+ PHY_TYPE_SFT9001B = 10,
+};
+
+#define FALCON_XMAC_LOOPBACKS \
+ ((1 << LOOPBACK_XGMII) | \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI))
+
+#define FALCON_GMAC_LOOPBACKS \
+ (1 << LOOPBACK_GMAC)
+
+/**
+ * struct falcon_board_type - board operations and type information
+ * @id: Board type id, as found in NVRAM
+ * @ref_model: Model number of Solarflare reference design
+ * @gen_type: Generic board type description
+ * @init: Allocate resources and initialise peripheral hardware
+ * @init_phy: Do board-specific PHY initialisation
+ * @fini: Shut down hardware and free resources
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @monitor: Board-specific health check function
+ */
+struct falcon_board_type {
+ u8 id;
+ const char *ref_model;
+ const char *gen_type;
+ int (*init) (struct efx_nic *nic);
+ void (*init_phy) (struct efx_nic *efx);
+ void (*fini) (struct efx_nic *nic);
+ void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
+ int (*monitor) (struct efx_nic *nic);
+};
+
+/**
+ * struct falcon_board - board information
+ * @type: Type of board
+ * @major: Major rev. ('A', 'B' ...)
+ * @minor: Minor rev. (0, 1, ...)
+ * @i2c_adap: I2C adapter for on-board peripherals
+ * @i2c_data: Data for bit-banging algorithm
+ * @hwmon_client: I2C client for hardware monitor
+ * @ioexp_client: I2C client for power/port control
+ */
+struct falcon_board {
+ const struct falcon_board_type *type;
+ int major;
+ int minor;
+ struct i2c_adapter i2c_adap;
+ struct i2c_algo_bit_data i2c_data;
+ struct i2c_client *hwmon_client, *ioexp_client;
+};
+
+/**
+ * struct falcon_nic_data - Falcon NIC state
+ * @pci_dev2: Secondary function of Falcon A
+ * @board: Board state and functions
+ * @stats_disable_count: Nest count for disabling statistics fetches
+ * @stats_pending: Is there a pending DMA of MAC statistics.
+ * @stats_timer: A timer for regularly fetching MAC statistics.
+ * @stats_dma_done: Pointer to the flag which indicates DMA completion.
+ * @spi_flash: SPI flash device
+ * @spi_eeprom: SPI EEPROM device
+ * @spi_lock: SPI bus lock
+ * @mdio_lock: MDIO bus lock
+ * @xmac_poll_required: XMAC link state needs polling
+ */
+struct falcon_nic_data {
+ struct pci_dev *pci_dev2;
+ struct falcon_board board;
+ unsigned int stats_disable_count;
+ bool stats_pending;
+ struct timer_list stats_timer;
+ u32 *stats_dma_done;
+ struct efx_spi_device spi_flash;
+ struct efx_spi_device spi_eeprom;
+ struct mutex spi_lock;
+ struct mutex mdio_lock;
+ bool xmac_poll_required;
+};
+
+static inline struct falcon_board *falcon_board(struct efx_nic *efx)
+{
+ struct falcon_nic_data *data = efx->nic_data;
+ return &data->board;
+}
+
+/**
+ * struct siena_nic_data - Siena NIC state
+ * @mcdi: Management-Controller-to-Driver Interface
- void __iomem *mcdi_smem;
+ * @wol_filter_id: Wake-on-LAN packet filter id
+ */
+struct siena_nic_data {
+ struct efx_mcdi_iface mcdi;
+ int wol_filter_id;
+};
+
+extern const struct efx_nic_type falcon_a1_nic_type;
+extern const struct efx_nic_type falcon_b0_nic_type;
+extern const struct efx_nic_type siena_a0_nic_type;
+
+/**************************************************************************
+ *
+ * Externs
+ *
+ **************************************************************************
+ */
+
+extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+
+/* TX data path */
+extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
+
+/* RX data path */
+extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
+
+/* Event data path */
+extern int efx_nic_probe_eventq(struct efx_channel *channel);
+extern void efx_nic_init_eventq(struct efx_channel *channel);
+extern void efx_nic_fini_eventq(struct efx_channel *channel);
+extern void efx_nic_remove_eventq(struct efx_channel *channel);
+extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
+extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
+
+/* MAC/PHY */
+extern void falcon_drain_tx_fifo(struct efx_nic *efx);
+extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
+
+/* Interrupts and test events */
+extern int efx_nic_init_interrupt(struct efx_nic *efx);
+extern void efx_nic_enable_interrupts(struct efx_nic *efx);
+extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
+extern void efx_nic_generate_interrupt(struct efx_nic *efx);
+extern void efx_nic_disable_interrupts(struct efx_nic *efx);
+extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
+extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
+extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+#define EFX_IRQ_MOD_RESOLUTION 5
+#define EFX_IRQ_MOD_MAX 0x1000
+
+/* Global Resources */
+extern int efx_nic_flush_queues(struct efx_nic *efx);
+extern void falcon_start_nic_stats(struct efx_nic *efx);
+extern void falcon_stop_nic_stats(struct efx_nic *efx);
+extern void falcon_setup_xaui(struct efx_nic *efx);
+extern int falcon_reset_xaui(struct efx_nic *efx);
+extern void efx_nic_init_common(struct efx_nic *efx);
+extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len);
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+/* Tests */
+struct efx_nic_register_test {
+ unsigned address;
+ efx_oword_t mask;
+};
+extern int efx_nic_test_registers(struct efx_nic *efx,
+ const struct efx_nic_register_test *regs,
+ size_t n_regs);
+
+extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
+extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
+#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
+
+/* Retrieve statistic from statistics block */
+#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
+ if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
+ (efx)->mac_stats.efx_stat += le16_to_cpu( \
+ *((__force __le16 *) \
+ (efx->stats_buffer.addr + \
+ FALCON_STAT_OFFSET(falcon_stat)))); \
+ else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
+ (efx)->mac_stats.efx_stat += le32_to_cpu( \
+ *((__force __le32 *) \
+ (efx->stats_buffer.addr + \
+ FALCON_STAT_OFFSET(falcon_stat)))); \
+ else \
+ (efx)->mac_stats.efx_stat += le64_to_cpu( \
+ *((__force __le64 *) \
+ (efx->stats_buffer.addr + \
+ FALCON_STAT_OFFSET(falcon_stat)))); \
+ } while (0)
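+/* Usage sketch (the statistic name here is illustrative):
+ *   FALCON_STAT(efx, XgRxOctets, rx_bytes);
+ * reads the 16/32/64-bit little-endian counter out of the DMA'd statistics
+ * block and accumulates it into the matching efx_mac_stats field.
+ */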
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define MAC_DATA_LBN 0
+#define MAC_DATA_WIDTH 32
+
+extern void efx_nic_generate_event(struct efx_channel *channel,
+ efx_qword_t *event);
+
+extern void falcon_poll_xmac(struct efx_nic *efx);
+
+#endif /* EFX_NIC_H */
--- /dev/null
- /* Initialise MCDI */
- nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP *
- FR_CZ_MC_TREG_SMEM_ROWS);
- if (!nic_data->mcdi_smem) {
- netif_err(efx, probe, efx->net_dev,
- "could not map MCDI at %llx+%x\n",
- (unsigned long long)efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
- rc = -ENOMEM;
- goto fail1;
- }
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "mac.h"
+#include "spi.h"
+#include "regs.h"
+#include "io.h"
+#include "phy.h"
+#include "workarounds.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
+
+static void siena_init_wol(struct efx_nic *efx);
+
+
+static void siena_push_irq_moderation(struct efx_channel *channel)
+{
+ efx_dword_t timer_cmd;
+
+ BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_CZ_TC_TIMER_VAL_WIDTH));
+
+ if (channel->irq_moderation)
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_CZ_TC_TIMER_MODE,
+ FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL,
+ channel->irq_moderation - 1);
+ else
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_CZ_TC_TIMER_MODE,
+ FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+ channel->channel);
+}
+
+static void siena_push_multicast_hash(struct efx_nic *efx)
+{
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ efx->multicast_hash.byte, sizeof(efx->multicast_hash),
+ NULL, 0, NULL);
+}
+
+static int siena_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ uint32_t status;
+ int rc;
+
+ rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
+ addr, value, &status);
+ if (rc)
+ return rc;
+ if (status != MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return 0;
+}
+
+static int siena_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ uint16_t value;
+ uint32_t status;
+ int rc;
+
+ rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
+ addr, &value, &status);
+ if (rc)
+ return rc;
+ if (status != MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (int)value;
+}
+
+/* This call is responsible for hooking in the MAC and PHY operations */
+static int siena_probe_port(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Hook in PHY operations table */
+ efx->phy_op = &efx_mcdi_phy_ops;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = siena_mdio_read;
+ efx->mdio.mdio_write = siena_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ MC_CMD_MAC_NSTATS * sizeof(u64));
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+
+ return 0;
+}
+
+static void siena_remove_port(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+static const struct efx_nic_register_test siena_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+ { FR_CZ_USR_EV_CFG,
+ EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_CFG,
+ EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
+ { FR_AZ_TX_CFG,
+ EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
+ { FR_AZ_TX_RESERVED,
+ EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+ { FR_AZ_SRM_TX_DC_CFG,
+ EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_CFG,
+ EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_PF_WM,
+ EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_DP_CTRL,
+ EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_RX_RSS_TKEY,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG1,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG2,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG3,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
+};
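+/* Each mask above selects the register bits that efx_nic_test_registers()
+ * may toggle with test patterns; bits outside the mask are treated as
+ * read-only or reserved and are left untouched.
+ */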
+
+static int siena_test_registers(struct efx_nic *efx)
+{
+ return efx_nic_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests));
+}
+
+/**************************************************************************
+ *
+ * Device reset
+ *
+ **************************************************************************
+ */
+
+static enum reset_type siena_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_ALL;
+}
+
+static int siena_map_reset_flags(u32 *flags)
+{
+ enum {
+ SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY),
+ SIENA_RESET_MC = (SIENA_RESET_PORT |
+ ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
+ };
+
+ if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
+ *flags &= ~SIENA_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
+ *flags &= ~SIENA_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
+static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_port(efx);
+}
+
+static int siena_probe_nvconfig(struct efx_nic *efx)
+{
+ return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
+}
+
+static int siena_probe_nic(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data;
+ bool already_attached = false;
+ efx_oword_t reg;
+ int rc;
+
+ /* Allocate storage for hardware specific data */
+ nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ if (efx_nic_fpga_ver(efx) != 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Siena FPGA not supported\n");
+ rc = -ENODEV;
+ goto fail1;
+ }
+
+ efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
+ efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+
- goto fail2;
+ efx_mcdi_init(efx);
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
- iounmap(nic_data->mcdi_smem);
++ goto fail1;
+
+ /* Let the BMC know that the driver is now in charge of link and
+ * filter settings. We must do this before we reset the NIC */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ goto fail2;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ /* Now we can reset the NIC */
+ rc = siena_reset_hw(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
+ goto fail3;
+ }
+
+ siena_init_wol(efx);
+
+ /* Allocate memory for INT_KER */
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ if (rc)
+ goto fail4;
+ BUG_ON(efx->irq_status.dma_addr & 0x0f);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (unsigned long long)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (unsigned long long)virt_to_phys(efx->irq_status.addr));
+
+ /* Read in the non-volatile configuration */
+ rc = siena_probe_nvconfig(efx);
+ if (rc == -EINVAL) {
+ netif_err(efx, probe, efx->net_dev,
+ "NVRAM is invalid therefore using defaults\n");
+ efx->phy_type = PHY_TYPE_NONE;
+ efx->mdio.prtad = MDIO_PRTAD_NONE;
+ } else if (rc) {
+ goto fail5;
+ }
+
+ return 0;
+
+fail5:
+ efx_nic_free_buffer(efx, &efx->irq_status);
+fail4:
+fail3:
+ efx_mcdi_drv_attach(efx, false, NULL);
+fail2:
- struct siena_nic_data *nic_data = efx->nic_data;
-
+fail1:
+ kfree(efx->nic_data);
+ return rc;
+}
+
+/* This call performs hardware-specific global initialisation, such as
+ * defining the descriptor cache sizes and number of RSS channels.
+ * It does not set up any buffers, descriptor rings or event queues.
+ */
+static int siena_init_nic(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+ int rc;
+
+ /* Recover from a failed assertion post-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Squash TX of packets of 16 bytes or less */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+
+ efx_reado(efx, &temp, FR_AZ_RX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
+ /* Enable hash insertion. This is broken for the 'Falcon' hash
+ * if IPv6 hashing is also enabled, so also select Toeplitz
+ * TCP/IPv4 and IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
+ efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ return rc;
+
+ /* Set destination of both TX and RX Flush events */
+ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+ efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
+
+ EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
+ efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
+
+ efx_nic_init_common(efx);
+ return 0;
+}
+
+static void siena_remove_nic(struct efx_nic *efx)
+{
- iounmap(nic_data->mcdi_smem);
- kfree(nic_data);
+ efx_nic_free_buffer(efx, &efx->irq_status);
+
+ siena_reset_hw(efx, RESET_TYPE_ALL);
+
+ /* Relinquish the device back to the BMC */
+ if (efx_nic_has_mc(efx))
+ efx_mcdi_drv_attach(efx, false, NULL);
+
+ /* Tear down the private nic state */
- .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
++ kfree(efx->nic_data);
+ efx->nic_data = NULL;
+}
+
+#define STATS_GENERATION_INVALID ((__force __le64)(-1))
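+/* The MC brackets each statistics DMA with a seqlock-style generation
+ * count: read GENERATION_END, then the counters, then GENERATION_START;
+ * if the two generations differ, the MC rewrote the block mid-read and
+ * the caller must retry.
+ */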
+
+static int siena_try_update_nic_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats;
+ struct efx_mac_stats *mac_stats;
+ __le64 generation_start, generation_end;
+
+ mac_stats = &efx->mac_stats;
+ dma_stats = efx->stats_buffer.addr;
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+
+#define MAC_STAT(M, D) \
+ mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
+
+ MAC_STAT(tx_bytes, TX_BYTES);
+ MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
+ mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
+ mac_stats->tx_bad_bytes);
+ MAC_STAT(tx_packets, TX_PKTS);
+ MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
+ MAC_STAT(tx_pause, TX_PAUSE_PKTS);
+ MAC_STAT(tx_control, TX_CONTROL_PKTS);
+ MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
+ MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
+ MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
+ MAC_STAT(tx_lt64, TX_LT64_PKTS);
+ MAC_STAT(tx_64, TX_64_PKTS);
+ MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
+ MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
+ MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
+ MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
+ MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
+ MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
+ MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
+ mac_stats->tx_collision = 0;
+ MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
+ MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
+ MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
+ MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
+ MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
+ mac_stats->tx_collision = (mac_stats->tx_single_collision +
+ mac_stats->tx_multiple_collision +
+ mac_stats->tx_excessive_collision +
+ mac_stats->tx_late_collision);
+ MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
+ MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
+ MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
+ MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
+ MAC_STAT(rx_bytes, RX_BYTES);
+ MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
+ mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
+ mac_stats->rx_bad_bytes);
+ MAC_STAT(rx_packets, RX_PKTS);
+ MAC_STAT(rx_good, RX_GOOD_PKTS);
+ MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
+ MAC_STAT(rx_pause, RX_PAUSE_PKTS);
+ MAC_STAT(rx_control, RX_CONTROL_PKTS);
+ MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
+ MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
+ MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
+ MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
+ MAC_STAT(rx_64, RX_64_PKTS);
+ MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
+ MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
+ MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
+ MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
+ MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
+ MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
+ MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
+ mac_stats->rx_bad_lt64 = 0;
+ mac_stats->rx_bad_64_to_15xx = 0;
+ mac_stats->rx_bad_15xx_to_jumbo = 0;
+ MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
+ MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
+ mac_stats->rx_missed = 0;
+ MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
+ MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
+ MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
+ MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
+ MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
+ mac_stats->rx_good_lt64 = 0;
+
+ efx->n_rx_nodesc_drop_cnt =
+ le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
+
+#undef MAC_STAT
+
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void siena_update_nic_stats(struct efx_nic *efx)
+{
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us) */
+ for (retry = 0; retry < 100; ++retry) {
+ if (siena_try_update_nic_stats(efx) == 0)
+ return;
+ udelay(100);
+ }
+
+ /* Use the old values instead */
+}
+
+static void siena_start_nic_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
+ MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+}
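+/* Invalidating GENERATION_END before enabling the periodic DMA above
+ * guarantees that siena_try_update_nic_stats() ignores the buffer until
+ * the MC has completed at least one statistics DMA into it.
+ */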
+
+static void siena_stop_nic_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+}
+
+/**************************************************************************
+ *
+ * Wake on LAN
+ *
+ **************************************************************************
+ */
+
+static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ wol->supported = WAKE_MAGIC;
+ if (nic_data->wol_filter_id != -1)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+
+static int siena_set_wol(struct efx_nic *efx, u32 type)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (type & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (type & WAKE_MAGIC) {
+ if (nic_data->wol_filter_id != -1)
+ efx_mcdi_wol_filter_remove(efx,
+ nic_data->wol_filter_id);
+ rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
+ &nic_data->wol_filter_id);
+ if (rc)
+ goto fail;
+
+ pci_wake_from_d3(efx->pci_dev, true);
+ } else {
+ rc = efx_mcdi_wol_filter_reset(efx);
+ nic_data->wol_filter_id = -1;
+ pci_wake_from_d3(efx->pci_dev, false);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+ fail:
+ netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
+ __func__, type, rc);
+ return rc;
+}
+
+
+static void siena_init_wol(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
+
+ if (rc != 0) {
+ /* If it failed, attempt to get into a synchronised
+ * state with MC by resetting any set WoL filters */
+ efx_mcdi_wol_filter_reset(efx);
+ nic_data->wol_filter_id = -1;
+ } else if (nic_data->wol_filter_id != -1) {
+ pci_wake_from_d3(efx->pci_dev, true);
+ }
+}
+
+
+/**************************************************************************
+ *
+ * Revision-dependent attributes used by efx.c and nic.c
+ *
+ **************************************************************************
+ */
+
+const struct efx_nic_type siena_a0_nic_type = {
+ .probe = siena_probe_nic,
+ .remove = siena_remove_nic,
+ .init = siena_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .monitor = NULL,
+ .map_reset_reason = siena_map_reset_reason,
+ .map_reset_flags = siena_map_reset_flags,
+ .reset = siena_reset_hw,
+ .probe_port = siena_probe_port,
+ .remove_port = siena_remove_port,
+ .prepare_flush = efx_port_dummy_op_void,
+ .update_stats = siena_update_nic_stats,
+ .start_stats = siena_start_nic_stats,
+ .stop_stats = siena_stop_nic_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = siena_push_irq_moderation,
+ .push_multicast_hash = siena_push_multicast_hash,
+ .reconfigure_port = efx_mcdi_phy_reconfigure,
+ .get_wol = siena_get_wol,
+ .set_wol = siena_set_wol,
+ .resume_wol = siena_init_wol,
+ .test_registers = siena_test_registers,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .default_mac_ops = &efx_mcdi_mac_operations,
+
+ .revision = EFX_REV_SIENA_A0,
++ .mem_map_size = (FR_CZ_MC_TREG_SMEM +
++ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
+ .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+ .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+ .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+ .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+ .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+ .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_hash_size = 0x10,
+ .rx_buffer_padding = 0,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
+ * interrupt handler only supports 32
+ * channels */
+ .tx_dc_base = 0x88000,
+ .rx_dc_base = 0x68000,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+};
--- /dev/null
- /* Write combining and sriov=enabled are incompatible */
- #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_WORKAROUNDS_H
+#define EFX_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EFX_WORKAROUND_ALWAYS(efx) 1
+#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
+#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
+#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_10G(efx) 1
+
+/* XAUI resets if link not detected */
+#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
+/* RX PCIe double split performance issue */
+#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
+/* Bit-bashed I2C reads cause performance drop */
+#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
+/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
+ * or a PCIe error (bug 11028) */
+#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
+/* Transmit flow control may get disabled */
+#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
+/* Truncated IPv4 packets can confuse the TX packet parser */
+#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
+/* Legacy ISR read can return zero once */
+#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
+/* Legacy interrupt storm when interrupt fifo fills */
+#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
+
+/* Spurious parity errors in TSORT buffers */
+#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+/* Unaligned read request >512 bytes after aligning may break TSORT */
+#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
+/* iSCSI parsing errors */
+#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
+/* RX events go missing */
+#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
+/* RX_RESET on A1 */
+#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
+/* Increase filter depth to avoid RX_RESET */
+#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
+/* Flushes may never complete */
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
+/* Leak overlength packets rather than free */
+#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+
+#endif /* EFX_WORKAROUNDS_H */
--- /dev/null
- int ring;
+/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * This driver uses the sungem driver (c) David Miller
+ * (davem@redhat.com) as its basis.
+ *
+ * The cassini chip has a number of features that distinguish it from
+ * the gem chip:
+ * 4 transmit descriptor rings that are used for either QoS (VLAN) or
+ * load balancing (non-VLAN mode)
+ * batching of multiple packets
+ * multiple CPU dispatching
+ * page-based RX descriptor engine with separate completion rings
+ * Gigabit support (GMII and PCS interface)
+ * MIF link up/down detection works
+ *
+ * RX is handled by page sized buffers that are attached as fragments to
+ * the skb. here's what's done:
+ * -- driver allocates pages at a time and keeps reference counts
+ * on them.
+ * -- the upper protocol layers assume that the header is in the skb
+ * itself. as a result, cassini will copy a small amount (64 bytes)
+ * to make them happy.
+ * -- driver appends the rest of the data pages as frags to skbuffs
+ * and increments the reference count
+ * -- on page reclamation, the driver swaps the page with a spare page.
+ * if that page is still in use, it frees its reference to that page,
+ * and allocates a new page for use. otherwise, it just recycles
+ * the page.
+ *
+ * NOTE: cassini can parse the header. however, it's not worth it
+ * as long as the network stack requires a header copy.
+ *
+ * TX has 4 queues. currently these queues are used in a round-robin
+ * fashion for load balancing. They can also be used for QoS. for that
+ * to work, however, QoS information needs to be exposed down to the driver
+ * level so that subqueues get targeted to particular transmit rings.
+ * alternatively, the queues can be configured via use of the all-purpose
+ * ioctl.
+ *
+ * RX DATA: the rx completion ring has all the info, but the rx desc
+ * ring has all of the data. RX can conceivably come in under multiple
+ * interrupts, but the INT# assignment needs to be set up properly by
+ * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
+ * that. also, the two descriptor rings are designed to distinguish between
+ * encrypted and non-encrypted packets, but we use them for buffering
+ * instead.
+ *
+ * by default, the selective clear mask is set up to process rx packets.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/mii.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+
+#include <net/checksum.h>
+
+#include <linux/atomic.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define CAS_NCPUS num_online_cpus()
+
+#define cas_skb_release(x) netif_rx(x)
+
+/* select which firmware to use */
+#define USE_HP_WORKAROUND
+#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
+#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
+
+#include "cassini.h"
+
+#define USE_TX_COMPWB /* use completion writeback registers */
+#define USE_CSMA_CD_PROTO /* standard CSMA/CD */
+#define USE_RX_BLANK /* hw interrupt mitigation */
+#undef USE_ENTROPY_DEV /* don't test for entropy device */
+
+/* NOTE: these aren't usable unless PCI interrupts can be assigned.
+ * also, we need to make cp->lock finer-grained.
+ */
+#undef USE_PCI_INTB
+#undef USE_PCI_INTC
+#undef USE_PCI_INTD
+#undef USE_QOS
+
+#undef USE_VPD_DEBUG /* debug vpd information if defined */
+
+/* rx processing options */
+#define USE_PAGE_ORDER /* specify to allocate large rx pages */
+#define RX_DONT_BATCH 0 /* if 1, don't batch flows */
+#define RX_COPY_ALWAYS 0 /* if 0, use frags */
+#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
+#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
+
+#define DRV_MODULE_NAME "cassini"
+#define DRV_MODULE_VERSION "1.6"
+#define DRV_MODULE_RELDATE "21 May 2008"
+
+#define CAS_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define CAS_TX_TIMEOUT (HZ)
+#define CAS_LINK_TIMEOUT (22*HZ/10)
+#define CAS_LINK_FAST_TIMEOUT (1)
+
+/* timeout values for state changing. these specify the number
+ * of 10us delays to be used before giving up.
+ */
+#define STOP_TRIES_PHY 1000
+#define STOP_TRIES 5000
+
+/* specify a minimum frame size to deal with some fifo issues
+ * max mtu == 2 * page size - ethernet header - 64 - swivel =
+ * 2 * page_size - 0x50
+ */
+#define CAS_MIN_FRAME 97
+#define CAS_1000MB_MIN_FRAME 255
+#define CAS_MIN_MTU 60
+#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
+
+#if 1
+/*
+ * Eliminate these and use separate atomic counters for each, to
+ * avoid a race condition.
+ */
+#else
+#define CAS_RESET_MTU 1
+#define CAS_RESET_ALL 2
+#define CAS_RESET_SPARE 3
+#endif
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
+static int link_mode;
+
+MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
+MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("sun/cassini.bin");
+module_param(cassini_debug, int, 0);
+MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
+module_param(link_mode, int, 0);
+MODULE_PARM_DESC(link_mode, "default link mode");
+
+/*
+ * Workaround for a PCS bug in which the link goes down due to the chip
+ * being confused and never showing a link status of "up."
+ */
+#define DEFAULT_LINKDOWN_TIMEOUT 5
+/*
+ * Value in seconds, for user input.
+ */
+static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
+module_param(linkdown_timeout, int, 0);
+MODULE_PARM_DESC(linkdown_timeout,
+"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
+
+/*
+ * value in 'ticks' (units used by jiffies). Set when we init the
+ * module because 'HZ' is actually a function call on some flavors of
+ * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
+ */
+static int link_transition_timeout;
+
+
+
+static u16 link_modes[] __devinitdata = {
+ BMCR_ANENABLE, /* 0 : autoneg */
+ 0, /* 1 : 10bt half duplex */
+ BMCR_SPEED100, /* 2 : 100bt half duplex */
+ BMCR_FULLDPLX, /* 3 : 10bt full duplex */
+ BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
+ CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
+ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
+
+static void cas_set_link_modes(struct cas *cp);
+
+static inline void cas_lock_tx(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++)
+ spin_lock(&cp->tx_lock[i]);
+}
+
+static inline void cas_lock_all(struct cas *cp)
+{
+ spin_lock_irq(&cp->lock);
+ cas_lock_tx(cp);
+}
+
+/* WTZ: QA was finding deadlock problems with the previous
+ * versions after long test runs with multiple cards per machine.
+ * See if replacing cas_lock_all with safer versions helps. The
+ * symptoms QA is reporting match those we'd expect if interrupts
+ * aren't being properly restored, and we fixed a previous deadlock
+ * with similar symptoms by using save/restore versions in other
+ * places.
+ */
+#define cas_lock_all_save(cp, flags) \
+do { \
+ struct cas *xxxcp = (cp); \
+ spin_lock_irqsave(&xxxcp->lock, flags); \
+ cas_lock_tx(xxxcp); \
+} while (0)
+
+static inline void cas_unlock_tx(struct cas *cp)
+{
+ int i;
+
+ for (i = N_TX_RINGS; i > 0; i--)
+ spin_unlock(&cp->tx_lock[i - 1]);
+}
+
+static inline void cas_unlock_all(struct cas *cp)
+{
+ cas_unlock_tx(cp);
+ spin_unlock_irq(&cp->lock);
+}
+
+#define cas_unlock_all_restore(cp, flags) \
+do { \
+ struct cas *xxxcp = (cp); \
+ cas_unlock_tx(xxxcp); \
+ spin_unlock_irqrestore(&xxxcp->lock, flags); \
+} while (0)
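+/* Usage sketch:
+ *   unsigned long flags;
+ *   cas_lock_all_save(cp, flags);
+ *   ... touch state shared with the interrupt path ...
+ *   cas_unlock_all_restore(cp, flags);
+ * Keeping the saved IRQ flags in the caller avoids the lost-interrupt-state
+ * deadlocks described above.
+ */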
+
+static void cas_disable_irq(struct cas *cp, const int ring)
+{
+ /* Make sure we won't get any more interrupts */
+ if (ring == 0) {
+ writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
+ return;
+ }
+
+ /* disable completion interrupts and selectively mask */
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+ case 1:
+#endif
+#ifdef USE_PCI_INTC
+ case 2:
+#endif
+#ifdef USE_PCI_INTD
+ case 3:
+#endif
+ writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
+ cp->regs + REG_PLUS_INTRN_MASK(ring));
+ break;
+#endif
+ default:
+ writel(INTRN_MASK_CLEAR_ALL, cp->regs +
+ REG_PLUS_INTRN_MASK(ring));
+ break;
+ }
+ }
+}
+
+static inline void cas_mask_intr(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cas_disable_irq(cp, i);
+}
+
+static void cas_enable_irq(struct cas *cp, const int ring)
+{
+ if (ring == 0) { /* all but TX_DONE */
+ writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
+ return;
+ }
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+ case 1:
+#endif
+#ifdef USE_PCI_INTC
+ case 2:
+#endif
+#ifdef USE_PCI_INTD
+ case 3:
+#endif
+ writel(INTRN_MASK_RX_EN, cp->regs +
+ REG_PLUS_INTRN_MASK(ring));
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+}
+
+static inline void cas_unmask_intr(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cas_enable_irq(cp, i);
+}
+
+static inline void cas_entropy_gather(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+ if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+ return;
+
+ batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
+ readl(cp->regs + REG_ENTROPY_IV),
+ sizeof(uint64_t)*8);
+#endif
+}
+
+static inline void cas_entropy_reset(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+ if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+ return;
+
+ writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
+ cp->regs + REG_BIM_LOCAL_DEV_EN);
+ writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
+ writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
+
+ /* if we read back 0x0, we don't have an entropy device */
+ if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
+ cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
+#endif
+}
+
+/* access to the phy. the following assumes that we've initialized the MIF to
+ * be in frame rather than bit-bang mode
+ */
+static u16 cas_phy_read(struct cas *cp, int reg)
+{
+ u32 cmd;
+ int limit = STOP_TRIES_PHY;
+
+ cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
+ cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+ cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+ cmd |= MIF_FRAME_TURN_AROUND_MSB;
+ writel(cmd, cp->regs + REG_MIF_FRAME);
+
+ /* poll for completion */
+ while (limit-- > 0) {
+ udelay(10);
+ cmd = readl(cp->regs + REG_MIF_FRAME);
+ if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+ return cmd & MIF_FRAME_DATA_MASK;
+ }
+ return 0xFFFF; /* -1 */
+}
+
+static int cas_phy_write(struct cas *cp, int reg, u16 val)
+{
+ int limit = STOP_TRIES_PHY;
+ u32 cmd;
+
+ cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
+ cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+ cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+ cmd |= MIF_FRAME_TURN_AROUND_MSB;
+ cmd |= val & MIF_FRAME_DATA_MASK;
+ writel(cmd, cp->regs + REG_MIF_FRAME);
+
+ /* poll for completion */
+ while (limit-- > 0) {
+ udelay(10);
+ cmd = readl(cp->regs + REG_MIF_FRAME);
+ if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+ return 0;
+ }
+ return -1;
+}
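+
+#if 0
+/* Illustrative only (not built): a sketch of a read-modify-write of a
+ * PHY register through the frame-mode helpers above; MII_BMCR and
+ * BMCR_LOOPBACK are the standard definitions from <linux/mii.h>.
+ */
+static void cas_example_phy_rmw(struct cas *cp)
+{
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+ ctl |= BMCR_LOOPBACK;
+ cas_phy_write(cp, MII_BMCR, ctl);
+}
+#endif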
+
+static void cas_phy_powerup(struct cas *cp)
+{
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+ if ((ctl & BMCR_PDOWN) == 0)
+ return;
+ ctl &= ~BMCR_PDOWN;
+ cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+static void cas_phy_powerdown(struct cas *cp)
+{
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+ if (ctl & BMCR_PDOWN)
+ return;
+ ctl |= BMCR_PDOWN;
+ cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+/* cp->lock held. note: the last put_page will free the buffer */
+static int cas_page_free(struct cas *cp, cas_page_t *page)
+{
+ pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
+ PCI_DMA_FROMDEVICE);
+ __free_pages(page->buffer, cp->page_order);
+ kfree(page);
+ return 0;
+}
+
+#ifdef RX_COUNT_BUFFERS
+#define RX_USED_ADD(x, y) ((x)->used += (y))
+#define RX_USED_SET(x, y) ((x)->used = (y))
+#else
+#define RX_USED_ADD(x, y)
+#define RX_USED_SET(x, y)
+#endif
+
+/* local page allocation routines for the receive buffers. jumbo pages
+ * require at least 8K contiguous and 8K aligned buffers.
+ */
+static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
+{
+ cas_page_t *page;
+
+ page = kmalloc(sizeof(cas_page_t), flags);
+ if (!page)
+ return NULL;
+
+ INIT_LIST_HEAD(&page->list);
+ RX_USED_SET(page, 0);
+ page->buffer = alloc_pages(flags, cp->page_order);
+ if (!page->buffer)
+ goto page_err;
+ page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
+ cp->page_size, PCI_DMA_FROMDEVICE);
+ return page;
+
+page_err:
+ kfree(page);
+ return NULL;
+}
+
+/* initialize spare pool of rx buffers, but allocate during the open */
+static void cas_spare_init(struct cas *cp)
+{
+ spin_lock(&cp->rx_inuse_lock);
+ INIT_LIST_HEAD(&cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+
+ spin_lock(&cp->rx_spare_lock);
+ INIT_LIST_HEAD(&cp->rx_spare_list);
+ cp->rx_spares_needed = RX_SPARE_COUNT;
+ spin_unlock(&cp->rx_spare_lock);
+}
+
+/* used on close. free all the spare buffers. */
+static void cas_spare_free(struct cas *cp)
+{
+ struct list_head list, *elem, *tmp;
+
+ /* free spare buffers */
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cp->rx_spare_lock);
+ list_splice_init(&cp->rx_spare_list, &list);
+ spin_unlock(&cp->rx_spare_lock);
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_free(cp, list_entry(elem, cas_page_t, list));
+ }
+
+ INIT_LIST_HEAD(&list);
+#if 1
+ /*
+ * Looks like Adrian had protected this with a different
+ * lock than used everywhere else to manipulate this list.
+ */
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice_init(&cp->rx_inuse_list, &list);
+ spin_unlock(&cp->rx_inuse_lock);
+#else
+ spin_lock(&cp->rx_spare_lock);
+ list_splice_init(&cp->rx_inuse_list, &list);
+ spin_unlock(&cp->rx_spare_lock);
+#endif
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_free(cp, list_entry(elem, cas_page_t, list));
+ }
+}
+
+/* replenish spares if needed */
+static void cas_spare_recover(struct cas *cp, const gfp_t flags)
+{
+ struct list_head list, *elem, *tmp;
+ int needed, i;
+
+ /* check inuse list. if we don't need any more free buffers,
+ * just free them
+ */
+
+ /* make a local copy of the list */
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice_init(&cp->rx_inuse_list, &list);
+ spin_unlock(&cp->rx_inuse_lock);
+
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_t *page = list_entry(elem, cas_page_t, list);
+
+ /*
+ * With the lockless pagecache, cassini buffering scheme gets
+ * slightly less accurate: we might find that a page has an
+ * elevated reference count here, due to a speculative ref,
+ * and skip it as in-use. Ideally we would be able to reclaim
+ * it. However this would be such a rare case, it doesn't
+ * matter too much as we should pick it up the next time round.
+ *
+ * Importantly, if we find that the page has a refcount of 1
+ * here (our refcount), then we know it is definitely not inuse
+ * so we can reuse it.
+ */
+ if (page_count(page->buffer) > 1)
+ continue;
+
+ list_del(elem);
+ spin_lock(&cp->rx_spare_lock);
+ if (cp->rx_spares_needed > 0) {
+ list_add(elem, &cp->rx_spare_list);
+ cp->rx_spares_needed--;
+ spin_unlock(&cp->rx_spare_lock);
+ } else {
+ spin_unlock(&cp->rx_spare_lock);
+ cas_page_free(cp, page);
+ }
+ }
+
+ /* put any inuse buffers back on the list */
+ if (!list_empty(&list)) {
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice(&list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ }
+
+ spin_lock(&cp->rx_spare_lock);
+ needed = cp->rx_spares_needed;
+ spin_unlock(&cp->rx_spare_lock);
+ if (!needed)
+ return;
+
+ /* we still need spares, so try to allocate some */
+ INIT_LIST_HEAD(&list);
+ i = 0;
+ while (i < needed) {
+ cas_page_t *spare = cas_page_alloc(cp, flags);
+ if (!spare)
+ break;
+ list_add(&spare->list, &list);
+ i++;
+ }
+
+ spin_lock(&cp->rx_spare_lock);
+ list_splice(&list, &cp->rx_spare_list);
+ cp->rx_spares_needed -= i;
+ spin_unlock(&cp->rx_spare_lock);
+}
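+
+/* (cas_page_dequeue() below replenishes with GFP_ATOMIC because it can
+ * run from interrupt context; process-context callers of
+ * cas_spare_recover() may pass a blocking gfp mask instead.)
+ */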
+
+/* pull a page from the list. */
+static cas_page_t *cas_page_dequeue(struct cas *cp)
+{
+ struct list_head *entry;
+ int recover;
+
+ spin_lock(&cp->rx_spare_lock);
+ if (list_empty(&cp->rx_spare_list)) {
+ /* try to do a quick recovery */
+ spin_unlock(&cp->rx_spare_lock);
+ cas_spare_recover(cp, GFP_ATOMIC);
+ spin_lock(&cp->rx_spare_lock);
+ if (list_empty(&cp->rx_spare_list)) {
+ netif_err(cp, rx_err, cp->dev,
+ "no spare buffers available\n");
+ spin_unlock(&cp->rx_spare_lock);
+ return NULL;
+ }
+ }
+
+ entry = cp->rx_spare_list.next;
+ list_del(entry);
+ recover = ++cp->rx_spares_needed;
+ spin_unlock(&cp->rx_spare_lock);
+
+ /* trigger the timer to do the recovery */
+ if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_spare);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
+ schedule_work(&cp->reset_task);
+#endif
+ }
+ return list_entry(entry, cas_page_t, list);
+}
+
+
+static void cas_mif_poll(struct cas *cp, const int enable)
+{
+ u32 cfg;
+
+ cfg = readl(cp->regs + REG_MIF_CFG);
+ cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
+
+ if (cp->phy_type & CAS_PHY_MII_MDIO1)
+ cfg |= MIF_CFG_PHY_SELECT;
+
+ /* poll and interrupt on link status change. */
+ if (enable) {
+ cfg |= MIF_CFG_POLL_EN;
+ cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
+ cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
+ }
+ writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
+ cp->regs + REG_MIF_MASK);
+ writel(cfg, cp->regs + REG_MIF_CFG);
+}
+
+/* Must be invoked under cp->lock */
+static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+{
+ u16 ctl;
+#if 1
+ int lcntl;
+ int changed = 0;
+ int oldstate = cp->lstate;
+ int link_was_not_down = (oldstate != link_down);
+#endif
+ /* Setup link parameters */
+ if (!ep)
+ goto start_aneg;
+ lcntl = cp->link_cntl;
+ if (ep->autoneg == AUTONEG_ENABLE)
+ cp->link_cntl = BMCR_ANENABLE;
+ else {
+ u32 speed = ethtool_cmd_speed(ep);
+ cp->link_cntl = 0;
+ if (speed == SPEED_100)
+ cp->link_cntl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ cp->link_cntl |= CAS_BMCR_SPEED1000;
+ if (ep->duplex == DUPLEX_FULL)
+ cp->link_cntl |= BMCR_FULLDPLX;
+ }
+#if 1
+ changed = (lcntl != cp->link_cntl);
+#endif
+start_aneg:
+ if (cp->lstate == link_up) {
+ netdev_info(cp->dev, "PCS link down\n");
+ } else {
+ if (changed) {
+ netdev_info(cp->dev, "link configuration changed\n");
+ }
+ }
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+ if (!cp->hw_running)
+ return;
+#if 1
+ /*
+ * WTZ: If the old state was link_up, we turn off the carrier
+ * to replicate everything we do elsewhere on a link-down
+ * event when we were already in a link-up state..
+ */
+ if (oldstate == link_up)
+ netif_carrier_off(cp->dev);
+ if (changed && link_was_not_down) {
+ /*
+ * WTZ: This branch will simply schedule a full reset after
+ * we explicitly changed link modes in an ioctl. See if this
+ * fixes the link-problems we were having for forced mode.
+ */
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+ cp->timer_ticks = 0;
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+ return;
+ }
+#endif
+ if (cp->phy_type & CAS_PHY_SERDES) {
+ u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
+
+ if (cp->link_cntl & BMCR_ANENABLE) {
+ val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
+ cp->lstate = link_aneg;
+ } else {
+ if (cp->link_cntl & BMCR_FULLDPLX)
+ val |= PCS_MII_CTRL_DUPLEX;
+ val &= ~PCS_MII_AUTONEG_EN;
+ cp->lstate = link_force_ok;
+ }
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+ } else {
+ cas_mif_poll(cp, 0);
+ ctl = cas_phy_read(cp, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
+ CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
+ ctl |= cp->link_cntl;
+ if (ctl & BMCR_ANENABLE) {
+ ctl |= BMCR_ANRESTART;
+ cp->lstate = link_aneg;
+ } else {
+ cp->lstate = link_force_ok;
+ }
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ cas_phy_write(cp, MII_BMCR, ctl);
+ cas_mif_poll(cp, 1);
+ }
+
+ cp->timer_ticks = 0;
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+}
+
+/* Must be invoked under cp->lock. */
+static int cas_reset_mii_phy(struct cas *cp)
+{
+ int limit = STOP_TRIES_PHY;
+ u16 val;
+
+ cas_phy_write(cp, MII_BMCR, BMCR_RESET);
+ udelay(100);
+ while (--limit) {
+ val = cas_phy_read(cp, MII_BMCR);
+ if ((val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ return limit <= 0;
+}
+
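+/* The "sun/cassini.bin" blob begins with a two-byte little-endian load
+ * address; the remaining bytes are the firmware image itself, parsed
+ * out below.
+ */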
+static int cas_saturn_firmware_init(struct cas *cp)
+{
+ const struct firmware *fw;
+ const char fw_name[] = "sun/cassini.bin";
+ int err;
+
+ if (PHY_NS_DP83065 != cp->phy_id)
+ return 0;
+
+ err = request_firmware(&fw, fw_name, &cp->pdev->dev);
+ if (err) {
+ pr_err("Failed to load firmware \"%s\"\n",
+ fw_name);
+ return err;
+ }
+ if (fw->size < 2) {
+ pr_err("bogus length %zu in \"%s\"\n",
+ fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
+ cp->fw_size = fw->size - 2;
+ cp->fw_data = vmalloc(cp->fw_size);
+ if (!cp->fw_data) {
+ err = -ENOMEM;
+ pr_err("\"%s\" Failed %d\n", fw_name, err);
+ goto out;
+ }
+ memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static void cas_saturn_firmware_load(struct cas *cp)
+{
+ int i;
+
+ cas_phy_powerdown(cp);
+
+ /* expanded memory access mode */
+ cas_phy_write(cp, DP83065_MII_MEM, 0x0);
+
+ /* pointer configuration for new firmware */
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
+ cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x82);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x0);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x39);
+
+ /* download new firmware */
+ cas_phy_write(cp, DP83065_MII_MEM, 0x1);
+ cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
+ for (i = 0; i < cp->fw_size; i++)
+ cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
+
+ /* enable firmware */
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x1);
+}
+
+
+/* phy initialization */
+static void cas_phy_init(struct cas *cp)
+{
+ u16 val;
+
+ /* if we're in MII/GMII mode, set up phy */
+ if (CAS_PHY_MII(cp->phy_type)) {
+ writel(PCS_DATAPATH_MODE_MII,
+ cp->regs + REG_PCS_DATAPATH_MODE);
+
+ cas_mif_poll(cp, 0);
+ cas_reset_mii_phy(cp); /* take out of isolate mode */
+
+ if (PHY_LUCENT_B0 == cp->phy_id) {
+ /* workaround link up/down issue with lucent */
+ cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
+ cas_phy_write(cp, MII_BMCR, 0x00f1);
+ cas_phy_write(cp, LUCENT_MII_REG, 0x0);
+
+ } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
+ /* workarounds for broadcom phy */
+ cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
+
+ } else if (PHY_BROADCOM_5411 == cp->phy_id) {
+ val = cas_phy_read(cp, BROADCOM_MII_REG4);
+ val = cas_phy_read(cp, BROADCOM_MII_REG4);
+ if (val & 0x0080) {
+ /* link workaround */
+ cas_phy_write(cp, BROADCOM_MII_REG4,
+ val & ~0x0080);
+ }
+
+ } else if (cp->cas_flags & CAS_FLAG_SATURN) {
+ writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
+ SATURN_PCFG_FSI : 0x0,
+ cp->regs + REG_SATURN_PCFG);
+
+ /* load firmware to address 10Mbps auto-negotiation
+ * issue. NOTE: this will need to be changed if the
+ * default firmware gets fixed.
+ */
+ if (PHY_NS_DP83065 == cp->phy_id) {
+ cas_saturn_firmware_load(cp);
+ }
+ cas_phy_powerup(cp);
+ }
+
+ /* advertise capabilities */
+ val = cas_phy_read(cp, MII_BMCR);
+ val &= ~BMCR_ANENABLE;
+ cas_phy_write(cp, MII_BMCR, val);
+ udelay(10);
+
+ cas_phy_write(cp, MII_ADVERTISE,
+ cas_phy_read(cp, MII_ADVERTISE) |
+ (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL |
+ CAS_ADVERTISE_PAUSE |
+ CAS_ADVERTISE_ASYM_PAUSE));
+
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ /* make sure that we don't advertise half
+ * duplex to avoid a chip issue
+ */
+ val = cas_phy_read(cp, CAS_MII_1000_CTRL);
+ val &= ~CAS_ADVERTISE_1000HALF;
+ val |= CAS_ADVERTISE_1000FULL;
+ cas_phy_write(cp, CAS_MII_1000_CTRL, val);
+ }
+
+ } else {
+ /* reset pcs for serdes */
+ u32 val;
+ int limit;
+
+ writel(PCS_DATAPATH_MODE_SERDES,
+ cp->regs + REG_PCS_DATAPATH_MODE);
+
+ /* enable serdes pins on saturn */
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ writel(0, cp->regs + REG_SATURN_PCFG);
+
+ /* Reset PCS unit. */
+ val = readl(cp->regs + REG_PCS_MII_CTRL);
+ val |= PCS_MII_RESET;
+ writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+ limit = STOP_TRIES;
+ while (--limit > 0) {
+ udelay(10);
+ if ((readl(cp->regs + REG_PCS_MII_CTRL) &
+ PCS_MII_RESET) == 0)
+ break;
+ }
+ if (limit <= 0)
+ netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+ readl(cp->regs + REG_PCS_STATE_MACHINE));
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ writel(0x0, cp->regs + REG_PCS_CFG);
+
+ /* Advertise all capabilities except half-duplex. */
+ val = readl(cp->regs + REG_PCS_MII_ADVERT);
+ val &= ~PCS_MII_ADVERT_HD;
+ val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
+ PCS_MII_ADVERT_ASYM_PAUSE);
+ writel(val, cp->regs + REG_PCS_MII_ADVERT);
+
+ /* enable PCS */
+ writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
+
+ /* pcs workaround: enable sync detect */
+ writel(PCS_SERDES_CTRL_SYNCD_EN,
+ cp->regs + REG_PCS_SERDES_CTRL);
+ }
+}
+
+
+static int cas_pcs_link_check(struct cas *cp)
+{
+ u32 stat, state_machine;
+ int retval = 0;
+
+ /* The link status bit latches on zero, so you must
+ * read it twice in such a case to see a transition
+ * to the link being up.
+ */
+ stat = readl(cp->regs + REG_PCS_MII_STATUS);
+ if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
+ stat = readl(cp->regs + REG_PCS_MII_STATUS);
+
+ /* The remote-fault indication is only valid
+ * when autoneg has completed.
+ */
+ if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
+ PCS_MII_STATUS_REMOTE_FAULT)) ==
+ (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+ netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
+
+ /* work around link detection issue by querying the PCS state
+ * machine directly.
+ */
+ state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
+ if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
+ stat &= ~PCS_MII_STATUS_LINK_STATUS;
+ } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
+ stat |= PCS_MII_STATUS_LINK_STATUS;
+ }
+
+ if (stat & PCS_MII_STATUS_LINK_STATUS) {
+ if (cp->lstate != link_up) {
+ if (cp->opened) {
+ cp->lstate = link_up;
+ cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+ }
+ } else if (cp->lstate == link_up) {
+ cp->lstate = link_down;
+ if (link_transition_timeout != 0 &&
+ cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+ !cp->link_transition_jiffies_valid) {
+ /*
+ * force a reset, as a workaround for the
+ * link-failure problem. May want to move this to a
+ * point a bit earlier in the sequence. If we had
+ * generated a reset a short time ago, we'll wait for
+ * the link timer to check the status until a
+ * timer expires (link_transition_jiffies_valid is
+ * true when the timer is running.) Instead of using
+ * a system timer, we just do a check whenever the
+ * link timer is running - this clears the flag after
+ * a suitable delay.
+ */
+ retval = 1;
+ cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+ cp->link_transition_jiffies = jiffies;
+ cp->link_transition_jiffies_valid = 1;
+ } else {
+ cp->link_transition = LINK_TRANSITION_ON_FAILURE;
+ }
+ netif_carrier_off(cp->dev);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "PCS link down\n");
+
+ /* Cassini only: if you force a mode, there can be
+ * sync problems on link down. to fix that, the following
+ * things need to be checked:
+ * 1) read serialink state register
+ * 2) read pcs status register to verify link down.
+ * 3) if link down and serial link == 0x03, then you need
+ * to global reset the chip.
+ */
+ if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
+ /* should check to see if we're in a forced mode */
+ stat = readl(cp->regs + REG_PCS_SERDES_STATE);
+ if (stat == 0x03)
+ return 1;
+ }
+ } else if (cp->lstate == link_down) {
+ if (link_transition_timeout != 0 &&
+ cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+ !cp->link_transition_jiffies_valid) {
+ /* force a reset, as a workaround for the
+ * link-failure problem. May want to move
+ * this to a point a bit earlier in the
+ * sequence.
+ */
+ retval = 1;
+ cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+ cp->link_transition_jiffies = jiffies;
+ cp->link_transition_jiffies_valid = 1;
+ } else {
+ cp->link_transition = LINK_TRANSITION_STILL_FAILED;
+ }
+ }
+
+ return retval;
+}
+
+static int cas_pcs_interrupt(struct net_device *dev,
+ struct cas *cp, u32 status)
+{
+ u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
+
+ if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
+ return 0;
+ return cas_pcs_link_check(cp);
+}
+
+static int cas_txmac_interrupt(struct net_device *dev,
+ struct cas *cp, u32 status)
+{
+ u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
+
+ if (!txmac_stat)
+ return 0;
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
+
+ /* Defer timer expiration is quite normal,
+ * don't even log the event.
+ */
+ if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
+ !(txmac_stat & ~MAC_TX_DEFER_TIMER))
+ return 0;
+
+ spin_lock(&cp->stat_lock[0]);
+ if (txmac_stat & MAC_TX_UNDERRUN) {
+ netdev_err(dev, "TX MAC xmit underrun\n");
+ cp->net_stats[0].tx_fifo_errors++;
+ }
+
+ if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
+ netdev_err(dev, "TX MAC max packet size error\n");
+ cp->net_stats[0].tx_errors++;
+ }
+
+ /* The rest are all cases of one of the 16-bit TX
+ * counters expiring.
+ */
+ if (txmac_stat & MAC_TX_COLL_NORMAL)
+ cp->net_stats[0].collisions += 0x10000;
+
+ if (txmac_stat & MAC_TX_COLL_EXCESS) {
+ cp->net_stats[0].tx_aborted_errors += 0x10000;
+ cp->net_stats[0].collisions += 0x10000;
+ }
+
+ if (txmac_stat & MAC_TX_COLL_LATE) {
+ cp->net_stats[0].tx_aborted_errors += 0x10000;
+ cp->net_stats[0].collisions += 0x10000;
+ }
+ spin_unlock(&cp->stat_lock[0]);
+
+ /* We do not keep track of MAC_TX_COLL_FIRST and
+ * MAC_TX_PEAK_ATTEMPTS events.
+ */
+ return 0;
+}
+
+static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
+{
+ cas_hp_inst_t *inst;
+ u32 val;
+ int i;
+
+ i = 0;
+ while ((inst = firmware) && inst->note) {
+ writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
+
+ val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
+ val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
+
+ val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
+
+ val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
+ ++firmware;
+ ++i;
+ }
+}
+
+static void cas_init_rx_dma(struct cas *cp)
+{
+ u64 desc_dma = cp->block_dvma;
+ u32 val;
+ int i, size;
+
+ /* rx free descriptors */
+ val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
+ val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
+ val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
+ if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
+ val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
+ writel(val, cp->regs + REG_RX_CFG);
+
+ val = (unsigned long) cp->init_rxds[0] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
+ writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ /* rx desc 2 is for IPSEC packets. however,
+ * we don't use it for that purpose.
+ */
+ val = (unsigned long) cp->init_rxds[1] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
+ REG_PLUS_RX_DB1_LOW);
+ writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
+ REG_PLUS_RX_KICK1);
+ }
+
+ /* rx completion registers */
+ val = (unsigned long) cp->init_rxcs[0] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ /* rx comp 2-4 */
+ for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
+ val = (unsigned long) cp->init_rxcs[i] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs +
+ REG_PLUS_RX_CBN_HI(i));
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
+ REG_PLUS_RX_CBN_LOW(i));
+ }
+ }
+
+ /* read selective clear regs to prevent spurious interrupts
+ * on reset because complete == kick.
+ * selective clear set up to prevent interrupts on resets
+ */
+ readl(cp->regs + REG_INTR_STATUS_ALIAS);
+ writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
+ readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
+
+ /* 2 is different from 3 and 4 */
+ if (N_RX_COMP_RINGS > 1)
+ writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
+ cp->regs + REG_PLUS_ALIASN_CLEAR(1));
+
+ for (i = 2; i < N_RX_COMP_RINGS; i++)
+ writel(INTR_RX_DONE_ALT,
+ cp->regs + REG_PLUS_ALIASN_CLEAR(i));
+ }
+
+ /* set up pause thresholds */
+ val = CAS_BASE(RX_PAUSE_THRESH_OFF,
+ cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
+ val |= CAS_BASE(RX_PAUSE_THRESH_ON,
+ cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
+ writel(val, cp->regs + REG_RX_PAUSE_THRESH);
+
+ /* zero out dma reassembly buffers */
+ for (i = 0; i < 64; i++) {
+ writel(i, cp->regs + REG_RX_TABLE_ADDR);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
+ }
+
+ /* make sure address register is 0 for normal operation */
+ writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
+ writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
+
+ /* interrupt mitigation */
+#ifdef USE_RX_BLANK
+ val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
+ val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
+ writel(val, cp->regs + REG_RX_BLANK);
+#else
+ writel(0x0, cp->regs + REG_RX_BLANK);
+#endif
+
+ /* interrupt generation as a function of low water marks for
+ * free desc and completion entries. these are used to trigger
+ * housekeeping for rx descs. we don't use the free interrupt
+ * as it's not very useful
+ */
+ /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
+ val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
+ writel(val, cp->regs + REG_RX_AE_THRESH);
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
+ writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
+ }
+
+ /* Random early detect registers. useful for congestion avoidance.
+ * this should be tunable.
+ */
+ writel(0x0, cp->regs + REG_RX_RED);
+
+ /* receive page sizes. default == 2K (0x800) */
+ val = 0;
+ if (cp->page_size == 0x1000)
+ val = 0x1;
+ else if (cp->page_size == 0x2000)
+ val = 0x2;
+ else if (cp->page_size == 0x4000)
+ val = 0x3;
+
+ /* round mtu + offset. constrain to page size. */
+ size = cp->dev->mtu + 64;
+ if (size > cp->page_size)
+ size = cp->page_size;
+
+ if (size <= 0x400)
+ i = 0x0;
+ else if (size <= 0x800)
+ i = 0x1;
+ else if (size <= 0x1000)
+ i = 0x2;
+ else
+ i = 0x3;
+
+ cp->mtu_stride = 1 << (i + 10);
+ val = CAS_BASE(RX_PAGE_SIZE, val);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
+ writel(val, cp->regs + REG_RX_PAGE_SIZE);
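+
+ /* Worked example (illustrative): a 1500-byte MTU gives
+ * size = 1564, hence i = 0x1 above, mtu_stride = 0x800, and an
+ * 8K page holds four 2K MTU strides.
+ */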
+
+ /* enable the header parser if desired */
+ if (CAS_HP_FIRMWARE == cas_prog_null)
+ return;
+
+ val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
+ val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
+ val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
+ writel(val, cp->regs + REG_HP_CFG);
+}
+
+static inline void cas_rxc_init(struct cas_rx_comp *rxc)
+{
+ memset(rxc, 0, sizeof(*rxc));
+ rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
+}
+
+/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
+ * flipping is protected by the fact that the chip will not
+ * hand back the same page index while it's being processed.
+ */
+static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
+{
+ cas_page_t *page = cp->rx_pages[1][index];
+ cas_page_t *new;
+
+ if (page_count(page->buffer) == 1)
+ return page;
+
+ new = cas_page_dequeue(cp);
+ if (new) {
+ spin_lock(&cp->rx_inuse_lock);
+ list_add(&page->list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ }
+ return new;
+}
+
+/* this needs to be changed if we actually use the ENC RX DESC ring */
+static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
+ const int index)
+{
+ cas_page_t **page0 = cp->rx_pages[0];
+ cas_page_t **page1 = cp->rx_pages[1];
+
+ /* swap if buffer is in use */
+ if (page_count(page0[index]->buffer) > 1) {
+ cas_page_t *new = cas_page_spare(cp, index);
+ if (new) {
+ page1[index] = page0[index];
+ page0[index] = new;
+ }
+ }
+ RX_USED_SET(page0[index], 0);
+ return page0[index];
+}
+
+static void cas_clean_rxds(struct cas *cp)
+{
+ /* only clean ring 0 as ring 1 is used for spare buffers */
+ struct cas_rx_desc *rxd = cp->init_rxds[0];
+ int i, size;
+
+ /* release all rx flows */
+ for (i = 0; i < N_RX_FLOWS; i++) {
+ struct sk_buff *skb;
+ while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
+ cas_skb_release(skb);
+ }
+ }
+
+ /* initialize descriptors */
+ size = RX_DESC_RINGN_SIZE(0);
+ for (i = 0; i < size; i++) {
+ cas_page_t *page = cas_page_swap(cp, 0, i);
+ rxd[i].buffer = cpu_to_le64(page->dma_addr);
+ rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
+ CAS_BASE(RX_INDEX_RING, 0));
+ }
+
+ cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
+ cp->rx_last[0] = 0;
+ cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
+}
+
+static void cas_clean_rxcs(struct cas *cp)
+{
+ int i, j;
+
+ /* take ownership of rx comp descriptors */
+ memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
+ memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
+ for (i = 0; i < N_RX_COMP_RINGS; i++) {
+ struct cas_rx_comp *rxc = cp->init_rxcs[i];
+ for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
+ cas_rxc_init(rxc + j);
+ }
+ }
+}
+
+#if 0
+/* When we get a RX fifo overflow, the RX unit is probably hung
+ * so we do the following.
+ *
+ * If any part of the reset goes wrong, we return 1 and that causes the
+ * whole chip to be reset.
+ */
+static int cas_rxmac_reset(struct cas *cp)
+{
+ struct net_device *dev = cp->dev;
+ int limit;
+ u32 val;
+
+ /* First, reset MAC RX. */
+ writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
+ return 1;
+ }
+
+ /* Second, disable RX DMA. */
+ writel(0, cp->regs + REG_RX_CFG);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
+ return 1;
+ }
+
+ mdelay(5);
+
+ /* Execute RX reset command. */
+ writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
+ return 1;
+ }
+
+ /* reset driver rx state */
+ cas_clean_rxds(cp);
+ cas_clean_rxcs(cp);
+
+ /* Now, reprogram the rest of RX unit. */
+ cas_init_rx_dma(cp);
+
+ /* re-enable */
+ val = readl(cp->regs + REG_RX_CFG);
+ writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
+ writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ return 0;
+}
+#endif
+
+static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
+
+ if (!stat)
+ return 0;
+
+ netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
+
+ /* these are all rollovers */
+ spin_lock(&cp->stat_lock[0]);
+ if (stat & MAC_RX_ALIGN_ERR)
+ cp->net_stats[0].rx_frame_errors += 0x10000;
+
+ if (stat & MAC_RX_CRC_ERR)
+ cp->net_stats[0].rx_crc_errors += 0x10000;
+
+ if (stat & MAC_RX_LEN_ERR)
+ cp->net_stats[0].rx_length_errors += 0x10000;
+
+ if (stat & MAC_RX_OVERFLOW) {
+ cp->net_stats[0].rx_over_errors++;
+ cp->net_stats[0].rx_fifo_errors++;
+ }
+
+ /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
+ * events.
+ */
+ spin_unlock(&cp->stat_lock[0]);
+ return 0;
+}
+
+static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
+
+ if (!stat)
+ return 0;
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "mac interrupt, stat: 0x%x\n", stat);
+
+ /* This interrupt is just for pause frame and pause
+ * tracking. It is useful for diagnostics and debug
+ * but probably by default we will mask these events.
+ */
+ if (stat & MAC_CTRL_PAUSE_STATE)
+ cp->pause_entered++;
+
+ if (stat & MAC_CTRL_PAUSE_RECEIVED)
+ cp->pause_last_time_recvd = (stat >> 16);
+
+ return 0;
+}
+
+
+/* Must be invoked under cp->lock. */
+static inline int cas_mdio_link_not_up(struct cas *cp)
+{
+ u16 val;
+
+ switch (cp->lstate) {
+ case link_force_ret:
+ netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
+ cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
+ cp->timer_ticks = 5;
+ cp->lstate = link_force_ok;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ break;
+
+ case link_aneg:
+ val = cas_phy_read(cp, MII_BMCR);
+
+ /* Try forced modes. we try things in the following order:
+ * 1000 full -> 100 full/half -> 10 half
+ */
+ val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
+ val |= BMCR_FULLDPLX;
+ val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ CAS_BMCR_SPEED1000 : BMCR_SPEED100;
+ cas_phy_write(cp, MII_BMCR, val);
+ cp->timer_ticks = 5;
+ cp->lstate = link_force_try;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ break;
+
+ case link_force_try:
+ /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
+ val = cas_phy_read(cp, MII_BMCR);
+ cp->timer_ticks = 5;
+ if (val & CAS_BMCR_SPEED1000) { /* gigabit */
+ val &= ~CAS_BMCR_SPEED1000;
+ val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+ cas_phy_write(cp, MII_BMCR, val);
+ break;
+ }
+
+ if (val & BMCR_SPEED100) {
+ if (val & BMCR_FULLDPLX) /* fd failed */
+ val &= ~BMCR_FULLDPLX;
+ else { /* 100Mbps failed */
+ val &= ~BMCR_SPEED100;
+ }
+ cas_phy_write(cp, MII_BMCR, val);
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+
+/* must be invoked with cp->lock held */
+static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
+{
+ int restart;
+
+ if (bmsr & BMSR_LSTATUS) {
+ /* Ok, here we got a link. If we had it due to a forced
+ * fallback, and we were configured for autoneg, we
+ * retry a short autoneg pass. If you know your hub is
+ * broken, use ethtool ;)
+ */
+ if ((cp->lstate == link_force_try) &&
+ (cp->link_cntl & BMCR_ANENABLE)) {
+ cp->lstate = link_force_ret;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ cas_mif_poll(cp, 0);
+ cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
+ cp->timer_ticks = 5;
+ if (cp->opened)
+ netif_info(cp, link, cp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
+ cas_phy_write(cp, MII_BMCR,
+ cp->link_fcntl | BMCR_ANENABLE |
+ BMCR_ANRESTART);
+ cas_mif_poll(cp, 1);
+
+ } else if (cp->lstate != link_up) {
+ cp->lstate = link_up;
+ cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+ if (cp->opened) {
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+ }
+ return 0;
+ }
+
+ /* link not up. if the link was previously up, we restart the
+ * whole process
+ */
+ restart = 0;
+ if (cp->lstate == link_up) {
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+
+ netif_carrier_off(cp->dev);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "Link down\n");
+ restart = 1;
+
+ } else if (++cp->timer_ticks > 10)
+ cas_mdio_link_not_up(cp);
+
+ return restart;
+}
+
+static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MIF_STATUS);
+ u16 bmsr;
+
+ /* check for a link change */
+ if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
+ return 0;
+
+ bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
+ return cas_mii_link_check(cp, bmsr);
+}
+
+static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
+
+ if (!stat)
+ return 0;
+
+ netdev_err(dev, "PCI error [%04x:%04x]",
+ stat, readl(cp->regs + REG_BIM_DIAG));
+
+ /* cassini+ has this reserved */
+ if ((stat & PCI_ERR_BADACK) &&
+ ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
+ pr_cont(" <No ACK64# during ABS64 cycle>");
+
+ if (stat & PCI_ERR_DTRTO)
+ pr_cont(" <Delayed transaction timeout>");
+ if (stat & PCI_ERR_OTHER)
+ pr_cont(" <other>");
+ if (stat & PCI_ERR_BIM_DMA_WRITE)
+ pr_cont(" <BIM DMA 0 write req>");
+ if (stat & PCI_ERR_BIM_DMA_READ)
+ pr_cont(" <BIM DMA 0 read req>");
+ pr_cont("\n");
+
+ if (stat & PCI_ERR_OTHER) {
+ u16 cfg;
+
+ /* Interrogate PCI config space for the
+ * true cause.
+ */
+ pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
+ if (cfg & PCI_STATUS_PARITY)
+ netdev_err(dev, "PCI parity error detected\n");
+ if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
+ netdev_err(dev, "PCI target abort\n");
+ if (cfg & PCI_STATUS_REC_TARGET_ABORT)
+ netdev_err(dev, "PCI master acks target abort\n");
+ if (cfg & PCI_STATUS_REC_MASTER_ABORT)
+ netdev_err(dev, "PCI master abort\n");
+ if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
+ netdev_err(dev, "PCI system error SERR#\n");
+ if (cfg & PCI_STATUS_DETECTED_PARITY)
+ netdev_err(dev, "PCI parity error\n");
+
+ /* Write the error bits back to clear them. */
+ cfg &= (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY);
+ pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
+ }
+
+ /* For all PCI errors, we should reset the chip. */
+ return 1;
+}
+
+/* All non-normal interrupt conditions get serviced here.
+ * Returns non-zero if we should just exit the interrupt
+ * handler right now (ie. if we reset the card which invalidates
+ * all of the other original irq status bits).
+ */
+static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ if (status & INTR_RX_TAG_ERROR) {
+ /* corrupt RX tag framing */
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "corrupt rx tag framing\n");
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_errors++;
+ spin_unlock(&cp->stat_lock[0]);
+ goto do_reset;
+ }
+
+ if (status & INTR_RX_LEN_MISMATCH) {
+ /* length mismatch. */
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "length mismatch for rx frame\n");
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_errors++;
+ spin_unlock(&cp->stat_lock[0]);
+ goto do_reset;
+ }
+
+ if (status & INTR_PCS_STATUS) {
+ if (cas_pcs_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_TX_MAC_STATUS) {
+ if (cas_txmac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_RX_MAC_STATUS) {
+ if (cas_rxmac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_MAC_CTRL_STATUS) {
+ if (cas_mac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_MIF_STATUS) {
+ if (cas_mif_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_PCI_ERROR_STATUS) {
+ if (cas_pci_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+ return 0;
+
+do_reset:
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ netdev_err(dev, "reset called in cas_abnormal_irq\n");
+ schedule_work(&cp->reset_task);
+#endif
+ return 1;
+}
+
+/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
+ * determining whether to do a netif_stop/wakeup
+ */
+#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
+#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
+static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
+ const int len)
+{
+ unsigned long off = addr + len;
+
+ if (CAS_TABORT(cp) == 1)
+ return 0;
+ if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
+ return 0;
+ return TX_TARGET_ABORT_LEN;
+}
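+
+/* Illustrative: assuming TX_TARGET_ABORT_LEN is 0x20, a buffer ending
+ * eight bytes short of a page boundary returns 0x20 here, and the xmit
+ * path bounces that tail through a tiny buffer (see the tx_tiny_use
+ * accounting in cas_tx_ringN() below).
+ */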
+
+static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
+{
+ struct cas_tx_desc *txds;
+ struct sk_buff **skbs;
+ struct net_device *dev = cp->dev;
+ int entry, count;
+
+ spin_lock(&cp->tx_lock[ring]);
+ txds = cp->init_txds[ring];
+ skbs = cp->tx_skbs[ring];
+ entry = cp->tx_old[ring];
+
+ count = TX_BUFF_COUNT(ring, entry, limit);
+ while (entry != limit) {
+ struct sk_buff *skb = skbs[entry];
+ dma_addr_t daddr;
+ u32 dlen;
+ int frag;
+
+ if (!skb) {
+ /* this should never occur */
+ entry = TX_DESC_NEXT(ring, entry);
+ continue;
+ }
+
+ /* however, we might get only a partial skb release. */
+ count -= skb_shinfo(skb)->nr_frags +
+ cp->tx_tiny_use[ring][entry].nbufs + 1;
+ if (count < 0)
+ break;
+
+ netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+ "tx[%d] done, slot %d\n", ring, entry);
+
+ skbs[entry] = NULL;
+ cp->tx_tiny_use[ring][entry].nbufs = 0;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ struct cas_tx_desc *txd = txds + entry;
+
+ daddr = le64_to_cpu(txd->buffer);
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
+ le64_to_cpu(txd->control));
+ pci_unmap_page(cp->pdev, daddr, dlen,
+ PCI_DMA_TODEVICE);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ /* tiny buffer may follow */
+ if (cp->tx_tiny_use[ring][entry].used) {
+ cp->tx_tiny_use[ring][entry].used = 0;
+ entry = TX_DESC_NEXT(ring, entry);
+ }
+ }
+
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].tx_packets++;
+ cp->net_stats[ring].tx_bytes += skb->len;
+ spin_unlock(&cp->stat_lock[ring]);
+ dev_kfree_skb_irq(skb);
+ }
+ cp->tx_old[ring] = entry;
+
+ /* this is wrong for multiple tx rings. the net device needs
+ * multiple queues for this to do the right thing. we wait
+ * for 2*packets to be available when using tiny buffers
+ */
+ if (netif_queue_stopped(dev) &&
+ (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
+ netif_wake_queue(dev);
+ spin_unlock(&cp->tx_lock[ring]);
+}
+
+static void cas_tx(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ int limit, ring;
+#ifdef USE_TX_COMPWB
+ u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
+#endif
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "tx interrupt, status: 0x%x, %llx\n",
+ status, (unsigned long long)compwb);
+ /* process all the rings */
+ for (ring = 0; ring < N_TX_RINGS; ring++) {
+#ifdef USE_TX_COMPWB
+ /* use the completion writeback registers */
+ limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
+ CAS_VAL(TX_COMPWB_LSB, compwb);
+ compwb = TX_COMPWB_NEXT(compwb);
+#else
+ limit = readl(cp->regs + REG_TX_COMPN(ring));
+#endif
+ if (cp->tx_old[ring] != limit)
+ cas_tx_ringN(cp, ring, limit);
+ }
+}
+
+
+static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+ int entry, const u64 *words,
+ struct sk_buff **skbref)
+{
+ int dlen, hlen, len, i, alloclen;
+ int off, swivel = RX_SWIVEL_OFF_VAL;
+ struct cas_page *page;
+ struct sk_buff *skb;
+ void *addr, *crcaddr;
+ __sum16 csum;
+ char *p;
+
+ hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
+ dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
+ len = hlen + dlen;
+
+ if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
+ alloclen = len;
+ else
+ alloclen = max(hlen, RX_COPY_MIN);
+
+ skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
+ if (skb == NULL)
+ return -1;
+
+ *skbref = skb;
+ skb_reserve(skb, swivel);
+
+ p = skb->data;
+ addr = crcaddr = NULL;
+ if (hlen) { /* always copy header pages */
+ i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
+ swivel;
+
+ i = hlen;
+ if (!dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, i);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ RX_USED_ADD(page, 0x100);
+ p += hlen;
+ swivel = 0;
+ }
+
+
+ if (alloclen < (hlen + dlen)) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags;
+
+ /* normal or jumbo packets. we use frags */
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+
+ hlen = min(cp->page_size - off, dlen);
+ if (hlen < 0) {
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
+ dev_kfree_skb_irq(skb);
+ return -1;
+ }
+ i = hlen;
+ if (i == dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+
+ /* make sure we always copy a header */
+ swivel = 0;
+ if (p == (char *) skb->data) { /* not split */
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, RX_COPY_MIN);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ off += RX_COPY_MIN;
+ swivel = RX_COPY_MIN;
+ RX_USED_ADD(page, cp->mtu_stride);
+ } else {
+ RX_USED_ADD(page, hlen);
+ }
+ skb_put(skb, alloclen);
+
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += hlen - swivel;
+ skb->truesize += hlen - swivel;
+ skb->len += hlen - swivel;
+
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
+ frag->page_offset = off;
+ frag->size = hlen - swivel;
+
+ /* any more data? */
+ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+ hlen = dlen;
+ off = 0;
+
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += hlen;
+ skb->len += hlen;
+ frag++;
+
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
+ frag->page_offset = 0;
+ frag->size = hlen;
+ RX_USED_ADD(page, hlen + cp->crc_size);
+ }
+
+ if (cp->crc_size) {
+ addr = cas_page_map(page->buffer);
+ crcaddr = addr + off + hlen;
+ }
+
+ } else {
+ /* copying packet */
+ if (!dlen)
+ goto end_copy_pkt;
+
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+ hlen = min(cp->page_size - off, dlen);
+ if (hlen < 0) {
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
+ dev_kfree_skb_irq(skb);
+ return -1;
+ }
+ i = hlen;
+ if (i == dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, i);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ if (p == (char *) skb->data) /* not split */
+ RX_USED_ADD(page, cp->mtu_stride);
+ else
+ RX_USED_ADD(page, i);
+
+ /* any more data? */
+ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+ p += hlen;
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr, dlen + cp->crc_size);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ RX_USED_ADD(page, dlen + cp->crc_size);
+ }
+end_copy_pkt:
+ if (cp->crc_size) {
+ addr = NULL;
+ crcaddr = skb->data + alloclen;
+ }
+ skb_put(skb, alloclen);
+ }
+
+ csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
+ if (cp->crc_size) {
+ /* checksum includes FCS. strip it out. */
+ csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
+ csum_unfold(csum)));
+ if (addr)
+ cas_page_unmap(addr);
+ }
+ skb->protocol = eth_type_trans(skb, cp->dev);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ skb->csum = csum_unfold(~csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else
+ skb_checksum_none_assert(skb);
+ return len;
+}
+
+
+/* we can handle up to 64 rx flows at a time. we do the same thing
+ * as nonreassm except that we batch up the buffers.
+ * NOTE: we currently just treat each flow as a bunch of packets that
+ * we pass up. a better way would be to coalesce the packets
+ * into a jumbo packet. to do that, we need to do the following:
+ * 1) the first packet will have a clean split between header and
+ * data. save both.
+ * 2) each time the next flow packet comes in, extend the
+ * data length and merge the checksums.
+ * 3) on flow release, fix up the header.
+ * 4) make sure the higher layer doesn't care.
+ * because packets get coalesced, we shouldn't run into fragment count
+ * issues.
+ */
+static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
+ struct sk_buff *skb)
+{
+ int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
+ struct sk_buff_head *flow = &cp->rx_flows[flowid];
+
+ /* this is protected at a higher layer, so no need to
+ * do any additional locking here. stick the buffer
+ * at the end.
+ */
+ __skb_queue_tail(flow, skb);
+ if (words[0] & RX_COMP1_RELEASE_FLOW) {
+ while ((skb = __skb_dequeue(flow))) {
+ cas_skb_release(skb);
+ }
+ }
+}
+
+/* put rx descriptor back on ring. if a buffer is in use by a higher
+ * layer, this will need to put in a replacement.
+ */
+static void cas_post_page(struct cas *cp, const int ring, const int index)
+{
+ cas_page_t *new;
+ int entry;
+
+ entry = cp->rx_old[ring];
+
+ new = cas_page_swap(cp, ring, index);
+ cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
+ cp->init_rxds[ring][entry].index =
+ cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
+ CAS_BASE(RX_INDEX_RING, ring));
+
+ entry = RX_DESC_ENTRY(ring, entry + 1);
+ cp->rx_old[ring] = entry;
+
+ if (entry % 4)
+ return;
+
+ if (ring == 0)
+ writel(entry, cp->regs + REG_RX_KICK);
+ else if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ writel(entry, cp->regs + REG_PLUS_RX_KICK1);
+}
+
+
+/* only when things are bad */
+static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
+{
+ unsigned int entry, last, count, released;
+ int cluster;
+ cas_page_t **page = cp->rx_pages[ring];
+
+ entry = cp->rx_old[ring];
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rxd[%d] interrupt, done: %d\n", ring, entry);
+
+ cluster = -1;
+ count = entry & 0x3;
+ last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
+ released = 0;
+ while (entry != last) {
+ /* make a new buffer if it's still in use */
+ if (page_count(page[entry]->buffer) > 1) {
+ cas_page_t *new = cas_page_dequeue(cp);
+ if (!new) {
+ /* let the timer know that we need to
+ * do this again
+ */
+ cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
+ if (!timer_pending(&cp->link_timer))
+ mod_timer(&cp->link_timer, jiffies +
+ CAS_LINK_FAST_TIMEOUT);
+ cp->rx_old[ring] = entry;
+ cp->rx_last[ring] = num ? num - released : 0;
+ return -ENOMEM;
+ }
+ spin_lock(&cp->rx_inuse_lock);
+ list_add(&page[entry]->list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ cp->init_rxds[ring][entry].buffer =
+ cpu_to_le64(new->dma_addr);
+ page[entry] = new;
+
+ }
+
+ if (++count == 4) {
+ cluster = entry;
+ count = 0;
+ }
+ released++;
+ entry = RX_DESC_ENTRY(ring, entry + 1);
+ }
+ cp->rx_old[ring] = entry;
+
+ if (cluster < 0)
+ return 0;
+
+ if (ring == 0)
+ writel(cluster, cp->regs + REG_RX_KICK);
+ else if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
+ return 0;
+}
+
+
+/* process a completion ring. packets are set up in three basic ways:
+ * small packets: should be copied header + data in single buffer.
+ * large packets: header and data in a single buffer.
+ * split packets: header in a separate buffer from data.
+ * data may be in multiple pages. data may be > 256
+ * bytes but in a single page.
+ *
+ * NOTE: RX page posting is done in this routine as well. while there's
+ * the capability of using multiple RX completion rings, it isn't
+ * really worthwhile due to the fact that the page posting will
+ * force serialization on the single descriptor ring.
+ */
+static int cas_rx_ringN(struct cas *cp, int ring, int budget)
+{
+ struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
+ int entry, drops;
+ int npackets = 0;
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rx[%d] interrupt, done: %d/%d\n",
+ ring,
+ readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
+
+ entry = cp->rx_new[ring];
+ drops = 0;
+ while (1) {
+ struct cas_rx_comp *rxc = rxcs + entry;
+ struct sk_buff *uninitialized_var(skb);
+ int type, len;
+ u64 words[4];
+ int i, dring;
+
+ words[0] = le64_to_cpu(rxc->word1);
+ words[1] = le64_to_cpu(rxc->word2);
+ words[2] = le64_to_cpu(rxc->word3);
+ words[3] = le64_to_cpu(rxc->word4);
+
+ /* don't touch if still owned by hw */
+ type = CAS_VAL(RX_COMP1_TYPE, words[0]);
+ if (type == 0)
+ break;
+
+ /* hw hasn't cleared the zero bit yet */
+ if (words[3] & RX_COMP4_ZERO) {
+ break;
+ }
+
+ /* get info on the packet */
+ if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].rx_errors++;
+ if (words[3] & RX_COMP4_LEN_MISMATCH)
+ cp->net_stats[ring].rx_length_errors++;
+ if (words[3] & RX_COMP4_BAD)
+ cp->net_stats[ring].rx_crc_errors++;
+ spin_unlock(&cp->stat_lock[ring]);
+
+ /* We'll just return it to Cassini. */
+ drop_it:
+ spin_lock(&cp->stat_lock[ring]);
+ ++cp->net_stats[ring].rx_dropped;
+ spin_unlock(&cp->stat_lock[ring]);
+ goto next;
+ }
+
+ len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
+ if (len < 0) {
+ ++drops;
+ goto drop_it;
+ }
+
+ /* see if it's a flow re-assembly or not. the driver
+ * itself handles release back up.
+ */
+ if (RX_DONT_BATCH || (type == 0x2)) {
+ /* non-reassm: these always get released */
+ cas_skb_release(skb);
+ } else {
+ cas_rx_flow_pkt(cp, words, skb);
+ }
+
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].rx_packets++;
+ cp->net_stats[ring].rx_bytes += len;
+ spin_unlock(&cp->stat_lock[ring]);
+
+ next:
+ npackets++;
+
+ /* should it be released? */
+ if (words[0] & RX_COMP1_RELEASE_HDR) {
+ i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ if (words[0] & RX_COMP1_RELEASE_DATA) {
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ if (words[0] & RX_COMP1_RELEASE_NEXT) {
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ /* skip to the next entry */
+ entry = RX_COMP_ENTRY(ring, entry + 1 +
+ CAS_VAL(RX_COMP1_SKIP, words[0]));
+#ifdef USE_NAPI
+ if (budget && (npackets >= budget))
+ break;
+#endif
+ }
+ cp->rx_new[ring] = entry;
+
+ if (drops)
+ netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
+ return npackets;
+}
+
+
+/* put completion entries back on the ring */
+static void cas_post_rxcs_ringN(struct net_device *dev,
+ struct cas *cp, int ring)
+{
+ struct cas_rx_comp *rxc = cp->init_rxcs[ring];
+ int last, entry;
+
+ last = cp->rx_cur[ring];
+ entry = cp->rx_new[ring];
+ netif_printk(cp, intr, KERN_DEBUG, dev,
+ "rxc[%d] interrupt, done: %d/%d\n",
+ ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
+
+ /* zero and re-mark descriptors */
+ while (last != entry) {
+ cas_rxc_init(rxc + last);
+ last = RX_COMP_ENTRY(ring, last + 1);
+ }
+ cp->rx_cur[ring] = last;
+
+ if (ring == 0)
+ writel(last, cp->regs + REG_RX_COMP_TAIL);
+ else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
+ writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
+}
+
+
+
+/* cassini can use all four PCI interrupts for the completion ring.
+ * rings 3 and 4 are identical
+ */
+#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+static inline void cas_handle_irqN(struct net_device *dev,
+ struct cas *cp, const u32 status,
+ const int ring)
+{
+ if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
+ cas_post_rxcs_ringN(dev, cp, ring);
+}
+
+static irqreturn_t cas_interruptN(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
+ u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
+
+ /* check for shared irq */
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ napi_schedule(&cp->napi);
+#else
+ cas_rx_ringN(cp, ring, 0);
+#endif
+ status &= ~INTR_RX_DONE_ALT;
+ }
+
+ if (status)
+ cas_handle_irqN(dev, cp, status, ring);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+#endif
+
+#ifdef USE_PCI_INTB
+/* everything but rx packets */
+static inline void cas_handle_irq1(struct cas *cp, const u32 status)
+{
+ if (status & INTR_RX_BUF_UNAVAIL_1) {
+ /* Frame arrived, no free RX buffers available.
+ * NOTE: we can get this on a link transition. */
+ cas_post_rxds_ringN(cp, 1, 0);
+ spin_lock(&cp->stat_lock[1]);
+ cp->net_stats[1].rx_dropped++;
+ spin_unlock(&cp->stat_lock[1]);
+ }
+
+ if (status & INTR_RX_BUF_AE_1)
+ cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
+ RX_AE_FREEN_VAL(1));
+
+ if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+ cas_post_rxcs_ringN(cp, 1);
+}
+
+/* ring 2 handles a few more events than 3 and 4 */
+static irqreturn_t cas_interrupt1(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+
+ /* check for shared interrupt */
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ napi_schedule(&cp->napi);
+#else
+ cas_rx_ringN(cp, 1, 0);
+#endif
+ status &= ~INTR_RX_DONE_ALT;
+ }
+ if (status)
+ cas_handle_irq1(cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+#endif
+
+static inline void cas_handle_irq(struct net_device *dev,
+ struct cas *cp, const u32 status)
+{
+ /* housekeeping interrupts */
+ if (status & INTR_ERROR_MASK)
+ cas_abnormal_irq(dev, cp, status);
+
+ if (status & INTR_RX_BUF_UNAVAIL) {
+ /* Frame arrived, no free RX buffers available.
+ * NOTE: we can get this on a link transition.
+ */
+ cas_post_rxds_ringN(cp, 0, 0);
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_dropped++;
+ spin_unlock(&cp->stat_lock[0]);
+ } else if (status & INTR_RX_BUF_AE) {
+ cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
+ RX_AE_FREEN_VAL(0));
+ }
+
+ if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+ cas_post_rxcs_ringN(dev, cp, 0);
+}
+
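+/* default (INTA) interrupt handler: tx completions, rx on ring 0,
+ * and the housekeeping events handled by cas_handle_irq().
+ */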
+static irqreturn_t cas_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 status = readl(cp->regs + REG_INTR_STATUS);
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
+ cas_tx(dev, cp, status);
+ status &= ~(INTR_TX_ALL | INTR_TX_INTME);
+ }
+
+ if (status & INTR_RX_DONE) {
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ napi_schedule(&cp->napi);
+#else
+ cas_rx_ringN(cp, 0, 0);
+#endif
+ status &= ~INTR_RX_DONE;
+ }
+
+ if (status)
+ cas_handle_irq(dev, cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef USE_NAPI
+static int cas_poll(struct napi_struct *napi, int budget)
+{
+ struct cas *cp = container_of(napi, struct cas, napi);
+ struct net_device *dev = cp->dev;
+ int i, enable_intr, credits;
+ u32 status = readl(cp->regs + REG_INTR_STATUS);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_tx(dev, cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ /* NAPI rx packets. we spread the credits across all of the
+ * rxc rings
+ *
+ * to make sure we're fair with the work we loop through each
+ * ring N_RX_COMP_RING times with a request of
+ * budget / N_RX_COMP_RINGS
+ */
+ enable_intr = 1;
+ credits = 0;
+ for (i = 0; i < N_RX_COMP_RINGS; i++) {
+ int j;
+ for (j = 0; j < N_RX_COMP_RINGS; j++) {
+ credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
+ if (credits >= budget) {
+ enable_intr = 0;
+ goto rx_comp;
+ }
+ }
+ }
+
+rx_comp:
+ /* final rx completion */
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status)
+ cas_handle_irq(dev, cp, status);
+
+#ifdef USE_PCI_INTB
+ if (N_RX_COMP_RINGS > 1) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+ if (status)
+ cas_handle_irq1(cp, status);
+ }
+#endif
+
+#ifdef USE_PCI_INTC
+ if (N_RX_COMP_RINGS > 2) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
+ if (status)
+ cas_handle_irqN(dev, cp, status, 2);
+ }
+#endif
+
+#ifdef USE_PCI_INTD
+ if (N_RX_COMP_RINGS > 3) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
+ if (status)
+ cas_handle_irqN(dev, cp, status, 3);
+ }
+#endif
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (enable_intr) {
+ napi_complete(napi);
+ cas_unmask_intr(cp);
+ }
+ return credits;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cas_netpoll(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ cas_disable_irq(cp, 0);
+ cas_interrupt(cp->pdev->irq, dev);
+ cas_enable_irq(cp, 0);
+
+#ifdef USE_PCI_INTB
+ if (N_RX_COMP_RINGS > 1) {
+ /* cas_interrupt1(); */
+ }
+#endif
+#ifdef USE_PCI_INTC
+ if (N_RX_COMP_RINGS > 2) {
+ /* cas_interruptN(); */
+ }
+#endif
+#ifdef USE_PCI_INTD
+ if (N_RX_COMP_RINGS > 3) {
+ /* cas_interruptN(); */
+ }
+#endif
+}
+#endif
+
+static void cas_tx_timeout(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ netdev_err(dev, "transmit timed out, resetting\n");
+ if (!cp->hw_running) {
+ netdev_err(dev, "hrm.. hw not running!\n");
+ return;
+ }
+
+ netdev_err(dev, "MIF_STATE[%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+ netdev_err(dev, "MAC_STATE[%08x]\n",
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+ readl(cp->regs + REG_TX_CFG),
+ readl(cp->regs + REG_MAC_TX_STATUS),
+ readl(cp->regs + REG_MAC_TX_CFG),
+ readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+ readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+ readl(cp->regs + REG_TX_FIFO_READ_PTR),
+ readl(cp->regs + REG_TX_SM_1),
+ readl(cp->regs + REG_TX_SM_2));
+
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_RX_CFG),
+ readl(cp->regs + REG_MAC_RX_STATUS),
+ readl(cp->regs + REG_MAC_RX_CFG));
+
+ netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_HP_STATE_MACHINE),
+ readl(cp->regs + REG_HP_STATUS0),
+ readl(cp->regs + REG_HP_STATUS1),
+ readl(cp->regs + REG_HP_STATUS2));
+
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ schedule_work(&cp->reset_task);
+#endif
+}
+
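+/* a worked example of the rule below: with a 64-entry tx ring,
+ * (size >> 1) - 1 == 31, so INTME is requested at entries 0 and
+ * 32, i.e. once per half ring.
+ */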
+static inline int cas_intme(int ring, int entry)
+{
+ /* Algorithm: IRQ every 1/2 of descriptors. */
+ if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
+ return 1;
+ return 0;
+}
+
+
+static void cas_write_txd(struct cas *cp, int ring, int entry,
+ dma_addr_t mapping, int len, u64 ctrl, int last)
+{
+ struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
+
+ ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
+ if (cas_intme(ring, entry))
+ ctrl |= TX_DESC_INTME;
+ if (last)
+ ctrl |= TX_DESC_EOF;
+ txd->control = cpu_to_le64(ctrl);
+ txd->buffer = cpu_to_le64(mapping);
+}
+
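+/* tx "tiny" buffers: a small per-descriptor bounce area used when a
+ * buffer would otherwise end too close to a page boundary (see
+ * cas_calc_tabort()), which can trigger target aborts on older chips.
+ */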
+static inline void *tx_tiny_buf(struct cas *cp, const int ring,
+ const int entry)
+{
+ return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
+}
+
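+/* account for a tiny buffer at slot @entry. @tentry is the packet's
+ * first slot; its nbufs count records how many tiny buffers the packet
+ * consumed, while the per-slot used flag lets cas_clean_txd() skip the
+ * slot when unmapping.
+ */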
+static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
+ const int entry, const int tentry)
+{
+ cp->tx_tiny_use[ring][tentry].nbufs++;
+ cp->tx_tiny_use[ring][entry].used = 1;
+ return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
+}
+
+static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = cp->dev;
+ int entry, nr_frags, frag, tabort, tentry;
+ dma_addr_t mapping;
+ unsigned long flags;
+ u64 ctrl;
+ u32 len;
+
+ spin_lock_irqsave(&cp->tx_lock[ring], flags);
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(cp, ring) <=
+ CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return 1;
+ }
+
+ ctrl = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
+ const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
+
+ ctrl = TX_DESC_CSUM_EN |
+ CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
+ CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
+ }
+
+ entry = cp->tx_new[ring];
+ cp->tx_skbs[ring][entry] = skb;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ len = skb_headlen(skb);
+ mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data), len,
+ PCI_DMA_TODEVICE);
+
+ tentry = entry;
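+ /* split the head buffer if it ends too close to a page
+ * boundary, bouncing the tail through a tiny buffer
+ * (target-abort workaround, see cas_calc_tabort()).
+ */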
+ tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
+ if (unlikely(tabort)) {
+ /* NOTE: len is always > tabort */
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ ctrl | TX_DESC_SOF, 0);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ skb_copy_from_linear_data_offset(skb, len - tabort,
+ tx_tiny_buf(cp, ring, entry), tabort);
+ mapping = tx_tiny_map(cp, ring, entry, tentry);
+ cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
+ (nr_frags == 0));
+ } else {
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl |
+ TX_DESC_SOF, (nr_frags == 0));
+ }
+ entry = TX_DESC_NEXT(ring, entry);
+
+ for (frag = 0; frag < nr_frags; frag++) {
+ skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+ len = fragp->size;
+ mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
+ PCI_DMA_TODEVICE);
+
+ tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+ if (unlikely(tabort)) {
+ void *addr;
+
+ /* NOTE: len is always > tabort */
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ ctrl, 0);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ addr = cas_page_map(skb_frag_page(fragp));
+ memcpy(tx_tiny_buf(cp, ring, entry),
+ addr + fragp->page_offset + len - tabort,
+ tabort);
+ cas_page_unmap(addr);
+ mapping = tx_tiny_map(cp, ring, entry, tentry);
+ len = tabort;
+ }
+
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl,
+ (frag + 1 == nr_frags));
+ entry = TX_DESC_NEXT(ring, entry);
+ }
+
+ cp->tx_new[ring] = entry;
+ if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+ "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+ ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
+ writel(entry, cp->regs + REG_TX_KICKN(ring));
+ spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+ return 0;
+}
+
+static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ /* this is only used as a load-balancing hint, so it doesn't
+ * need to be SMP safe
+ */
+ static int ring;
+
+ if (skb_padto(skb, cp->min_frame_size))
+ return NETDEV_TX_OK;
+
+ /* XXX: we need some higher-level QoS hooks to steer packets to
+ * individual queues.
+ */
+ if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
+ return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
+}
+
+static void cas_init_tx_dma(struct cas *cp)
+{
+ u64 desc_dma = cp->block_dvma;
+ unsigned long off;
+ u32 val;
+ int i;
+
+ /* set up tx completion writeback registers. must be 8-byte aligned */
+#ifdef USE_TX_COMPWB
+ off = offsetof(struct cas_init_block, tx_compwb);
+ writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
+ writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
+#endif
+
+ /* enable completion writebacks, enable paced mode,
+ * disable read pipe, and disable pre-interrupt compwbs
+ */
+ val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
+ TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
+ TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
+ TX_CFG_INTR_COMPWB_DIS;
+
+ /* write out tx ring info and tx desc bases */
+ for (i = 0; i < MAX_TX_RINGS; i++) {
+ off = (unsigned long) cp->init_txds[i] -
+ (unsigned long) cp->init_block;
+
+ val |= CAS_TX_RINGN_BASE(i);
+ writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
+ writel((desc_dma + off) & 0xffffffff, cp->regs +
+ REG_TX_DBN_LOW(i));
+ /* don't zero out the kick register here as the system
+ * will wedge
+ */
+ }
+ writel(val, cp->regs + REG_TX_CFG);
+
+ /* program max burst sizes. these numbers should be different
+ * if doing QoS.
+ */
+#ifdef USE_QOS
+ writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+ writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
+ writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
+ writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
+#else
+ writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_1);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_2);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_3);
+#endif
+}
+
+/* Must be invoked under cp->lock. */
+static inline void cas_init_dma(struct cas *cp)
+{
+ cas_init_tx_dma(cp);
+ cas_init_rx_dma(cp);
+}
+
+static void cas_process_mc_list(struct cas *cp)
+{
+ u16 hash_table[16];
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ int i = 1;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, cp->dev) {
+ if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+ /* use the alternate mac address registers for the
+ * first 15 multicast addresses
+ */
+ writel((ha->addr[4] << 8) | ha->addr[5],
+ cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel((ha->addr[2] << 8) | ha->addr[3],
+ cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel((ha->addr[0] << 8) | ha->addr[1],
+ cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ i++;
+ } else {
+ /* use hw hash table for the next series of
+ * multicast addresses
+ */
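+ /* 256-bin hash filter: the top 8 CRC bits select the
+ * bin, spread across sixteen 16-bit registers with bin 0
+ * in the MSB of register 0.
+ */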
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ }
+ for (i = 0; i < 16; i++)
+ writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
+/* Must be invoked under cp->lock. */
+static u32 cas_setup_multicast(struct cas *cp)
+{
+ u32 rxcfg = 0;
+ int i;
+
+ if (cp->dev->flags & IFF_PROMISC) {
+ rxcfg |= MAC_RX_CFG_PROMISC_EN;
+
+ } else if (cp->dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 16; i++)
+ writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
+ rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
+
+ } else {
+ cas_process_mc_list(cp);
+ rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
+ }
+
+ return rxcfg;
+}
+
+/* must be invoked under cp->stat_lock[N_TX_RINGS] */
+static void cas_clear_mac_err(struct cas *cp)
+{
+ writel(0, cp->regs + REG_MAC_COLL_NORMAL);
+ writel(0, cp->regs + REG_MAC_COLL_FIRST);
+ writel(0, cp->regs + REG_MAC_COLL_EXCESS);
+ writel(0, cp->regs + REG_MAC_COLL_LATE);
+ writel(0, cp->regs + REG_MAC_TIMER_DEFER);
+ writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
+ writel(0, cp->regs + REG_MAC_RECV_FRAME);
+ writel(0, cp->regs + REG_MAC_LEN_ERR);
+ writel(0, cp->regs + REG_MAC_ALIGN_ERR);
+ writel(0, cp->regs + REG_MAC_FCS_ERR);
+ writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
+}
+
+
+static void cas_mac_reset(struct cas *cp)
+{
+ int i;
+
+ /* do both TX and RX reset */
+ writel(0x1, cp->regs + REG_MAC_TX_RESET);
+ writel(0x1, cp->regs + REG_MAC_RX_RESET);
+
+ /* wait for TX */
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
+ break;
+ udelay(10);
+ }
+
+ /* wait for RX */
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (readl(cp->regs + REG_MAC_TX_RESET) |
+ readl(cp->regs + REG_MAC_RX_RESET))
+ netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+ readl(cp->regs + REG_MAC_TX_RESET),
+ readl(cp->regs + REG_MAC_RX_RESET),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+}
+
+
+/* Must be invoked under cp->lock. */
+static void cas_init_mac(struct cas *cp)
+{
+ unsigned char *e = &cp->dev->dev_addr[0];
+ int i;
+ cas_mac_reset(cp);
+
+ /* setup core arbitration weight register */
+ writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
+
+ /* XXX Use pci_dma_burst_advice() */
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
+ /* set the infinite burst register for chips that don't have
+ * pci issues.
+ */
+ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
+ writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
+#endif
+
+ writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
+
+ writel(0x00, cp->regs + REG_MAC_IPG0);
+ writel(0x08, cp->regs + REG_MAC_IPG1);
+ writel(0x04, cp->regs + REG_MAC_IPG2);
+
+ /* change later for 802.3z */
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+
+ /* min frame + FCS */
+ writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
+
+ /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
+ * specify the maximum frame size to prevent RX tag errors on
+ * oversized frames.
+ */
+ writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
+ CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
+ (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
+ cp->regs + REG_MAC_FRAMESIZE_MAX);
+
+ /* NOTE: crc_size is used as a surrogate for half-duplex.
+ * workaround saturn half-duplex issue by increasing preamble
+ * size to 65 bytes.
+ */
+ if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
+ writel(0x41, cp->regs + REG_MAC_PA_SIZE);
+ else
+ writel(0x07, cp->regs + REG_MAC_PA_SIZE);
+ writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
+ writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
+ writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
+
+ writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
+
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
+
+ /* setup mac address in perfect filter array */
+ for (i = 0; i < 45; i++)
+ writel(0x0, cp->regs + REG_MAC_ADDRN(i));
+
+ writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
+ writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
+ writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
+
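+ /* 802.3x flow-control multicast address 01:80:c2:00:00:01 */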
+ writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
+ writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
+ writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
+
+ cp->mac_rx_cfg = cas_setup_multicast(cp);
+
+ spin_lock(&cp->stat_lock[N_TX_RINGS]);
+ cas_clear_mac_err(cp);
+ spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+
+ /* Setup MAC interrupts. We want to get all of the interesting
+ * counter expiration events, but we do not want to hear about
+ * normal rx/tx as the DMA engine tells us that.
+ */
+ writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
+ writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+
+ /* Don't enable even the PAUSE interrupts for now, we
+ * make no use of those events other than to record them.
+ */
+ writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_init_pause_thresholds(struct cas *cp)
+{
+ /* Calculate pause thresholds. Setting the OFF threshold to the
+ * full RX fifo size effectively disables PAUSE generation
+ */
+ if (cp->rx_fifo_size <= (2 * 1024)) {
+ cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
+ } else {
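+ /* mtu + ethernet header + FCS + optional VLAN tag,
+ * rounded up to a 64-byte multiple
+ */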
+ int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
+ if (max_frame * 3 > cp->rx_fifo_size) {
+ cp->rx_pause_off = 7104;
+ cp->rx_pause_on = 960;
+ } else {
+ int off = (cp->rx_fifo_size - (max_frame * 2));
+ int on = off - max_frame;
+ cp->rx_pause_off = off;
+ cp->rx_pause_on = on;
+ }
+ }
+}
+
+static int cas_vpd_match(const void __iomem *p, const char *str)
+{
+ int len = strlen(str) + 1;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (readb(p + i) != str[i])
+ return 0;
+ }
+ return 1;
+}
+
+
+/* get the mac address by reading the vpd information in the rom.
+ * also get the phy type and determine if there's an entropy generator.
+ * NOTE: this is a bit convoluted for the following reasons:
+ * 1) vpd info has order-dependent mac addresses for multinic cards
+ * 2) the only way to determine the nic order is to use the slot
+ * number.
+ * 3) fiber cards don't have bridges, so their slot numbers don't
+ * mean anything.
+ * 4) we don't actually know we have a fiber card until after
+ * the mac addresses are parsed.
+ */
+static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
+ const int offset)
+{
+ void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
+ void __iomem *base, *kstart;
+ int i, len;
+ int found = 0;
+#define VPD_FOUND_MAC 0x01
+#define VPD_FOUND_PHY 0x02
+
+ int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
+ int mac_off = 0;
+
+#if defined(CONFIG_SPARC)
+ const unsigned char *addr;
+#endif
+
+ /* give us access to the PROM */
+ writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
+ cp->regs + REG_BIM_LOCAL_DEV_EN);
+
+ /* check for an expansion rom */
+ if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
+ goto use_random_mac_addr;
+
+ /* search for beginning of vpd */
+ base = NULL;
+ for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
+ /* check for PCIR */
+ if ((readb(p + i + 0) == 0x50) &&
+ (readb(p + i + 1) == 0x43) &&
+ (readb(p + i + 2) == 0x49) &&
+ (readb(p + i + 3) == 0x52)) {
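+ /* bytes 8-9 of the PCI data structure give the
+ * offset of the VPD area
+ */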
+ base = p + (readb(p + i + 8) |
+ (readb(p + i + 9) << 8));
+ break;
+ }
+ }
+
+ if (!base || (readb(base) != 0x82))
+ goto use_random_mac_addr;
+
+ i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
+ while (i < EXPANSION_ROM_SIZE) {
+ if (readb(base + i) != 0x90) /* no vpd found */
+ goto use_random_mac_addr;
+
+ /* found a vpd field */
+ len = readb(base + i + 1) | (readb(base + i + 2) << 8);
+
+ /* extract keywords */
+ kstart = base + i + 3;
+ p = kstart;
+ while ((p - kstart) < len) {
+ int klen = readb(p + 2);
+ int j;
+ char type;
+
+ p += 3;
+
+ /* look for the following things:
+ * -- correct length == 29
+ * 3 (type) + 2 (size) +
+ * 18 (strlen("local-mac-address") + 1) +
+ * 6 (mac addr)
+ * -- VPD Instance 'I'
+ * -- VPD Type Bytes 'B'
+ * -- VPD data length == 6
+ * -- property string == local-mac-address
+ *
+ * -- correct length == 24
+ * 3 (type) + 2 (size) +
+ * 12 (strlen("entropy-dev") + 1) +
+ * 7 (strlen("vms110") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'B'
+ * -- VPD data length == 7
+ * -- property string == entropy-dev
+ *
+ * -- correct length == 18
+ * 3 (type) + 2 (size) +
+ * 9 (strlen("phy-type") + 1) +
+ * 4 (strlen("pcs") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'S'
+ * -- VPD data length == 4
+ * -- property string == phy-type
+ *
+ * -- correct length == 23
+ * 3 (type) + 2 (size) +
+ * 14 (strlen("phy-interface") + 1) +
+ * 4 (strlen("pcs") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'S'
+ * -- VPD data length == 4
+ * -- property string == phy-interface
+ */
+ if (readb(p) != 'I')
+ goto next;
+
+ /* finally, check string and length */
+ type = readb(p + 3);
+ if (type == 'B') {
+ if ((klen == 29) && readb(p + 4) == 6 &&
+ cas_vpd_match(p + 5,
+ "local-mac-address")) {
+ if (mac_off++ > offset)
+ goto next;
+
+ /* set mac address */
+ for (j = 0; j < 6; j++)
+ dev_addr[j] =
+ readb(p + 23 + j);
+ goto found_mac;
+ }
+ }
+
+ if (type != 'S')
+ goto next;
+
+#ifdef USE_ENTROPY_DEV
+ if ((klen == 24) &&
+ cas_vpd_match(p + 5, "entropy-dev") &&
+ cas_vpd_match(p + 17, "vms110")) {
+ cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
+ goto next;
+ }
+#endif
+
+ if (found & VPD_FOUND_PHY)
+ goto next;
+
+ if ((klen == 18) && readb(p + 4) == 4 &&
+ cas_vpd_match(p + 5, "phy-type")) {
+ if (cas_vpd_match(p + 14, "pcs")) {
+ phy_type = CAS_PHY_SERDES;
+ goto found_phy;
+ }
+ }
+
+ if ((klen == 23) && readb(p + 4) == 4 &&
+ cas_vpd_match(p + 5, "phy-interface")) {
+ if (cas_vpd_match(p + 19, "pcs")) {
+ phy_type = CAS_PHY_SERDES;
+ goto found_phy;
+ }
+ }
+found_mac:
+ found |= VPD_FOUND_MAC;
+ goto next;
+
+found_phy:
+ found |= VPD_FOUND_PHY;
+
+next:
+ p += klen;
+ }
+ i += len + 3;
+ }
+
+use_random_mac_addr:
+ if (found & VPD_FOUND_MAC)
+ goto done;
+
+#if defined(CONFIG_SPARC)
+ addr = of_get_property(cp->of_node, "local-mac-address", NULL);
+ if (addr != NULL) {
+ memcpy(dev_addr, addr, 6);
+ goto done;
+ }
+#endif
+
+ /* Sun MAC prefix then 3 random bytes. */
+ pr_info("MAC address not found in ROM VPD\n");
+ dev_addr[0] = 0x08;
+ dev_addr[1] = 0x00;
+ dev_addr[2] = 0x20;
+ get_random_bytes(dev_addr + 3, 3);
+
+done:
+ writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ return phy_type;
+}
+
+/* check pci invariants */
+static void cas_check_pci_invariants(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+
+ cp->cas_flags = 0;
+ if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
+ (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
+ if (pdev->revision >= CAS_ID_REVPLUS)
+ cp->cas_flags |= CAS_FLAG_REG_PLUS;
+ if (pdev->revision < CAS_ID_REVPLUS02u)
+ cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
+
+ /* Original Cassini supports HW CSUM, but it's not
+ * enabled by default as it can trigger TX hangs.
+ */
+ if (pdev->revision < CAS_ID_REV2)
+ cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
+ } else {
+ /* Only sun has original cassini chips. */
+ cp->cas_flags |= CAS_FLAG_REG_PLUS;
+
+ /* We use a flag because the same phy might be externally
+ * connected.
+ */
+ if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
+ (pdev->device == PCI_DEVICE_ID_NS_SATURN))
+ cp->cas_flags |= CAS_FLAG_SATURN;
+ }
+}
+
+
+static int cas_check_invariants(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ u32 cfg;
+ int i;
+
+ /* get page size for rx buffers. */
+ cp->page_order = 0;
+#ifdef USE_PAGE_ORDER
+ if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
+ /* see if we can allocate larger pages */
+ struct page *page = alloc_pages(GFP_ATOMIC,
+ CAS_JUMBO_PAGE_SHIFT -
+ PAGE_SHIFT);
+ if (page) {
+ __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
+ cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
+ } else {
+ pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
+ }
+ }
+#endif
+ cp->page_size = (PAGE_SIZE << cp->page_order);
+
+ /* Fetch the FIFO configurations. */
+ cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
+ cp->rx_fifo_size = RX_FIFO_SIZE;
+
+ /* finish phy determination. MDIO1 takes precedence over MDIO0 if
+ * they're both connected.
+ */
+ cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
+ PCI_SLOT(pdev->devfn));
+ if (cp->phy_type & CAS_PHY_SERDES) {
+ cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+ return 0; /* no more checking needed */
+ }
+
+ /* MII */
+ cfg = readl(cp->regs + REG_MIF_CFG);
+ if (cfg & MIF_CFG_MDIO_1) {
+ cp->phy_type = CAS_PHY_MII_MDIO1;
+ } else if (cfg & MIF_CFG_MDIO_0) {
+ cp->phy_type = CAS_PHY_MII_MDIO0;
+ }
+
+ cas_mif_poll(cp, 0);
+ writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+
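+ /* probe all 32 MII addresses, retrying each a few times,
+ * until a phy responds with a valid ID.
+ */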
+ for (i = 0; i < 32; i++) {
+ u32 phy_id;
+ int j;
+
+ for (j = 0; j < 3; j++) {
+ cp->phy_addr = i;
+ phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
+ phy_id |= cas_phy_read(cp, MII_PHYSID2);
+ if (phy_id && (phy_id != 0xFFFFFFFF)) {
+ cp->phy_id = phy_id;
+ goto done;
+ }
+ }
+ }
+ pr_err("MII phy did not respond [%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+ return -1;
+
+done:
+ /* see if we can do gigabit */
+ cfg = cas_phy_read(cp, MII_BMSR);
+ if ((cfg & CAS_BMSR_1000_EXTEND) &&
+ cas_phy_read(cp, CAS_MII_1000_EXTEND))
+ cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+ return 0;
+}
+
+/* Must be invoked under cp->lock. */
+static inline void cas_start_dma(struct cas *cp)
+{
+ int i;
+ u32 val;
+ int txfailed = 0;
+
+ /* enable dma */
+ val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_TX_CFG);
+ val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_RX_CFG);
+
+ /* enable the mac */
+ val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
+ writel(val, cp->regs + REG_MAC_TX_CFG);
+ val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
+ writel(val, cp->regs + REG_MAC_RX_CFG);
+
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ val = readl(cp->regs + REG_MAC_TX_CFG);
+ if ((val & MAC_TX_CFG_EN))
+ break;
+ udelay(10);
+ }
+ if (i < 0)
+ txfailed = 1;
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ if ((val & MAC_RX_CFG_EN)) {
+ if (txfailed) {
+ netdev_err(cp->dev,
+ "enabling mac failed [tx:%08x:%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+ }
+ goto enable_rx_done;
+ }
+ udelay(10);
+ }
+ netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+ (txfailed ? "tx,rx" : "rx"),
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+enable_rx_done:
+ cas_unmask_intr(cp); /* enable interrupts */
+ writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+ writel(0, cp->regs + REG_RX_COMP_TAIL);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ if (N_RX_DESC_RINGS > 1)
+ writel(RX_DESC_RINGN_SIZE(1) - 4,
+ cp->regs + REG_PLUS_RX_KICK1);
+
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
+ writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
+ }
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
+ int *pause)
+{
+ u32 val = readl(cp->regs + REG_PCS_MII_LPA);
+ *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
+ *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
+ if (val & PCS_MII_LPA_ASYM_PAUSE)
+ *pause |= 0x10;
+ *spd = 1000;
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
+ int *pause)
+{
+ u32 val;
+
+ *fd = 0;
+ *spd = 10;
+ *pause = 0;
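+ /* pause encoding: 0x01 == symmetric, 0x10 == asymmetric,
+ * matching cas_read_pcs_link_mode() and cas_set_link_modes().
+ */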
+
+ /* use GMII registers */
+ val = cas_phy_read(cp, MII_LPA);
+ if (val & CAS_LPA_PAUSE)
+ *pause = 0x01;
+
+ if (val & CAS_LPA_ASYM_PAUSE)
+ *pause |= 0x10;
+
+ if (val & LPA_DUPLEX)
+ *fd = 1;
+ if (val & LPA_100)
+ *spd = 100;
+
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ val = cas_phy_read(cp, CAS_MII_1000_STATUS);
+ if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
+ *spd = 1000;
+ if (val & CAS_LPA_1000FULL)
+ *fd = 1;
+ }
+}
+
+/* A link-up condition has occurred, initialize and enable the
+ * rest of the chip.
+ *
+ * Must be invoked under cp->lock.
+ */
+static void cas_set_link_modes(struct cas *cp)
+{
+ u32 val;
+ int full_duplex, speed, pause;
+
+ full_duplex = 0;
+ speed = 10;
+ pause = 0;
+
+ if (CAS_PHY_MII(cp->phy_type)) {
+ cas_mif_poll(cp, 0);
+ val = cas_phy_read(cp, MII_BMCR);
+ if (val & BMCR_ANENABLE) {
+ cas_read_mii_link_mode(cp, &full_duplex, &speed,
+ &pause);
+ } else {
+ if (val & BMCR_FULLDPLX)
+ full_duplex = 1;
+
+ if (val & BMCR_SPEED100)
+ speed = 100;
+ else if (val & CAS_BMCR_SPEED1000)
+ speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ 1000 : 100;
+ }
+ cas_mif_poll(cp, 1);
+
+ } else {
+ val = readl(cp->regs + REG_PCS_MII_CTRL);
+ cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
+ if ((val & PCS_MII_AUTONEG_EN) == 0) {
+ if (val & PCS_MII_CTRL_DUPLEX)
+ full_duplex = 1;
+ }
+ }
+
+ netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+ speed, full_duplex ? "full" : "half");
+
+ val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
+ if (CAS_PHY_MII(cp->phy_type)) {
+ val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
+ if (!full_duplex)
+ val |= MAC_XIF_DISABLE_ECHO;
+ }
+ if (full_duplex)
+ val |= MAC_XIF_FDPLX_LED;
+ if (speed == 1000)
+ val |= MAC_XIF_GMII_MODE;
+ writel(val, cp->regs + REG_MAC_XIF_CFG);
+
+ /* deal with carrier and collision detect. */
+ val = MAC_TX_CFG_IPG_EN;
+ if (full_duplex) {
+ val |= MAC_TX_CFG_IGNORE_CARRIER;
+ val |= MAC_TX_CFG_IGNORE_COLL;
+ } else {
+#ifndef USE_CSMA_CD_PROTO
+ val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
+ val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
+#endif
+ }
+ /* val now set up for REG_MAC_TX_CFG */
+
+ /* If gigabit and half-duplex, enable carrier extension
+ * mode. increase slot time to 512 bytes as well.
+ * else, disable it and make sure slot time is 64 bytes.
+ * also activate checksum bug workaround
+ */
+ if ((speed == 1000) && !full_duplex) {
+ writel(val | MAC_TX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_TX_CFG);
+
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
+ writel(val | MAC_RX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_RX_CFG);
+
+ writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
+
+ cp->crc_size = 4;
+ /* minimum size gigabit frame at half duplex */
+ cp->min_frame_size = CAS_1000MB_MIN_FRAME;
+
+ } else {
+ writel(val, cp->regs + REG_MAC_TX_CFG);
+
+ /* checksum bug workaround. don't strip FCS when in
+ * half-duplex mode
+ */
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ if (full_duplex) {
+ val |= MAC_RX_CFG_STRIP_FCS;
+ cp->crc_size = 0;
+ cp->min_frame_size = CAS_MIN_MTU;
+ } else {
+ val &= ~MAC_RX_CFG_STRIP_FCS;
+ cp->crc_size = 4;
+ cp->min_frame_size = CAS_MIN_FRAME;
+ }
+ writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_RX_CFG);
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+ }
+
+ if (netif_msg_link(cp)) {
+ if (pause & 0x01) {
+ netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ cp->rx_fifo_size,
+ cp->rx_pause_off,
+ cp->rx_pause_on);
+ } else if (pause & 0x10) {
+ netdev_info(cp->dev, "TX pause enabled\n");
+ } else {
+ netdev_info(cp->dev, "Pause is disabled\n");
+ }
+ }
+
+ val = readl(cp->regs + REG_MAC_CTRL_CFG);
+ val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
+ if (pause) { /* symmetric or asymmetric pause */
+ val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
+ if (pause & 0x01) { /* symmetric pause */
+ val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
+ }
+ }
+ writel(val, cp->regs + REG_MAC_CTRL_CFG);
+ cas_start_dma(cp);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_init_hw(struct cas *cp, int restart_link)
+{
+ if (restart_link)
+ cas_phy_init(cp);
+
+ cas_init_pause_thresholds(cp);
+ cas_init_mac(cp);
+ cas_init_dma(cp);
+
+ if (restart_link) {
+ /* Default aneg parameters */
+ cp->timer_ticks = 0;
+ cas_begin_auto_negotiation(cp, NULL);
+ } else if (cp->lstate == link_up) {
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+}
+
+/* Must be invoked under cp->lock. on earlier cassini boards,
+ * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
+ * let it settle out, and then restore pci state.
+ */
+static void cas_hard_reset(struct cas *cp)
+{
+ writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ udelay(20);
+ pci_restore_state(cp->pdev);
+}
+
+
+static void cas_global_reset(struct cas *cp, int blkflag)
+{
+ int limit;
+
+ /* issue a global reset. don't use RSTOUT. */
+ if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
+ /* For PCS, when the blkflag is set, we should set the
+ * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
+ * the last autonegotiation from being cleared. We'll
+ * need some special handling if the chip is set into a
+ * loopback mode.
+ */
+ writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
+ cp->regs + REG_SW_RESET);
+ } else {
+ writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
+ }
+
+ /* need to wait at least 3ms before polling register */
+ mdelay(3);
+
+ limit = STOP_TRIES;
+ while (limit-- > 0) {
+ u32 val = readl(cp->regs + REG_SW_RESET);
+ if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
+ goto done;
+ udelay(10);
+ }
+ netdev_err(cp->dev, "sw reset failed\n");
+
+done:
+ /* enable various BIM interrupts */
+ writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
+ BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
+
+ /* clear out pci error status mask for handled errors.
+ * we don't deal with DMA counter overflows as they happen
+ * all the time.
+ */
+ writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+ PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+ PCI_ERR_BIM_DMA_READ), cp->regs +
+ REG_PCI_ERR_STATUS_MASK);
+
+ /* set up for MII by default to address mac rx reset timeout
+ * issue
+ */
+ writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+}
+
+static void cas_reset(struct cas *cp, int blkflag)
+{
+ u32 val;
+
+ cas_mask_intr(cp);
+ cas_global_reset(cp, blkflag);
+ cas_mac_reset(cp);
+ cas_entropy_reset(cp);
+
+ /* disable dma engines. */
+ val = readl(cp->regs + REG_TX_CFG);
+ val &= ~TX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_TX_CFG);
+
+ val = readl(cp->regs + REG_RX_CFG);
+ val &= ~RX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_RX_CFG);
+
+ /* program header parser */
+ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
+ (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+ cas_load_firmware(cp, CAS_HP_FIRMWARE);
+ } else {
+ cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
+ }
+
+ /* clear out error registers */
+ spin_lock(&cp->stat_lock[N_TX_RINGS]);
+ cas_clear_mac_err(cp);
+ spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+}
+
+/* Shut down the chip, must be called with pm_mutex held. */
+static void cas_shutdown(struct cas *cp)
+{
+ unsigned long flags;
+
+ /* Make us not-running to avoid timers respawning */
+ cp->hw_running = 0;
+
+ del_timer_sync(&cp->link_timer);
+
+ /* Stop the reset task */
+#if 0
+ while (atomic_read(&cp->reset_task_pending_mtu) ||
+ atomic_read(&cp->reset_task_pending_spare) ||
+ atomic_read(&cp->reset_task_pending_all))
+ schedule();
+
+#else
+ while (atomic_read(&cp->reset_task_pending))
+ schedule();
+#endif
+ /* Actually stop the chip */
+ cas_lock_all_save(cp, flags);
+ cas_reset(cp, 0);
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ cas_phy_powerdown(cp);
+ cas_unlock_all_restore(cp, flags);
+}
+
+static int cas_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ if (!netif_running(dev) || !netif_device_present(dev))
+ return 0;
+
+ /* let the reset task handle it */
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ if ((cp->phy_type & CAS_PHY_SERDES)) {
+ atomic_inc(&cp->reset_task_pending_all);
+ } else {
+ atomic_inc(&cp->reset_task_pending_mtu);
+ }
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
+ CAS_RESET_ALL : CAS_RESET_MTU);
+ pr_err("reset called in cas_change_mtu\n");
+ schedule_work(&cp->reset_task);
+#endif
+
+ flush_work_sync(&cp->reset_task);
+ return 0;
+}
+
+static void cas_clean_txd(struct cas *cp, int ring)
+{
+ struct cas_tx_desc *txd = cp->init_txds[ring];
+ struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
+ u64 daddr, dlen;
+ int i, size;
+
+ size = TX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ int frag;
+
+ if (skbs[i] == NULL)
+ continue;
+
+ skb = skbs[i];
+ skbs[i] = NULL;
+
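+ /* one descriptor for the head plus one per fragment,
+ * consumed in ring order starting at slot i.
+ */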
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ int ent = i & (size - 1);
+
+ /* first buffer is never a tiny buffer and so
+ * needs to be unmapped.
+ */
+ daddr = le64_to_cpu(txd[ent].buffer);
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
+ le64_to_cpu(txd[ent].control));
+ pci_unmap_page(cp->pdev, daddr, dlen,
+ PCI_DMA_TODEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags) {
+ i++;
+
+ /* next buffer might by a tiny buffer.
+ * skip past it.
+ */
+ ent = i & (size - 1);
+ if (cp->tx_tiny_use[ring][ent].used)
+ i++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ /* zero out tiny buf usage */
+ memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
+}
+
+/* freed on close */
+static inline void cas_free_rx_desc(struct cas *cp, int ring)
+{
+ cas_page_t **page = cp->rx_pages[ring];
+ int i, size;
+
+ size = RX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ if (page[i]) {
+ cas_page_free(cp, page[i]);
+ page[i] = NULL;
+ }
+ }
+}
+
+static void cas_free_rxds(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
+ cas_free_rx_desc(cp, i);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_clean_rings(struct cas *cp)
+{
+ int i;
+
+ /* need to clean all tx rings */
+ memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
+ memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
+ for (i = 0; i < N_TX_RINGS; i++)
+ cas_clean_txd(cp, i);
+
+ /* zero out init block */
+ memset(cp->init_block, 0, sizeof(struct cas_init_block));
+ cas_clean_rxds(cp);
+ cas_clean_rxcs(cp);
+}
+
+/* allocated on open */
+static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
+{
+ cas_page_t **page = cp->rx_pages[ring];
+ int size, i = 0;
+
+ size = RX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
+ return -1;
+ }
+ return 0;
+}
+
+static int cas_alloc_rxds(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++) {
+ if (cas_alloc_rx_desc(cp, i) < 0) {
+ cas_free_rxds(cp);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void cas_reset_task(struct work_struct *work)
+{
+ struct cas *cp = container_of(work, struct cas, reset_task);
+#if 0
+ int pending = atomic_read(&cp->reset_task_pending);
+#else
+ int pending_all = atomic_read(&cp->reset_task_pending_all);
+ int pending_spare = atomic_read(&cp->reset_task_pending_spare);
+ int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
+
+ if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
+ /* We can have more tasks scheduled than actually
+ * needed.
+ */
+ atomic_dec(&cp->reset_task_pending);
+ return;
+ }
+#endif
+ /* The link went down, we reset the ring, but keep
+ * DMA stopped. Use this function for reset
+ * on error as well.
+ */
+ if (cp->hw_running) {
+ unsigned long flags;
+
+ /* Make sure we don't get interrupts or tx packets */
+ netif_device_detach(cp->dev);
+ cas_lock_all_save(cp, flags);
+
+ if (cp->opened) {
+ /* We call cas_spare_recover when we call cas_open,
+ * but we do not initialize the lists cas_spare_recover
+ * uses until cas_open is called.
+ */
+ cas_spare_recover(cp, GFP_ATOMIC);
+ }
+#if 1
+ /* test => only pending_spare set */
+ if (!pending_all && !pending_mtu)
+ goto done;
+#else
+ if (pending == CAS_RESET_SPARE)
+ goto done;
+#endif
+ /* when pending == CAS_RESET_ALL, the following
+ * call to cas_init_hw will restart auto negotiation.
+ * Setting the second argument of cas_reset to
+ * !(pending == CAS_RESET_ALL) will set this argument
+ * to 1 (avoiding reinitializing the PHY for the normal
+ * PCS case) when auto negotiation is not restarted.
+ */
+#if 1
+ cas_reset(cp, !(pending_all > 0));
+ if (cp->opened)
+ cas_clean_rings(cp);
+ cas_init_hw(cp, (pending_all > 0));
+#else
+ cas_reset(cp, !(pending == CAS_RESET_ALL));
+ if (cp->opened)
+ cas_clean_rings(cp);
+ cas_init_hw(cp, pending == CAS_RESET_ALL);
+#endif
+
+done:
+ cas_unlock_all_restore(cp, flags);
+ netif_device_attach(cp->dev);
+ }
+#if 1
+ atomic_sub(pending_all, &cp->reset_task_pending_all);
+ atomic_sub(pending_spare, &cp->reset_task_pending_spare);
+ atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
+ atomic_dec(&cp->reset_task_pending);
+#else
+ atomic_set(&cp->reset_task_pending, 0);
+#endif
+}
+
+static void cas_link_timer(unsigned long data)
+{
+ struct cas *cp = (struct cas *) data;
+ int mask, pending = 0, reset = 0;
+ unsigned long flags;
+
+ if (link_transition_timeout != 0 &&
+ cp->link_transition_jiffies_valid &&
+ ((jiffies - cp->link_transition_jiffies) >
+ (link_transition_timeout))) {
+ /* One-second counter so link-down workaround doesn't
+ * cause resets to occur so fast as to fool the switch
+ * into thinking the link is down.
+ */
+ cp->link_transition_jiffies_valid = 0;
+ }
+
+ if (!cp->hw_running)
+ return;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_lock_tx(cp);
+ cas_entropy_gather(cp);
+
+ /* If the link task is still pending, we just
+ * reschedule the link timer
+ */
+#if 1
+ if (atomic_read(&cp->reset_task_pending_all) ||
+ atomic_read(&cp->reset_task_pending_spare) ||
+ atomic_read(&cp->reset_task_pending_mtu))
+ goto done;
+#else
+ if (atomic_read(&cp->reset_task_pending))
+ goto done;
+#endif
+
+ /* check for rx cleaning */
+ if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
+ int i, rmask;
+
+ for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
+ rmask = CAS_FLAG_RXD_POST(i);
+ if ((mask & rmask) == 0)
+ continue;
+
+ /* post_rxds will do a mod_timer */
+ if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
+ pending = 1;
+ continue;
+ }
+ cp->cas_flags &= ~rmask;
+ }
+ }
+
+ if (CAS_PHY_MII(cp->phy_type)) {
+ u16 bmsr;
+ cas_mif_poll(cp, 0);
+ bmsr = cas_phy_read(cp, MII_BMSR);
+ /* WTZ: Solaris driver reads this twice, but that
+ * may be due to the PCS case and the use of a
+ * common implementation. Read it twice here to be
+ * safe.
+ */
+ bmsr = cas_phy_read(cp, MII_BMSR);
+ cas_mif_poll(cp, 1);
+ readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
+ reset = cas_mii_link_check(cp, bmsr);
+ } else {
+ reset = cas_pcs_link_check(cp);
+ }
+
+ if (reset)
+ goto done;
+
+ /* check for tx state machine confusion */
+ if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
+ u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
+ u32 wptr, rptr;
+ int tlm = CAS_VAL(MAC_SM_TLM, val);
+
+ if (((tlm == 0x5) || (tlm == 0x3)) &&
+ (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: MAC_STATE[%08x]\n", val);
+ reset = 1;
+ goto done;
+ }
+
+ val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
+ wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
+ rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
+ if ((val == 0) && (wptr != rptr)) {
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+ val, wptr, rptr);
+ reset = 1;
+ }
+
+ if (reset)
+ cas_hard_reset(cp);
+ }
+
+done:
+ if (reset) {
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ pr_err("reset called in cas_link_timer\n");
+ schedule_work(&cp->reset_task);
+#endif
+ }
+
+ if (!pending)
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+ cas_unlock_tx(cp);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/* tiny buffers are used to avoid target abort issues with
+ * older cassini's
+ */
+static void cas_tx_tiny_free(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ if (!cp->tx_tiny_bufs[i])
+ continue;
+
+ pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
+ cp->tx_tiny_bufs[i],
+ cp->tx_tiny_dvma[i]);
+ cp->tx_tiny_bufs[i] = NULL;
+ }
+}
+
+static int cas_tx_tiny_alloc(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ cp->tx_tiny_bufs[i] =
+ pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
+ &cp->tx_tiny_dvma[i]);
+ if (!cp->tx_tiny_bufs[i]) {
+ cas_tx_tiny_free(cp);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+static int cas_open(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ int hw_was_up, err;
+ unsigned long flags;
+
+ mutex_lock(&cp->pm_mutex);
+
+ hw_was_up = cp->hw_running;
+
+ /* The power-management mutex protects the hw_running
+ * etc. state so it is safe to do this bit without cp->lock
+ */
+ if (!cp->hw_running) {
+ /* Reset the chip */
+ cas_lock_all_save(cp, flags);
+ /* We set the second arg to cas_reset to zero
+ * because cas_init_hw below will have its second
+ * argument set to non-zero, which will force
+ * autonegotiation to start.
+ */
+ cas_reset(cp, 0);
+ cp->hw_running = 1;
+ cas_unlock_all_restore(cp, flags);
+ }
+
+ err = -ENOMEM;
+ if (cas_tx_tiny_alloc(cp) < 0)
+ goto err_unlock;
+
+ /* alloc rx descriptors */
+ if (cas_alloc_rxds(cp) < 0)
+ goto err_tx_tiny;
+
+ /* allocate spares */
+ cas_spare_init(cp);
+ cas_spare_recover(cp, GFP_KERNEL);
+
+ /* We can now request the interrupt as we know it's masked
+ * on the controller. cassini+ has up to 4 interrupts
+ * that can be used, but you need to do explicit pci interrupt
+ * mapping to expose them
+ */
+ if (request_irq(cp->pdev->irq, cas_interrupt,
+ IRQF_SHARED, dev->name, (void *) dev)) {
+ netdev_err(cp->dev, "failed to request irq !\n");
+ err = -EAGAIN;
+ goto err_spare;
+ }
+
+#ifdef USE_NAPI
+ napi_enable(&cp->napi);
+#endif
+ /* init hw */
+ cas_lock_all_save(cp, flags);
+ cas_clean_rings(cp);
+ cas_init_hw(cp, !hw_was_up);
+ cp->opened = 1;
+ cas_unlock_all_restore(cp, flags);
+
+ netif_start_queue(dev);
+ mutex_unlock(&cp->pm_mutex);
+ return 0;
+
+err_spare:
+ cas_spare_free(cp);
+ cas_free_rxds(cp);
+err_tx_tiny:
+ cas_tx_tiny_free(cp);
+err_unlock:
+ mutex_unlock(&cp->pm_mutex);
+ return err;
+}
+
+static int cas_close(struct net_device *dev)
+{
+ unsigned long flags;
+ struct cas *cp = netdev_priv(dev);
+
+#ifdef USE_NAPI
+ napi_disable(&cp->napi);
+#endif
+ /* Make sure we don't get distracted by suspend/resume */
+ mutex_lock(&cp->pm_mutex);
+
+ netif_stop_queue(dev);
+
+ /* Stop traffic, mark us closed */
+ cas_lock_all_save(cp, flags);
+ cp->opened = 0;
+ cas_reset(cp, 0);
+ cas_phy_init(cp);
+ cas_begin_auto_negotiation(cp, NULL);
+ cas_clean_rings(cp);
+ cas_unlock_all_restore(cp, flags);
+
+ free_irq(cp->pdev->irq, (void *) dev);
+ cas_spare_free(cp);
+ cas_free_rxds(cp);
+ cas_tx_tiny_free(cp);
+ mutex_unlock(&cp->pm_mutex);
+ return 0;
+}
+
+static struct {
+ const char name[ETH_GSTRING_LEN];
+} ethtool_cassini_statnames[] = {
+ {"collisions"},
+ {"rx_bytes"},
+ {"rx_crc_errors"},
+ {"rx_dropped"},
+ {"rx_errors"},
+ {"rx_fifo_errors"},
+ {"rx_frame_errors"},
+ {"rx_length_errors"},
+ {"rx_over_errors"},
+ {"rx_packets"},
+ {"tx_aborted_errors"},
+ {"tx_bytes"},
+ {"tx_dropped"},
+ {"tx_errors"},
+ {"tx_fifo_errors"},
+ {"tx_packets"}
+};
+#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
+
+static struct {
+ const int offsets; /* neg. values for 2nd arg to cas_phy_read */
+} ethtool_register_table[] = {
+ {-MII_BMSR},
+ {-MII_BMCR},
+ {REG_CAWR},
+ {REG_INF_BURST},
+ {REG_BIM_CFG},
+ {REG_RX_CFG},
+ {REG_HP_CFG},
+ {REG_MAC_TX_CFG},
+ {REG_MAC_RX_CFG},
+ {REG_MAC_CTRL_CFG},
+ {REG_MAC_XIF_CFG},
+ {REG_MIF_CFG},
+ {REG_PCS_CFG},
+ {REG_SATURN_PCFG},
+ {REG_PCS_MII_STATUS},
+ {REG_PCS_STATE_MACHINE},
+ {REG_MAC_COLL_EXCESS},
+ {REG_MAC_COLL_LATE}
+};
+#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
+#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
+
+static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
+{
+ u8 *p;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
+ u16 hval;
+ u32 val;
+ if (ethtool_register_table[i].offsets < 0) {
+ hval = cas_phy_read(cp,
+ -ethtool_register_table[i].offsets);
+ val = hval;
+ } else {
+ val = readl(cp->regs + ethtool_register_table[i].offsets);
+ }
+ memcpy(p, (u8 *)&val, sizeof(u32));
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static struct net_device_stats *cas_get_stats(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct net_device_stats *stats = cp->net_stats;
+ unsigned long flags;
+ int i;
+ unsigned long tmp;
+
+ /* we collate all of the stats into net_stats[N_TX_RING] */
+ if (!cp->hw_running)
+ return stats + N_TX_RINGS;
+
+ /* collect outstanding stats */
+ /* WTZ: the Cassini spec gives these as 16 bit counters but
+ * stored in 32-bit words. Added a mask of 0xffff to be safe,
+ * in case the chip somehow puts any garbage in the other bits.
+ * Also, counter usage didn't seem to match what Adrian did
+ * in the parts of the code that set these quantities. Made
+ * that consistent.
+ */
+ spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
+ stats[N_TX_RINGS].rx_crc_errors +=
+ readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
+ stats[N_TX_RINGS].rx_frame_errors +=
+ readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
+ stats[N_TX_RINGS].rx_length_errors +=
+ readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
+#if 1
+ tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
+ (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
+ stats[N_TX_RINGS].tx_aborted_errors += tmp;
+ stats[N_TX_RINGS].collisions +=
+ tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
+#else
+ stats[N_TX_RINGS].tx_aborted_errors +=
+ readl(cp->regs + REG_MAC_COLL_EXCESS);
+ stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
+ readl(cp->regs + REG_MAC_COLL_LATE);
+#endif
+ cas_clear_mac_err(cp);
+
+ /* saved bits that are unique to ring 0 */
+ spin_lock(&cp->stat_lock[0]);
+ stats[N_TX_RINGS].collisions += stats[0].collisions;
+ stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
+ stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
+ stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
+ stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
+ stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
+ spin_unlock(&cp->stat_lock[0]);
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ spin_lock(&cp->stat_lock[i]);
+ stats[N_TX_RINGS].rx_length_errors +=
+ stats[i].rx_length_errors;
+ stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
+ stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
+ stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
+ stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
+ stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
+ stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
+ stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
+ stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
+ stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
+ memset(stats + i, 0, sizeof(struct net_device_stats));
+ spin_unlock(&cp->stat_lock[i]);
+ }
+ spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
+ return stats + N_TX_RINGS;
+}
+
+
+static void cas_set_multicast(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ u32 rxcfg, rxcfg_new;
+ unsigned long flags;
+ int limit = STOP_TRIES;
+
+ if (!cp->hw_running)
+ return;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
+
+ /* disable RX MAC and wait for completion */
+ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ /* disable hash filter and wait for completion */
+ limit = STOP_TRIES;
+ rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
+ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ /* program hash filters */
+ cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
+ rxcfg |= rxcfg_new;
+ writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct cas *cp = netdev_priv(dev);
+ strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
+ strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
+ info->fw_version[0] = '\0';
+ strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
+ cp->casreg_len : CAS_MAX_REGS;
+ info->n_stats = CAS_NUM_STAT_KEYS;
+}
+
+static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ u16 bmcr;
+ int full_duplex, speed, pause;
+ unsigned long flags;
+ enum link_state linkstate = link_up;
+
+ cmd->advertising = 0;
+ cmd->supported = SUPPORTED_Autoneg;
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ cmd->supported |= SUPPORTED_1000baseT_Full;
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
+
+ /* Record PHY settings if HW is on. */
+ spin_lock_irqsave(&cp->lock, flags);
+ bmcr = 0;
+ linkstate = cp->lstate;
+ if (CAS_PHY_MII(cp->phy_type)) {
+ cmd->port = PORT_MII;
+ cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->phy_address = cp->phy_addr;
+ cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
+
+ cmd->supported |=
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII);
+
+ if (cp->hw_running) {
+ cas_mif_poll(cp, 0);
+ bmcr = cas_phy_read(cp, MII_BMCR);
+ cas_read_mii_link_mode(cp, &full_duplex,
+ &speed, &pause);
+ cas_mif_poll(cp, 1);
+ }
+
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->phy_address = 0;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (cp->hw_running) {
+ /* pcs uses the same bits as mii */
+ bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
+ cas_read_pcs_link_mode(cp, &full_duplex,
+ &speed, &pause);
+ }
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ if (bmcr & BMCR_ANENABLE) {
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+ SPEED_10 :
+ ((speed == 1000) ?
+ SPEED_1000 : SPEED_100)));
+ cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10)));
+ cmd->duplex =
+ (bmcr & BMCR_FULLDPLX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ if (linkstate != link_up) {
+ /* Force these to "unknown" if the link is not up and
+ * autonegotiation is enabled. We can set the link
+ * speed to 0, but not cmd->duplex,
+ * because its legal values are 0 and 1. Ethtool will
+ * print the value reported in parentheses after the
+ * word "Unknown" for unrecognized values.
+ *
+ * If in forced mode, we report the speed and duplex
+ * settings that we configured.
+ */
+ if (cp->link_cntl & BMCR_ANENABLE) {
+ ethtool_cmd_speed_set(cmd, 0);
+ cmd->duplex = 0xff;
+ } else {
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ if (cp->link_cntl & BMCR_SPEED100) {
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ }
+ cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ }
+ return 0;
+}
+
+static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Apply settings and restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+}
+
+static int cas_nway_reset(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if ((cp->link_cntl & BMCR_ANENABLE) == 0)
+ return -EINVAL;
+
+ /* Restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, NULL);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return 0;
+}
+
+static u32 cas_get_link(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->lstate == link_up;
+}
+
+static u32 cas_get_msglevel(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->msg_enable;
+}
+
+static void cas_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct cas *cp = netdev_priv(dev);
+ cp->msg_enable = value;
+}
+
+static int cas_get_regs_len(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
+}
+
+static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct cas *cp = netdev_priv(dev);
+ regs->version = 0;
+ /* cas_read_regs handles locks (cp->lock). */
+ cas_read_regs(cp, p, regs->len / sizeof(u32));
+}
+
+static int cas_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CAS_NUM_STAT_KEYS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ memcpy(data, &ethtool_cassini_statnames,
+ CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
+}
+
+static void cas_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct net_device_stats *stats = cas_get_stats(cp->dev);
+ int i = 0;
+ data[i++] = stats->collisions;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_dropped;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_fifo_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_over_errors;
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_aborted_errors;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->tx_dropped;
+ data[i++] = stats->tx_errors;
+ data[i++] = stats->tx_fifo_errors;
+ data[i++] = stats->tx_packets;
+ BUG_ON(i != CAS_NUM_STAT_KEYS);
+}
+
+static const struct ethtool_ops cas_ethtool_ops = {
+ .get_drvinfo = cas_get_drvinfo,
+ .get_settings = cas_get_settings,
+ .set_settings = cas_set_settings,
+ .nway_reset = cas_nway_reset,
+ .get_link = cas_get_link,
+ .get_msglevel = cas_get_msglevel,
+ .set_msglevel = cas_set_msglevel,
+ .get_regs_len = cas_get_regs_len,
+ .get_regs = cas_get_regs,
+ .get_sset_count = cas_get_sset_count,
+ .get_strings = cas_get_strings,
+ .get_ethtool_stats = cas_get_ethtool_stats,
+};
+
+static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+ int rc = -EOPNOTSUPP;
+
+ /* Hold the PM mutex while doing ioctl's or we may collide
+ * with open/close and power management and oops.
+ */
+ mutex_lock(&cp->pm_mutex);
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = cp->phy_addr;
+ /* Fallthrough... */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_mif_poll(cp, 0);
+ data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
+ cas_mif_poll(cp, 1);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ rc = 0;
+ break;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_mif_poll(cp, 0);
+ rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
+ cas_mif_poll(cp, 1);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&cp->pm_mutex);
+ return rc;
+}
+
+/* When this chip sits underneath an Intel 31154 bridge, it is the
+ * only subordinate device and we can tweak the bridge settings to
+ * reflect that fact.
+ */
+static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
+{
+ struct pci_dev *pdev = cas_pdev->bus->self;
+ u32 val;
+
+ if (!pdev)
+ return;
+
+ if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
+ return;
+
+ /* Clear bit 10 (Bus Parking Control) in the Secondary
+ * Arbiter Control/Status Register which lives at offset
+ * 0x41. Using a 32-bit word read/modify/write at 0x40
+ * is much simpler so that's how we do this.
+ */
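+ /* For clarity: bit 10 of the 16-bit register at 0x41 sits at
+ * bit position 10 + 8 = 18 of the dword read at 0x40, which is
+ * exactly the 0x00040000 mask cleared below.
+ */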
+ pci_read_config_dword(pdev, 0x40, &val);
+ val &= ~0x00040000;
+ pci_write_config_dword(pdev, 0x40, val);
+
+ /* Max out the Multi-Transaction Timer settings since
+ * Cassini is the only device present.
+ *
+ * The register is 16-bit and lives at 0x50. When the
+ * settings are enabled, it extends the GRANT# signal
+ * for a requestor after a transaction is complete. This
+ * allows the next request to run without first needing
+ * to negotiate the GRANT# signal back.
+ *
+ * Bits 12:10 define the grant duration:
+ *
+ * 1 -- 16 clocks
+ * 2 -- 32 clocks
+ * 3 -- 64 clocks
+ * 4 -- 128 clocks
+ * 5 -- 256 clocks
+ *
+ * All other values are illegal.
+ *
+ * Bits 09:00 define which REQ/GNT signal pairs get the
+ * GRANT# signal treatment. We set them all.
+ */
+ pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
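+ /* Decoded: 5 << 10 puts 5 into bits 12:10 (the 256-clock grant
+ * duration); 0x3ff sets all ten REQ/GNT enable bits 09:00.
+ */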
+
+ /* The Read Prefetch Policy register is 16-bit and sits at
+ * offset 0x52. It enables a "smart" pre-fetch policy. We
+ * enable it and max out all of the settings since only one
+ * device is sitting underneath and thus bandwidth sharing is
+ * not an issue.
+ *
+ * The register has several 3-bit fields, each of which indicates
+ * a multiplier applied to the base amount of prefetching the
+ * chip would do. These fields are at:
+ *
+ * 15:13 --- ReRead Primary Bus
+ * 12:10 --- FirstRead Primary Bus
+ * 09:07 --- ReRead Secondary Bus
+ * 06:04 --- FirstRead Secondary Bus
+ *
+ * Bits 03:00 control which REQ/GNT pairs the prefetch settings
+ * get enabled on. Bit 3 is a grouped enabler which controls
+ * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control
+ * the individual REQ/GNT pairs [2:0].
+ */
+ pci_write_config_word(pdev, 0x52,
+ (0x7 << 13) |
+ (0x7 << 10) |
+ (0x7 << 7) |
+ (0x7 << 4) |
+ (0xf << 0));
+
+ /* Force cacheline size to 0x8 */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+
+ /* Force latency timer to maximum setting so Cassini can
+ * sit on the bus as long as it likes.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
+}
+
+static const struct net_device_ops cas_netdev_ops = {
+ .ndo_open = cas_open,
+ .ndo_stop = cas_close,
+ .ndo_start_xmit = cas_start_xmit,
+ .ndo_get_stats = cas_get_stats,
+ .ndo_set_rx_mode = cas_set_multicast,
+ .ndo_do_ioctl = cas_ioctl,
+ .ndo_tx_timeout = cas_tx_timeout,
+ .ndo_change_mtu = cas_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cas_netpoll,
+#endif
+};
+
+static int __devinit cas_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int cas_version_printed = 0;
+ unsigned long casreg_len;
+ struct net_device *dev;
+ struct cas *cp;
+ int i, err, pci_using_dac;
+ u16 pci_cmd;
+ u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
+
+ if (cas_version_printed++ == 0)
+ pr_info("%s", version);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Cannot find proper PCI device "
+ "base address, aborting\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ dev = alloc_etherdev(sizeof(*cp));
+ if (!dev) {
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto err_out_disable_pdev;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = pci_request_regions(pdev, dev->name);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_out_free_netdev;
+ }
+ pci_set_master(pdev);
+
+ /* we must always turn on parity response or else parity
+ * doesn't get generated properly. disable SERR/PERR as well.
+ * in addition, we want to turn MWI on.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_SERR;
+ pci_cmd |= PCI_COMMAND_PARITY;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ if (pci_try_set_mwi(pdev))
+ pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
+
+ cas_program_bridge(pdev);
+
+ /*
+ * On some architectures, the default cache line size set
+ * by pci_try_set_mwi reduces performance. We have to increase
+ * it for this case. To start, we read back the cache line
+ * size that is currently configured.
+ */
+#if 1
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ &orig_cacheline_size);
+ if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
+ cas_cacheline_size =
+ (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
+ CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
+ if (pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE,
+ cas_cacheline_size)) {
+ dev_err(&pdev->dev, "Could not set PCI cache "
+ "line size\n");
+ goto err_write_cacheline;
+ }
+ }
+#endif
+
+ /* Configure DMA attributes. */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(64));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
+ "for consistent allocations\n");
+ goto err_out_free_res;
+ }
+
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, "
+ "aborting\n");
+ goto err_out_free_res;
+ }
+ pci_using_dac = 0;
+ }
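+ /* Note: NETIF_F_HIGHDMA is only advertised further down when
+ * the 64-bit mask was accepted; with the 32-bit fallback the
+ * stack will not hand the device buffers above 4GB.
+ */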
+
+ casreg_len = pci_resource_len(pdev, 0);
+
+ cp = netdev_priv(dev);
+ cp->pdev = pdev;
+#if 1
+ /* A value of 0 indicates we never explicitly set it */
+ cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
+#endif
+ cp->dev = dev;
+ cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
+ cassini_debug;
+
+#if defined(CONFIG_SPARC)
+ cp->of_node = pci_device_to_OF_node(pdev);
+#endif
+
+ cp->link_transition = LINK_TRANSITION_UNKNOWN;
+ cp->link_transition_jiffies_valid = 0;
+
+ spin_lock_init(&cp->lock);
+ spin_lock_init(&cp->rx_inuse_lock);
+ spin_lock_init(&cp->rx_spare_lock);
+ for (i = 0; i < N_TX_RINGS; i++) {
+ spin_lock_init(&cp->stat_lock[i]);
+ spin_lock_init(&cp->tx_lock[i]);
+ }
+ spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
+ mutex_init(&cp->pm_mutex);
+
+ init_timer(&cp->link_timer);
+ cp->link_timer.function = cas_link_timer;
+ cp->link_timer.data = (unsigned long) cp;
+
+#if 1
+ /* Just in case the implementation of atomic operations
+ * changes so that an explicit initialization is necessary.
+ */
+ atomic_set(&cp->reset_task_pending, 0);
+ atomic_set(&cp->reset_task_pending_all, 0);
+ atomic_set(&cp->reset_task_pending_spare, 0);
+ atomic_set(&cp->reset_task_pending_mtu, 0);
+#endif
+ INIT_WORK(&cp->reset_task, cas_reset_task);
+
+ /* Default link parameters */
+ if (link_mode >= 0 && link_mode < 6)
+ cp->link_cntl = link_modes[link_mode];
+ else
+ cp->link_cntl = BMCR_ANENABLE;
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+ netif_carrier_off(cp->dev);
+ cp->timer_ticks = 0;
+
+ /* give us access to cassini registers */
+ cp->regs = pci_iomap(pdev, 0, casreg_len);
+ if (!cp->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ goto err_out_free_res;
+ }
+ cp->casreg_len = casreg_len;
+
+ pci_save_state(pdev);
+ cas_check_pci_invariants(cp);
+ cas_hard_reset(cp);
+ cas_reset(cp, 0);
+ if (cas_check_invariants(cp))
+ goto err_out_iounmap;
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ if (cas_saturn_firmware_init(cp))
+ goto err_out_iounmap;
+
+ cp->init_block = (struct cas_init_block *)
+ pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
+ &cp->block_dvma);
+ if (!cp->init_block) {
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
+ goto err_out_iounmap;
+ }
+
+ for (i = 0; i < N_TX_RINGS; i++)
+ cp->init_txds[i] = cp->init_block->txds[i];
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
+ cp->init_rxds[i] = cp->init_block->rxds[i];
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cp->init_rxcs[i] = cp->init_block->rxcs[i];
+
+ for (i = 0; i < N_RX_FLOWS; i++)
+ skb_queue_head_init(&cp->rx_flows[i]);
+
+ dev->netdev_ops = &cas_netdev_ops;
+ dev->ethtool_ops = &cas_ethtool_ops;
+ dev->watchdog_timeo = CAS_TX_TIMEOUT;
+
+#ifdef USE_NAPI
+ netif_napi_add(dev, &cp->napi, cas_poll, 64);
+#endif
+ dev->irq = pdev->irq;
+ dev->dma = 0;
+
+ /* Cassini features. */
+ if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ if (register_netdev(dev)) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+ goto err_out_free_consistent;
+ }
+
+ i = readl(cp->regs + REG_BIM_CFG);
+ netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ (i & BIM_CFG_32BIT) ? "32" : "64",
+ (i & BIM_CFG_66MHZ) ? "66" : "33",
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+ dev->dev_addr);
+
+ pci_set_drvdata(pdev, dev);
+ cp->hw_running = 1;
+ cas_entropy_reset(cp);
+ cas_phy_init(cp);
+ cas_begin_auto_negotiation(cp, NULL);
+ return 0;
+
+err_out_free_consistent:
+ pci_free_consistent(pdev, sizeof(struct cas_init_block),
+ cp->init_block, cp->block_dvma);
+
+err_out_iounmap:
+ mutex_lock(&cp->pm_mutex);
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
+
+ pci_iounmap(pdev, cp->regs);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_write_cacheline:
+ /* Try to restore it in case the error occurred after we
+ * set it.
+ */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
+
+err_out_free_netdev:
+ free_netdev(dev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return -ENODEV;
+}
+
+static void __devexit cas_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp;
+ if (!dev)
+ return;
+
+ cp = netdev_priv(dev);
+ unregister_netdev(dev);
+
+ if (cp->fw_data)
+ vfree(cp->fw_data);
+
+ mutex_lock(&cp->pm_mutex);
+ cancel_work_sync(&cp->reset_task);
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
+
+#if 1
+ if (cp->orig_cacheline_size) {
+ /* Restore the cache line size if we had modified
+ * it.
+ */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ cp->orig_cacheline_size);
+ }
+#endif
+ pci_free_consistent(pdev, sizeof(struct cas_init_block),
+ cp->init_block, cp->block_dvma);
+ pci_iounmap(pdev, cp->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ mutex_lock(&cp->pm_mutex);
+
+ /* If the driver is opened, we stop the DMA */
+ if (cp->opened) {
+ netif_device_detach(dev);
+
+ cas_lock_all_save(cp, flags);
+
+ /* We can set the second arg of cas_reset to 0
+ * because on resume, we'll call cas_init_hw with
+ * its second arg set so that autonegotiation is
+ * restarted.
+ */
+ cas_reset(cp, 0);
+ cas_clean_rings(cp);
+ cas_unlock_all_restore(cp, flags);
+ }
+
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
+
+ return 0;
+}
+
+static int cas_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp = netdev_priv(dev);
+
+ netdev_info(dev, "resuming\n");
+
+ mutex_lock(&cp->pm_mutex);
+ cas_hard_reset(cp);
+ if (cp->opened) {
+ unsigned long flags;
+ cas_lock_all_save(cp, flags);
+ cas_reset(cp, 0);
+ cp->hw_running = 1;
+ cas_clean_rings(cp);
+ cas_init_hw(cp, 1);
+ cas_unlock_all_restore(cp, flags);
+
+ netif_device_attach(dev);
+ }
+ mutex_unlock(&cp->pm_mutex);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver cas_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = cas_pci_tbl,
+ .probe = cas_init_one,
+ .remove = __devexit_p(cas_remove_one),
+#ifdef CONFIG_PM
+ .suspend = cas_suspend,
+ .resume = cas_resume
+#endif
+};
+
+static int __init cas_init(void)
+{
+ if (linkdown_timeout > 0)
+ link_transition_timeout = linkdown_timeout * HZ;
+ else
+ link_transition_timeout = 0;
+
+ return pci_register_driver(&cas_driver);
+}
+
+static void __exit cas_cleanup(void)
+{
+ pci_unregister_driver(&cas_driver);
+}
+
+module_init(cas_init);
+module_exit(cas_cleanup);
--- /dev/null
+/*
+ * Generic PPP layer for Linux.
+ *
+ * Copyright 1999-2002 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * The generic PPP layer handles the PPP network interfaces, the
+ * /dev/ppp device, packet and VJ compression, and multilink.
+ * It talks to PPP `channels' via the interface defined in
+ * include/linux/ppp_channel.h. Channels provide the basic means for
+ * sending and receiving PPP frames on some kind of communications
+ * channel.
+ *
+ * Part of the code in this driver was inspired by the old async-only
+ * PPP driver, written by Michael Callahan and Al Longyear, and
+ * subsequently hacked by Paul Mackerras.
+ *
+ * ==FILEVERSION 20041108==
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/ppp_defs.h>
+#include <linux/filter.h>
+#include <linux/if_ppp.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp-comp.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/stddef.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <net/slhc_vj.h>
+#include <linux/atomic.h>
+
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define PPP_VERSION "2.4.2"
+
+/*
+ * Network protocols we support.
+ */
+#define NP_IP 0 /* Internet Protocol V4 */
+#define NP_IPV6 1 /* Internet Protocol V6 */
+#define NP_IPX 2 /* IPX protocol */
+#define NP_AT 3 /* Appletalk protocol */
+#define NP_MPLS_UC 4 /* MPLS unicast */
+#define NP_MPLS_MC 5 /* MPLS multicast */
+#define NUM_NP 6 /* Number of NPs. */
+
+#define MPHDRLEN 6 /* multilink protocol header length */
+#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
+
+/*
+ * An instance of /dev/ppp can be associated with either a ppp
+ * interface unit or a ppp channel. In both cases, file->private_data
+ * points to one of these.
+ */
+struct ppp_file {
+ enum {
+ INTERFACE=1, CHANNEL
+ } kind;
+ struct sk_buff_head xq; /* pppd transmit queue */
+ struct sk_buff_head rq; /* receive queue for pppd */
+ wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
+ atomic_t refcnt; /* # refs (incl /dev/ppp attached) */
+ int hdrlen; /* space to leave for headers */
+ int index; /* interface unit / channel number */
+ int dead; /* unit/channel has been shut down */
+};
+
+#define PF_TO_X(pf, X) container_of(pf, X, file)
+
+#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
+#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
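+/* These are the usual container_of() idiom: given a ppp_file that is
+ * embedded in a struct ppp or struct channel, they recover a pointer
+ * to the enclosing structure, so the ppp_file passed in must really
+ * live inside the named type.
+ */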
+
+/*
+ * Data structure describing one ppp unit.
+ * A ppp unit corresponds to a ppp network interface device
+ * and represents a multilink bundle.
+ * It can have 0 or more ppp channels connected to it.
+ */
+struct ppp {
+ struct ppp_file file; /* stuff for read/write/poll 0 */
+ struct file *owner; /* file that owns this unit 48 */
+ struct list_head channels; /* list of attached channels 4c */
+ int n_channels; /* how many channels are attached 54 */
+ spinlock_t rlock; /* lock for receive side 58 */
+ spinlock_t wlock; /* lock for transmit side 5c */
+ int mru; /* max receive unit 60 */
+ unsigned int flags; /* control bits 64 */
+ unsigned int xstate; /* transmit state bits 68 */
+ unsigned int rstate; /* receive state bits 6c */
+ int debug; /* debug flags 70 */
+ struct slcompress *vj; /* state for VJ header compression */
+ enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
+ struct sk_buff *xmit_pending; /* a packet ready to go out 88 */
+ struct compressor *xcomp; /* transmit packet compressor 8c */
+ void *xc_state; /* its internal state 90 */
+ struct compressor *rcomp; /* receive decompressor 94 */
+ void *rc_state; /* its internal state 98 */
+ unsigned long last_xmit; /* jiffies when last pkt sent 9c */
+ unsigned long last_recv; /* jiffies when last pkt rcvd a0 */
+ struct net_device *dev; /* network interface device a4 */
+ int closing; /* is device closing down? a8 */
+#ifdef CONFIG_PPP_MULTILINK
+ int nxchan; /* next channel to send something on */
+ u32 nxseq; /* next sequence number to send */
+ int mrru; /* MP: max reconst. receive unit */
+ u32 nextseq; /* MP: seq no of next packet */
+ u32 minseq; /* MP: min of most recent seqnos */
+ struct sk_buff_head mrq; /* MP: receive reconstruction queue */
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+ struct sock_filter *pass_filter; /* filter for packets to pass */
+ struct sock_filter *active_filter;/* filter for pkts to reset idle */
+ unsigned pass_len, active_len;
+#endif /* CONFIG_PPP_FILTER */
+ struct net *ppp_net; /* the net we belong to */
+};
+
+/*
+ * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
+ * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
+ * SC_MUST_COMP
+ * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
+ * Bits in xstate: SC_COMP_RUN
+ */
+#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
+ |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
+ |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
+
+/*
+ * Private data structure for each channel.
+ * This includes the data structure used for multilink.
+ */
+struct channel {
+ struct ppp_file file; /* stuff for read/write/poll */
+ struct list_head list; /* link in all/new_channels list */
+ struct ppp_channel *chan; /* public channel data structure */
+ struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
+ spinlock_t downl; /* protects `chan', file.xq dequeue */
+ struct ppp *ppp; /* ppp unit we're connected to */
+ struct net *chan_net; /* the net channel belongs to */
+ struct list_head clist; /* link in list of channels per unit */
+ rwlock_t upl; /* protects `ppp' */
+#ifdef CONFIG_PPP_MULTILINK
+ u8 avail; /* flag used in multilink stuff */
+ u8 had_frag; /* >= 1 fragments have been sent */
+ u32 lastseq; /* MP: last sequence # received */
+ int speed; /* speed of the corresponding ppp channel*/
+#endif /* CONFIG_PPP_MULTILINK */
+};
+
+/*
+ * SMP locking issues:
+ * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
+ * list and the ppp.n_channels field; you need to take both locks
+ * before you modify them.
+ * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
+ * channel.downl.
+ */
+
+static DEFINE_MUTEX(ppp_mutex);
+static atomic_t ppp_unit_count = ATOMIC_INIT(0);
+static atomic_t channel_count = ATOMIC_INIT(0);
+
+/* per-net private data for this module */
+static int ppp_net_id __read_mostly;
+struct ppp_net {
+ /* units to ppp mapping */
+ struct idr units_idr;
+
+ /*
+ * all_ppp_mutex protects the units_idr mapping.
+ * It also ensures that finding a ppp unit in the units_idr
+ * map and updating its file.refcnt field is atomic.
+ */
+ struct mutex all_ppp_mutex;
+
+ /* channels */
+ struct list_head all_channels;
+ struct list_head new_channels;
+ int last_channel_index;
+
+ /*
+ * all_channels_lock protects all_channels and
+ * last_channel_index, and the atomicity of finding
+ * a channel and updating its file.refcnt field.
+ */
+ spinlock_t all_channels_lock;
+};
+
+/* Get the PPP protocol number from a skb */
+#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
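+/* skb->data is not guaranteed to be 16-bit aligned here, hence the
+ * get_unaligned_be16() rather than a direct load.
+ */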
+
+/* We limit the length of ppp->file.rq to this (arbitrary) value */
+#define PPP_MAX_RQLEN 32
+
+/*
+ * Maximum number of multilink fragments queued up.
+ * This has to be large enough to cope with the maximum latency of
+ * the slowest channel relative to the others. Strictly it should
+ * depend on the number of channels and their characteristics.
+ */
+#define PPP_MP_MAX_QLEN 128
+
+/* Multilink header bits. */
+#define B 0x80 /* this fragment begins a packet */
+#define E 0x40 /* this fragment ends a packet */
+
+/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
+#define seq_before(a, b) ((s32)((a) - (b)) < 0)
+#define seq_after(a, b) ((s32)((a) - (b)) > 0)
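+/* The signed subtraction handles wraparound, e.g.
+ * seq_before(0xfffffff0, 0x10) is true because
+ * (s32)(0xfffffff0 - 0x10) is negative. This works as long as the
+ * two sequence numbers are less than 2^31 apart.
+ */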
+
+/* Prototypes. */
+static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ struct file *file, unsigned int cmd, unsigned long arg);
+static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
+static void ppp_push(struct ppp *ppp);
+static void ppp_channel_push(struct channel *pch);
+static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
+ struct channel *pch);
+static void ppp_receive_error(struct ppp *ppp);
+static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
+static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
+ struct sk_buff *skb);
+#ifdef CONFIG_PPP_MULTILINK
+static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
+ struct channel *pch);
+static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
+static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
+static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
+#endif /* CONFIG_PPP_MULTILINK */
+static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
+static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
+static void ppp_ccp_closed(struct ppp *ppp);
+static struct compressor *find_compressor(int type);
+static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
+static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static void init_ppp_file(struct ppp_file *pf, int kind);
+static void ppp_shutdown_interface(struct ppp *ppp);
+static void ppp_destroy_interface(struct ppp *ppp);
+static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
+static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
+static int ppp_connect_channel(struct channel *pch, int unit);
+static int ppp_disconnect_channel(struct channel *pch);
+static void ppp_destroy_channel(struct channel *pch);
+static int unit_get(struct idr *p, void *ptr);
+static int unit_set(struct idr *p, void *ptr, int n);
+static void unit_put(struct idr *p, int n);
+static void *unit_find(struct idr *p, int n);
+
+static struct class *ppp_class;
+
+/* per net-namespace data */
+static inline struct ppp_net *ppp_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, ppp_net_id);
+}
+
+/* Translates a PPP protocol number to a NP index (NP == network protocol) */
+static inline int proto_to_npindex(int proto)
+{
+ switch (proto) {
+ case PPP_IP:
+ return NP_IP;
+ case PPP_IPV6:
+ return NP_IPV6;
+ case PPP_IPX:
+ return NP_IPX;
+ case PPP_AT:
+ return NP_AT;
+ case PPP_MPLS_UC:
+ return NP_MPLS_UC;
+ case PPP_MPLS_MC:
+ return NP_MPLS_MC;
+ }
+ return -EINVAL;
+}
+
+/* Translates an NP index into a PPP protocol number */
+static const int npindex_to_proto[NUM_NP] = {
+ PPP_IP,
+ PPP_IPV6,
+ PPP_IPX,
+ PPP_AT,
+ PPP_MPLS_UC,
+ PPP_MPLS_MC,
+};
+
+/* Translates an ethertype into an NP index */
+static inline int ethertype_to_npindex(int ethertype)
+{
+ switch (ethertype) {
+ case ETH_P_IP:
+ return NP_IP;
+ case ETH_P_IPV6:
+ return NP_IPV6;
+ case ETH_P_IPX:
+ return NP_IPX;
+ case ETH_P_PPPTALK:
+ case ETH_P_ATALK:
+ return NP_AT;
+ case ETH_P_MPLS_UC:
+ return NP_MPLS_UC;
+ case ETH_P_MPLS_MC:
+ return NP_MPLS_MC;
+ }
+ return -1;
+}
+
+/* Translates an NP index into an ethertype */
+static const int npindex_to_ethertype[NUM_NP] = {
+ ETH_P_IP,
+ ETH_P_IPV6,
+ ETH_P_IPX,
+ ETH_P_PPPTALK,
+ ETH_P_MPLS_UC,
+ ETH_P_MPLS_MC,
+};
+
+/*
+ * Locking shorthand.
+ */
+#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock)
+#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock)
+#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock)
+#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock)
+#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \
+ ppp_recv_lock(ppp); } while (0)
+#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \
+ ppp_xmit_unlock(ppp); } while (0)
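+/* Note that ppp_lock() takes wlock before rlock and ppp_unlock()
+ * releases them in the reverse order, consistent with the lock
+ * ordering documented in the SMP locking comment above.
+ */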
+
+/*
+ * /dev/ppp device routines.
+ * The /dev/ppp device is used by pppd to control the ppp unit.
+ * It supports the read, write, ioctl and poll functions.
+ * Open instances of /dev/ppp can be in one of three states:
+ * unattached, attached to a ppp unit, or attached to a ppp channel.
+ */
+static int ppp_open(struct inode *inode, struct file *file)
+{
+ /*
+ * This could (should?) be enforced by the permissions on /dev/ppp.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ return 0;
+}
+
+static int ppp_release(struct inode *unused, struct file *file)
+{
+ struct ppp_file *pf = file->private_data;
+ struct ppp *ppp;
+
+ if (pf) {
+ file->private_data = NULL;
+ if (pf->kind == INTERFACE) {
+ ppp = PF_TO_PPP(pf);
+ if (file == ppp->owner)
+ ppp_shutdown_interface(ppp);
+ }
+ if (atomic_dec_and_test(&pf->refcnt)) {
+ switch (pf->kind) {
+ case INTERFACE:
+ ppp_destroy_interface(PF_TO_PPP(pf));
+ break;
+ case CHANNEL:
+ ppp_destroy_channel(PF_TO_CHANNEL(pf));
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static ssize_t ppp_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ppp_file *pf = file->private_data;
+ DECLARE_WAITQUEUE(wait, current);
+ ssize_t ret;
+ struct sk_buff *skb = NULL;
+ struct iovec iov;
+
+ ret = count;
+
+ if (!pf)
+ return -ENXIO;
+ add_wait_queue(&pf->rwait, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ skb = skb_dequeue(&pf->rq);
+ if (skb)
+ break;
+ ret = 0;
+ if (pf->dead)
+ break;
+ if (pf->kind == INTERFACE) {
+ /*
+ * Return 0 (EOF) on an interface that has no
+ * channels connected, unless it is looping
+ * network traffic (demand mode).
+ */
+ struct ppp *ppp = PF_TO_PPP(pf);
+ if (ppp->n_channels == 0 &&
+ (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ break;
+ }
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ ret = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&pf->rwait, &wait);
+
+ if (!skb)
+ goto out;
+
+ ret = -EOVERFLOW;
+ if (skb->len > count)
+ goto outf;
+ ret = -EFAULT;
+ iov.iov_base = buf;
+ iov.iov_len = count;
+ if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
+ goto outf;
+ ret = skb->len;
+
+ outf:
+ kfree_skb(skb);
+ out:
+ return ret;
+}
+
+static ssize_t ppp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ppp_file *pf = file->private_data;
+ struct sk_buff *skb;
+ ssize_t ret;
+
+ if (!pf)
+ return -ENXIO;
+ ret = -ENOMEM;
+ skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
+ if (!skb)
+ goto out;
+ skb_reserve(skb, pf->hdrlen);
+ ret = -EFAULT;
+ if (copy_from_user(skb_put(skb, count), buf, count)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ skb_queue_tail(&pf->xq, skb);
+
+ switch (pf->kind) {
+ case INTERFACE:
+ ppp_xmit_process(PF_TO_PPP(pf));
+ break;
+ case CHANNEL:
+ ppp_channel_push(PF_TO_CHANNEL(pf));
+ break;
+ }
+
+ ret = count;
+
+ out:
+ return ret;
+}
+
+/* No kernel lock - fine */
+static unsigned int ppp_poll(struct file *file, poll_table *wait)
+{
+ struct ppp_file *pf = file->private_data;
+ unsigned int mask;
+
+ if (!pf)
+ return 0;
+ poll_wait(file, &pf->rwait, wait);
+ mask = POLLOUT | POLLWRNORM;
+ if (skb_peek(&pf->rq))
+ mask |= POLLIN | POLLRDNORM;
+ if (pf->dead)
+ mask |= POLLHUP;
+ else if (pf->kind == INTERFACE) {
+ /* see comment in ppp_read */
+ struct ppp *ppp = PF_TO_PPP(pf);
+ if (ppp->n_channels == 0 &&
+ (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ return mask;
+}
+
+#ifdef CONFIG_PPP_FILTER
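+/*
+ * Copy a BPF filter program in from user space and validate it.
+ * Returns the number of instructions on success (zero with *p set
+ * to NULL for an empty program) or a negative errno.
+ */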
+static int get_filter(void __user *arg, struct sock_filter **p)
+{
+ struct sock_fprog uprog;
+ struct sock_filter *code = NULL;
+ int len, err;
+
+ if (copy_from_user(&uprog, arg, sizeof(uprog)))
+ return -EFAULT;
+
+ if (!uprog.len) {
+ *p = NULL;
+ return 0;
+ }
+
+ len = uprog.len * sizeof(struct sock_filter);
+ code = memdup_user(uprog.filter, len);
+ if (IS_ERR(code))
+ return PTR_ERR(code);
+
+ err = sk_chk_filter(code, uprog.len);
+ if (err) {
+ kfree(code);
+ return err;
+ }
+
+ *p = code;
+ return uprog.len;
+}
+#endif /* CONFIG_PPP_FILTER */
+
+static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ppp_file *pf = file->private_data;
+ struct ppp *ppp;
+ int err = -EFAULT, val, val2, i;
+ struct ppp_idle idle;
+ struct npioctl npi;
+ int unit, cflags;
+ struct slcompress *vj;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+
+ if (!pf)
+ return ppp_unattached_ioctl(current->nsproxy->net_ns,
+ pf, file, cmd, arg);
+
+ if (cmd == PPPIOCDETACH) {
+ /*
+ * We have to be careful here... if the file descriptor
+ * has been dup'd, we could have another process in the
+ * middle of a poll using the same file *, so we had
+ * better not free the interface data structures -
+ * instead we fail the ioctl. Even in this case, we
+ * shut down the interface if we are the owner of it.
+ * Actually, we should get rid of PPPIOCDETACH; userland
+ * (i.e. pppd) could achieve the same effect by closing
+ * this fd and reopening /dev/ppp.
+ */
+ err = -EINVAL;
+ mutex_lock(&ppp_mutex);
+ if (pf->kind == INTERFACE) {
+ ppp = PF_TO_PPP(pf);
+ if (file == ppp->owner)
+ ppp_shutdown_interface(ppp);
+ }
+ if (atomic_long_read(&file->f_count) <= 2) {
+ ppp_release(NULL, file);
+ err = 0;
+ } else
+ pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ atomic_long_read(&file->f_count));
+ mutex_unlock(&ppp_mutex);
+ return err;
+ }
+
+ if (pf->kind == CHANNEL) {
+ struct channel *pch;
+ struct ppp_channel *chan;
+
+ mutex_lock(&ppp_mutex);
+ pch = PF_TO_CHANNEL(pf);
+
+ switch (cmd) {
+ case PPPIOCCONNECT:
+ if (get_user(unit, p))
+ break;
+ err = ppp_connect_channel(pch, unit);
+ break;
+
+ case PPPIOCDISCONN:
+ err = ppp_disconnect_channel(pch);
+ break;
+
+ default:
+ down_read(&pch->chan_sem);
+ chan = pch->chan;
+ err = -ENOTTY;
+ if (chan && chan->ops->ioctl)
+ err = chan->ops->ioctl(chan, cmd, arg);
+ up_read(&pch->chan_sem);
+ }
+ mutex_unlock(&ppp_mutex);
+ return err;
+ }
+
+ if (pf->kind != INTERFACE) {
+ /* can't happen */
+ pr_err("PPP: not interface or channel??\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ppp_mutex);
+ ppp = PF_TO_PPP(pf);
+ switch (cmd) {
+ case PPPIOCSMRU:
+ if (get_user(val, p))
+ break;
+ ppp->mru = val;
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ if (get_user(val, p))
+ break;
+ ppp_lock(ppp);
+ cflags = ppp->flags & ~val;
+ ppp->flags = val & SC_FLAG_BITS;
+ ppp_unlock(ppp);
+ if (cflags & SC_CCP_OPEN)
+ ppp_ccp_closed(ppp);
+ err = 0;
+ break;
+
+ case PPPIOCGFLAGS:
+ val = ppp->flags | ppp->xstate | ppp->rstate;
+ if (put_user(val, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSCOMPRESS:
+ err = ppp_set_compress(ppp, arg);
+ break;
+
+ case PPPIOCGUNIT:
+ if (put_user(ppp->file.index, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSDEBUG:
+ if (get_user(val, p))
+ break;
+ ppp->debug = val;
+ err = 0;
+ break;
+
+ case PPPIOCGDEBUG:
+ if (put_user(ppp->debug, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGIDLE:
+ idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+ idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
+ if (copy_to_user(argp, &idle, sizeof(idle)))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSMAXCID:
+ if (get_user(val, p))
+ break;
+ val2 = 15;
+ if ((val >> 16) != 0) {
+ val2 = val >> 16;
+ val &= 0xffff;
+ }
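+ /* val now holds the max transmit slot id and val2 the max
+ * receive slot id (default 15); slhc_init() takes slot counts,
+ * hence the +1 on each.
+ */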
+ vj = slhc_init(val2+1, val+1);
+ if (!vj) {
+ netdev_err(ppp->dev,
+ "PPP: no memory (VJ compressor)\n");
+ err = -ENOMEM;
+ break;
+ }
+ ppp_lock(ppp);
+ if (ppp->vj)
+ slhc_free(ppp->vj);
+ ppp->vj = vj;
+ ppp_unlock(ppp);
+ err = 0;
+ break;
+
+ case PPPIOCGNPMODE:
+ case PPPIOCSNPMODE:
+ if (copy_from_user(&npi, argp, sizeof(npi)))
+ break;
+ err = proto_to_npindex(npi.protocol);
+ if (err < 0)
+ break;
+ i = err;
+ if (cmd == PPPIOCGNPMODE) {
+ err = -EFAULT;
+ npi.mode = ppp->npmode[i];
+ if (copy_to_user(argp, &npi, sizeof(npi)))
+ break;
+ } else {
+ ppp->npmode[i] = npi.mode;
+ /* we may be able to transmit more packets now (??) */
+ netif_wake_queue(ppp->dev);
+ }
+ err = 0;
+ break;
+
+#ifdef CONFIG_PPP_FILTER
+ case PPPIOCSPASS:
+ {
+ struct sock_filter *code;
+ err = get_filter(argp, &code);
+ if (err >= 0) {
+ ppp_lock(ppp);
+ kfree(ppp->pass_filter);
+ ppp->pass_filter = code;
+ ppp->pass_len = err;
+ ppp_unlock(ppp);
+ err = 0;
+ }
+ break;
+ }
+ case PPPIOCSACTIVE:
+ {
+ struct sock_filter *code;
+ err = get_filter(argp, &code);
+ if (err >= 0) {
+ ppp_lock(ppp);
+ kfree(ppp->active_filter);
+ ppp->active_filter = code;
+ ppp->active_len = err;
+ ppp_unlock(ppp);
+ err = 0;
+ }
+ break;
+ }
+#endif /* CONFIG_PPP_FILTER */
+
+#ifdef CONFIG_PPP_MULTILINK
+ case PPPIOCSMRRU:
+ if (get_user(val, p))
+ break;
+ ppp_recv_lock(ppp);
+ ppp->mrru = val;
+ ppp_recv_unlock(ppp);
+ err = 0;
+ break;
+#endif /* CONFIG_PPP_MULTILINK */
+
+ default:
+ err = -ENOTTY;
+ }
+ mutex_unlock(&ppp_mutex);
+ return err;
+}
+
+static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int unit, err = -EFAULT;
+ struct ppp *ppp;
+ struct channel *chan;
+ struct ppp_net *pn;
+ int __user *p = (int __user *)arg;
+
+ mutex_lock(&ppp_mutex);
+ switch (cmd) {
+ case PPPIOCNEWUNIT:
+ /* Create a new ppp unit */
+ if (get_user(unit, p))
+ break;
+ ppp = ppp_create_interface(net, unit, &err);
+ if (!ppp)
+ break;
+ file->private_data = &ppp->file;
+ ppp->owner = file;
+ err = -EFAULT;
+ if (put_user(ppp->file.index, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCATTACH:
+ /* Attach to an existing ppp unit */
+ if (get_user(unit, p))
+ break;
+ err = -ENXIO;
+ pn = ppp_pernet(net);
+ mutex_lock(&pn->all_ppp_mutex);
+ ppp = ppp_find_unit(pn, unit);
+ if (ppp) {
+ atomic_inc(&ppp->file.refcnt);
+ file->private_data = &ppp->file;
+ err = 0;
+ }
+ mutex_unlock(&pn->all_ppp_mutex);
+ break;
+
+ case PPPIOCATTCHAN:
+ if (get_user(unit, p))
+ break;
+ err = -ENXIO;
+ pn = ppp_pernet(net);
+ spin_lock_bh(&pn->all_channels_lock);
+ chan = ppp_find_channel(pn, unit);
+ if (chan) {
+ atomic_inc(&chan->file.refcnt);
+ file->private_data = &chan->file;
+ err = 0;
+ }
+ spin_unlock_bh(&pn->all_channels_lock);
+ break;
+
+ default:
+ err = -ENOTTY;
+ }
+ mutex_unlock(&ppp_mutex);
+ return err;
+}
+
+static const struct file_operations ppp_device_fops = {
+ .owner = THIS_MODULE,
+ .read = ppp_read,
+ .write = ppp_write,
+ .poll = ppp_poll,
+ .unlocked_ioctl = ppp_ioctl,
+ .open = ppp_open,
+ .release = ppp_release,
+ .llseek = noop_llseek,
+};
+
+static __net_init int ppp_init_net(struct net *net)
+{
+ struct ppp_net *pn = net_generic(net, ppp_net_id);
+
+ idr_init(&pn->units_idr);
+ mutex_init(&pn->all_ppp_mutex);
+
+ INIT_LIST_HEAD(&pn->all_channels);
+ INIT_LIST_HEAD(&pn->new_channels);
+
+ spin_lock_init(&pn->all_channels_lock);
+
+ return 0;
+}
+
+static __net_exit void ppp_exit_net(struct net *net)
+{
+ struct ppp_net *pn = net_generic(net, ppp_net_id);
+
+ idr_destroy(&pn->units_idr);
+}
+
+static struct pernet_operations ppp_net_ops = {
+ .init = ppp_init_net,
+ .exit = ppp_exit_net,
+ .id = &ppp_net_id,
+ .size = sizeof(struct ppp_net),
+};
+
+#define PPP_MAJOR 108
+
+/* Called at boot time if ppp is compiled into the kernel,
+ or at module load time (from init_module) if compiled as a module. */
+static int __init ppp_init(void)
+{
+ int err;
+
+ pr_info("PPP generic driver version " PPP_VERSION "\n");
+
+ err = register_pernet_device(&ppp_net_ops);
+ if (err) {
+ pr_err("failed to register PPP pernet device (%d)\n", err);
+ goto out;
+ }
+
+ err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
+ if (err) {
+ pr_err("failed to register PPP device (%d)\n", err);
+ goto out_net;
+ }
+
+ ppp_class = class_create(THIS_MODULE, "ppp");
+ if (IS_ERR(ppp_class)) {
+ err = PTR_ERR(ppp_class);
+ goto out_chrdev;
+ }
+
+ /* not a big deal if we fail here :-) */
+ device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+
+ return 0;
+
+out_chrdev:
+ unregister_chrdev(PPP_MAJOR, "ppp");
+out_net:
+ unregister_pernet_device(&ppp_net_ops);
+out:
+ return err;
+}
+
+/*
+ * Network interface unit routines.
+ */
+static netdev_tx_t
+ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ int npi, proto;
+ unsigned char *pp;
+
+ npi = ethertype_to_npindex(ntohs(skb->protocol));
+ if (npi < 0)
+ goto outf;
+
+ /* Drop, accept or reject the packet */
+ switch (ppp->npmode[npi]) {
+ case NPMODE_PASS:
+ break;
+ case NPMODE_QUEUE:
+ /* it would be nice to have a way to tell the network
+ system to queue this one up for later. */
+ goto outf;
+ case NPMODE_DROP:
+ case NPMODE_ERROR:
+ goto outf;
+ }
+
+ /* Put the 2-byte PPP protocol number on the front,
+ making sure there is room for the address and control fields. */
+ if (skb_cow_head(skb, PPP_HDRLEN))
+ goto outf;
+
+ pp = skb_push(skb, 2);
+ proto = npindex_to_proto[npi];
+ put_unaligned_be16(proto, pp);
+
+ netif_stop_queue(dev);
+ skb_queue_tail(&ppp->file.xq, skb);
+ ppp_xmit_process(ppp);
+ return NETDEV_TX_OK;
+
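+ /* Even dropped frames return NETDEV_TX_OK: anything else would
+ * make the core requeue and retry an skb we have already freed.
+ */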
+ outf:
+ kfree_skb(skb);
+ ++dev->stats.tx_dropped;
+ return NETDEV_TX_OK;
+}
+
+static int
+ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ int err = -EFAULT;
+ void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+ struct ppp_stats stats;
+ struct ppp_comp_stats cstats;
+ char *vers;
+
+ switch (cmd) {
+ case SIOCGPPPSTATS:
+ ppp_get_stats(ppp, &stats);
+ if (copy_to_user(addr, &stats, sizeof(stats)))
+ break;
+ err = 0;
+ break;
+
+ case SIOCGPPPCSTATS:
+ memset(&cstats, 0, sizeof(cstats));
+ if (ppp->xc_state)
+ ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
+ if (ppp->rc_state)
+ ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
+ if (copy_to_user(addr, &cstats, sizeof(cstats)))
+ break;
+ err = 0;
+ break;
+
+ case SIOCGPPPVER:
+ vers = PPP_VERSION;
+ if (copy_to_user(addr, vers, strlen(vers) + 1))
+ break;
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static const struct net_device_ops ppp_netdev_ops = {
+ .ndo_start_xmit = ppp_start_xmit,
+ .ndo_do_ioctl = ppp_net_ioctl,
+};
+
+static void ppp_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &ppp_netdev_ops;
+ dev->hard_header_len = PPP_HDRLEN;
+ dev->mtu = PPP_MTU;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 3;
+ dev->type = ARPHRD_PPP;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+/*
+ * Transmit-side routines.
+ */
+
+/*
+ * Called to do any work queued up on the transmit side
+ * that can now be done.
+ */
+static void
+ppp_xmit_process(struct ppp *ppp)
+{
+ struct sk_buff *skb;
+
+ ppp_xmit_lock(ppp);
+ if (!ppp->closing) {
+ ppp_push(ppp);
+ while (!ppp->xmit_pending &&
+ (skb = skb_dequeue(&ppp->file.xq)))
+ ppp_send_frame(ppp, skb);
+ /* If there's no work left to do, tell the core net
+ code that we can accept some more. */
+ if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+ netif_wake_queue(ppp->dev);
+ }
+ ppp_xmit_unlock(ppp);
+}
+
+static inline struct sk_buff *
+pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ int len;
+ int new_skb_size = ppp->dev->mtu +
+ ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
+ int compressor_skb_size = ppp->dev->mtu +
+ ppp->xcomp->comp_extra + PPP_HDRLEN;
+ new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
+ if (!new_skb) {
+ if (net_ratelimit())
+ netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
+ return NULL;
+ }
+ if (ppp->dev->hard_header_len > PPP_HDRLEN)
+ skb_reserve(new_skb,
+ ppp->dev->hard_header_len - PPP_HDRLEN);
+
+ /* compressor still expects A/C bytes in hdr */
+ len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
+ new_skb->data, skb->len + 2,
+ compressor_skb_size);
+ if (len > 0 && (ppp->flags & SC_CCP_UP)) {
+ kfree_skb(skb);
+ skb = new_skb;
+ skb_put(skb, len);
+ skb_pull(skb, 2); /* pull off A/C bytes */
+ } else if (len == 0) {
+ /* didn't compress, or CCP not up yet */
+ kfree_skb(new_skb);
+ new_skb = skb;
+ } else {
+ /*
+ * (len < 0)
+ * MPPE requires that we do not send unencrypted
+ * frames. The compressor will return -1 if we
+ * should drop the frame. We cannot simply test
+ * the compress_proto because MPPE and MPPC share
+ * the same number.
+ */
+ if (net_ratelimit())
+ netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
+ kfree_skb(skb);
+ kfree_skb(new_skb);
+ new_skb = NULL;
+ }
+ return new_skb;
+}
+
+/*
+ * Compress and send a frame.
+ * The caller should have locked the xmit path,
+ * and xmit_pending should be 0.
+ */
+static void
+ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+ int proto = PPP_PROTO(skb);
+ struct sk_buff *new_skb;
+ int len;
+ unsigned char *cp;
+
+ if (proto < 0x8000) {
+#ifdef CONFIG_PPP_FILTER
+ /* check if we should pass this packet */
+ /* the filter instructions are constructed assuming
+ a four-byte PPP header on each packet */
+ *skb_push(skb, 2) = 1;
+ if (ppp->pass_filter &&
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
+ if (ppp->debug & 1)
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: outbound frame "
+ "not passed\n");
+ kfree_skb(skb);
+ return;
+ }
+ /* if this packet passes the active filter, record the time */
+ if (!(ppp->active_filter &&
+ sk_run_filter(skb, ppp->active_filter) == 0))
+ ppp->last_xmit = jiffies;
+ skb_pull(skb, 2);
+#else
+ /* for data packets, record the time */
+ ppp->last_xmit = jiffies;
+#endif /* CONFIG_PPP_FILTER */
+ }
+
+ ++ppp->dev->stats.tx_packets;
+ ppp->dev->stats.tx_bytes += skb->len - 2;
+
+ switch (proto) {
+ case PPP_IP:
+ if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
+ break;
+ /* try to do VJ TCP header compression */
+ new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
+ GFP_ATOMIC);
+ if (!new_skb) {
+ netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
+ goto drop;
+ }
+ skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
+ cp = skb->data + 2;
+ len = slhc_compress(ppp->vj, cp, skb->len - 2,
+ new_skb->data + 2, &cp,
+ !(ppp->flags & SC_NO_TCP_CCID));
+ if (cp == skb->data + 2) {
+ /* didn't compress */
+ kfree_skb(new_skb);
+ } else {
+ if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
+ proto = PPP_VJC_COMP;
+ cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
+ } else {
+ proto = PPP_VJC_UNCOMP;
+ cp[0] = skb->data[2];
+ }
+ kfree_skb(skb);
+ skb = new_skb;
+ cp = skb_put(skb, len + 2);
+ cp[0] = 0;
+ cp[1] = proto;
+ }
+ break;
+
+ case PPP_CCP:
+ /* peek at outbound CCP frames */
+ ppp_ccp_peek(ppp, skb, 0);
+ break;
+ }
+
+ /* try to do packet compression */
+ if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
+ proto != PPP_LCP && proto != PPP_CCP) {
+ if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
+ if (net_ratelimit())
+ netdev_err(ppp->dev,
+ "ppp: compression required but "
+ "down - pkt dropped.\n");
+ goto drop;
+ }
+ skb = pad_compress_skb(ppp, skb);
+ if (!skb)
+ goto drop;
+ }
+
+ /*
+ * If we are waiting for traffic (demand dialling),
+ * queue it up for pppd to receive.
+ */
+ if (ppp->flags & SC_LOOP_TRAFFIC) {
+ if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
+ goto drop;
+ skb_queue_tail(&ppp->file.rq, skb);
+ wake_up_interruptible(&ppp->file.rwait);
+ return;
+ }
+
+ ppp->xmit_pending = skb;
+ ppp_push(ppp);
+ return;
+
+ drop:
+ kfree_skb(skb);
+ ++ppp->dev->stats.tx_errors;
+}
+
+/*
+ * Try to send the frame in xmit_pending.
+ * The caller should have the xmit path locked.
+ */
+static void
+ppp_push(struct ppp *ppp)
+{
+ struct list_head *list;
+ struct channel *pch;
+ struct sk_buff *skb = ppp->xmit_pending;
+
+ if (!skb)
+ return;
+
+ list = &ppp->channels;
+ if (list_empty(list)) {
+ /* nowhere to send the packet, just drop it */
+ ppp->xmit_pending = NULL;
+ kfree_skb(skb);
+ return;
+ }
+
+ if ((ppp->flags & SC_MULTILINK) == 0) {
+ /* not doing multilink: send it down the first channel */
+ list = list->next;
+ pch = list_entry(list, struct channel, clist);
+
+ spin_lock_bh(&pch->downl);
+ if (pch->chan) {
+ if (pch->chan->ops->start_xmit(pch->chan, skb))
+ ppp->xmit_pending = NULL;
+ } else {
+ /* channel got unregistered */
+ kfree_skb(skb);
+ ppp->xmit_pending = NULL;
+ }
+ spin_unlock_bh(&pch->downl);
+ return;
+ }
+
+#ifdef CONFIG_PPP_MULTILINK
+ /* Multilink: fragment the packet over as many links
+ as can take the packet at the moment. */
+ if (!ppp_mp_explode(ppp, skb))
+ return;
+#endif /* CONFIG_PPP_MULTILINK */
+
+ ppp->xmit_pending = NULL;
+ kfree_skb(skb);
+}
+
+#ifdef CONFIG_PPP_MULTILINK
+static bool mp_protocol_compress __read_mostly = true;
+module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mp_protocol_compress,
+ "compress protocol id in multilink fragments");
+
+/*
+ * Divide a packet to be transmitted into fragments and
+ * send them out the individual links.
+ */
+static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
+{
+ int len, totlen;
+ int i, bits, hdrlen, mtu;
+ int flen;
+ int navail, nfree, nzero;
+ int nbigger;
+ int totspeed;
+ int totfree;
+ unsigned char *p, *q;
+ struct list_head *list;
+ struct channel *pch;
+ struct sk_buff *frag;
+ struct ppp_channel *chan;
+
+ totspeed = 0; /* total bitrate of the bundle */
+ nfree = 0; /* # channels which have no packet already queued */
+ navail = 0; /* total # of usable channels (not deregistered) */
+ nzero = 0; /* number of channels with zero speed associated */
+ totfree = 0; /* total # of channels available and having no
+ * queued packets before starting the fragmentation */
+
+ hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
+ i = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ if (pch->chan) {
+ pch->avail = 1;
+ navail++;
+ pch->speed = pch->chan->speed;
+ } else {
+ pch->avail = 0;
+ }
+ if (pch->avail) {
+ if (skb_queue_empty(&pch->file.xq) ||
+ !pch->had_frag) {
+ if (pch->speed == 0)
+ nzero++;
+ else
+ totspeed += pch->speed;
+
+ pch->avail = 2;
+ ++nfree;
+ ++totfree;
+ }
+ if (!pch->had_frag && i < ppp->nxchan)
+ ppp->nxchan = i;
+ }
+ ++i;
+ }
+ /*
+ * Don't start sending this packet unless at least half of
+ * the channels are free. This gives much better TCP
+ * performance if we have a lot of channels.
+ */
+ if (nfree == 0 || nfree < navail / 2)
+ return 0; /* can't take now, leave it in xmit_pending */
+
+ /* Do protocol field compression */
+ p = skb->data;
+ len = skb->len;
+ if (*p == 0 && mp_protocol_compress) {
+ ++p;
+ --len;
+ }
+
+ totlen = len;
+ nbigger = len % nfree;
+
+ /* skip to the channel after the one we last used
+ and start at that one */
+ list = &ppp->channels;
+ for (i = 0; i < ppp->nxchan; ++i) {
+ list = list->next;
+ if (list == &ppp->channels) {
+ i = 0;
+ break;
+ }
+ }
+
+ /* create a fragment for each channel */
+ bits = B;
+ while (len > 0) {
+ list = list->next;
+ if (list == &ppp->channels) {
+ i = 0;
+ continue;
+ }
+ pch = list_entry(list, struct channel, clist);
+ ++i;
+ if (!pch->avail)
+ continue;
+
+ /*
+ * Skip this channel if it has a fragment pending already and
+ * we haven't given a fragment to all of the free channels.
+ */
+ if (pch->avail == 1) {
+ if (nfree > 0)
+ continue;
+ } else {
+ pch->avail = 1;
+ }
+
+ /* check the channel's mtu and whether it is still attached. */
+ spin_lock_bh(&pch->downl);
+ if (pch->chan == NULL) {
+ /* can't use this channel, it's being deregistered */
+ if (pch->speed == 0)
+ nzero--;
+ else
+ totspeed -= pch->speed;
+
+ spin_unlock_bh(&pch->downl);
+ pch->avail = 0;
+ totlen = len;
+ totfree--;
+ nfree--;
+ if (--navail == 0)
+ break;
+ continue;
+ }
+
+ /*
+ * If the channel speed is not set, divide the packet
+ * evenly among the free channels; otherwise divide it
+ * according to the speed of the channel we are going
+ * to transmit on.
+ */
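+ /*
+ * A rough reading of the formula below: each channel gets
+ * a share of (totlen + hdrlen*totfree) proportional to
+ * pch->speed/totspeed, scaled by (totfree - nzero)/totfree
+ * to account for zero-speed channels, minus this fragment's
+ * own hdrlen bytes of header overhead.
+ */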
+ flen = len;
+ if (nfree > 0) {
+ if (pch->speed == 0) {
+ flen = len/nfree;
+ if (nbigger > 0) {
+ flen++;
+ nbigger--;
+ }
+ } else {
+ flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
+ ((totspeed*totfree)/pch->speed)) - hdrlen;
+ if (nbigger > 0) {
+ flen += ((totfree - nzero)*pch->speed)/totspeed;
+ nbigger -= ((totfree - nzero)*pch->speed)/
+ totspeed;
+ }
+ }
+ nfree--;
+ }
+
+ /*
+ * Check if we are on the last channel or we exceeded
+ * the length of the data to fragment.
+ */
+ if ((nfree <= 0) || (flen > len))
+ flen = len;
+ /*
+ * It is not worth transmitting on slow channels: in that
+ * case the flen resulting from the above formula will be
+ * less than or equal to zero. Skip the channel in this case.
+ */
+ if (flen <= 0) {
+ pch->avail = 2;
+ spin_unlock_bh(&pch->downl);
+ continue;
+ }
+
+ /*
+ * hdrlen includes the 2-byte PPP protocol field, but the
+ * MTU counts only the payload excluding the protocol field.
+ * (RFC 1661, section 2)
+ */
+ mtu = pch->chan->mtu - (hdrlen - 2);
+ if (mtu < 4)
+ mtu = 4;
+ if (flen > mtu)
+ flen = mtu;
+ if (flen == len)
+ bits |= E;
+ frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+ if (!frag)
+ goto noskb;
+ q = skb_put(frag, flen + hdrlen);
+
+ /* make the MP header */
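+		/*
+		 * Layout (RFC 1990): bytes 0-1 hold the PPP_MP protocol
+		 * number; with short (12-bit) sequence numbers byte 2
+		 * holds the B/E bits plus the top 4 sequence bits and
+		 * byte 3 the low 8 bits, otherwise byte 2 holds the B/E
+		 * bits and bytes 3-5 a 24-bit sequence number.
+		 */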
+ put_unaligned_be16(PPP_MP, q);
+ if (ppp->flags & SC_MP_XSHORTSEQ) {
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+ q[3] = ppp->nxseq;
+ } else {
+ q[2] = bits;
+ q[3] = ppp->nxseq >> 16;
+ q[4] = ppp->nxseq >> 8;
+ q[5] = ppp->nxseq;
+ }
+
+ memcpy(q + hdrlen, p, flen);
+
+ /* try to send it down the channel */
+ chan = pch->chan;
+ if (!skb_queue_empty(&pch->file.xq) ||
+ !chan->ops->start_xmit(chan, frag))
+ skb_queue_tail(&pch->file.xq, frag);
+ pch->had_frag = 1;
+ p += flen;
+ len -= flen;
+ ++ppp->nxseq;
+ bits = 0;
+ spin_unlock_bh(&pch->downl);
+ }
+ ppp->nxchan = i;
+
+ return 1;
+
+ noskb:
+ spin_unlock_bh(&pch->downl);
+ if (ppp->debug & 1)
+ netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
+ ++ppp->dev->stats.tx_errors;
+ ++ppp->nxseq;
+ return 1; /* abandon the frame */
+}
+#endif /* CONFIG_PPP_MULTILINK */
+
+/*
+ * Try to send data out on a channel.
+ */
+static void
+ppp_channel_push(struct channel *pch)
+{
+ struct sk_buff *skb;
+ struct ppp *ppp;
+
+ spin_lock_bh(&pch->downl);
+ if (pch->chan) {
+ while (!skb_queue_empty(&pch->file.xq)) {
+ skb = skb_dequeue(&pch->file.xq);
+ if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
+ /* put the packet back and try again later */
+ skb_queue_head(&pch->file.xq, skb);
+ break;
+ }
+ }
+ } else {
+ /* channel got deregistered */
+ skb_queue_purge(&pch->file.xq);
+ }
+ spin_unlock_bh(&pch->downl);
+ /* see if there is anything from the attached unit to be sent */
+ if (skb_queue_empty(&pch->file.xq)) {
+ read_lock_bh(&pch->upl);
+ ppp = pch->ppp;
+ if (ppp)
+ ppp_xmit_process(ppp);
+ read_unlock_bh(&pch->upl);
+ }
+}
+
+/*
+ * Receive-side routines.
+ */
+
+struct ppp_mp_skb_parm {
+ u32 sequence;
+ u8 BEbits;
+};
+#define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb))
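+
+/*
+ * The reassembly bookkeeping above is stashed in skb->cb, which is
+ * private to whoever currently owns the skb, so it is safe to use
+ * while the fragment sits on the MP reconstruction queue.
+ */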
+
+static inline void
+ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+ ppp_recv_lock(ppp);
+ if (!ppp->closing)
+ ppp_receive_frame(ppp, skb, pch);
+ else
+ kfree_skb(skb);
+ ppp_recv_unlock(ppp);
+}
+
+void
+ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct channel *pch = chan->ppp;
+ int proto;
+
+ if (!pch) {
+ kfree_skb(skb);
+ return;
+ }
+
+ read_lock_bh(&pch->upl);
+ if (!pskb_may_pull(skb, 2)) {
+ kfree_skb(skb);
+ if (pch->ppp) {
+ ++pch->ppp->dev->stats.rx_length_errors;
+ ppp_receive_error(pch->ppp);
+ }
+ goto done;
+ }
+
+ proto = PPP_PROTO(skb);
+ if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
+ /* put it on the channel queue */
+ skb_queue_tail(&pch->file.rq, skb);
+ /* drop old frames if queue too long */
+ while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
+ (skb = skb_dequeue(&pch->file.rq)))
+ kfree_skb(skb);
+ wake_up_interruptible(&pch->file.rwait);
+ } else {
+ ppp_do_recv(pch->ppp, skb, pch);
+ }
+
+done:
+ read_unlock_bh(&pch->upl);
+}
+
+/* Put a 0-length skb in the receive queue as an error indication */
+void
+ppp_input_error(struct ppp_channel *chan, int code)
+{
+ struct channel *pch = chan->ppp;
+ struct sk_buff *skb;
+
+ if (!pch)
+ return;
+
+ read_lock_bh(&pch->upl);
+ if (pch->ppp) {
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (skb) {
+ skb->len = 0; /* probably unnecessary */
+ skb->cb[0] = code;
+ ppp_do_recv(pch->ppp, skb, pch);
+ }
+ }
+ read_unlock_bh(&pch->upl);
+}
+
+/*
+ * We come in here to process a received frame.
+ * The receive side of the ppp unit is locked.
+ */
+static void
+ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+ /* note: a 0-length skb is used as an error indication */
+ if (skb->len > 0) {
+#ifdef CONFIG_PPP_MULTILINK
+ /* XXX do channel-level decompression here */
+ if (PPP_PROTO(skb) == PPP_MP)
+ ppp_receive_mp_frame(ppp, skb, pch);
+ else
+#endif /* CONFIG_PPP_MULTILINK */
+ ppp_receive_nonmp_frame(ppp, skb);
+ } else {
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
+ }
+}
+
+static void
+ppp_receive_error(struct ppp *ppp)
+{
+ ++ppp->dev->stats.rx_errors;
+ if (ppp->vj)
+ slhc_toss(ppp->vj);
+}
+
+static void
+ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+ struct sk_buff *ns;
+ int proto, len, npi;
+
+ /*
+ * Decompress the frame, if compressed.
+ * Note that some decompressors need to see uncompressed frames
+ * that come in as well as compressed frames.
+ */
+ if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
+ (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
+ skb = ppp_decompress_frame(ppp, skb);
+
+ if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
+ goto err;
+
+ proto = PPP_PROTO(skb);
+ switch (proto) {
+ case PPP_VJC_COMP:
+ /* decompress VJ compressed packets */
+ if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
+ goto err;
+
+ if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
+ /* copy to a new sk_buff with more tailroom */
+ ns = dev_alloc_skb(skb->len + 128);
+ if (!ns) {
+ netdev_err(ppp->dev, "PPP: no memory "
+ "(VJ decomp)\n");
+ goto err;
+ }
+ skb_reserve(ns, 2);
+ skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
+ kfree_skb(skb);
+ skb = ns;
+		} else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
+ if (len <= 0) {
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: VJ decompression error\n");
+ goto err;
+ }
+ len += 2;
+ if (len > skb->len)
+ skb_put(skb, len - skb->len);
+ else if (len < skb->len)
+ skb_trim(skb, len);
+ proto = PPP_IP;
+ break;
+
+ case PPP_VJC_UNCOMP:
+ if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
+ goto err;
+
+		/* Until we fix the decompressor, we need to make sure
+		 * the data portion is linear.
+		 */
+ if (!pskb_may_pull(skb, skb->len))
+ goto err;
+
+ if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
+ netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
+ goto err;
+ }
+ proto = PPP_IP;
+ break;
+
+ case PPP_CCP:
+ ppp_ccp_peek(ppp, skb, 1);
+ break;
+ }
+
+ ++ppp->dev->stats.rx_packets;
+ ppp->dev->stats.rx_bytes += skb->len - 2;
+
+ npi = proto_to_npindex(proto);
+ if (npi < 0) {
+ /* control or unknown frame - pass it to pppd */
+ skb_queue_tail(&ppp->file.rq, skb);
+ /* limit queue length by dropping old frames */
+ while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
+ (skb = skb_dequeue(&ppp->file.rq)))
+ kfree_skb(skb);
+ /* wake up any process polling or blocking on read */
+ wake_up_interruptible(&ppp->file.rwait);
+
+ } else {
+ /* network protocol frame - give it to the kernel */
+
+#ifdef CONFIG_PPP_FILTER
+		/*
+		 * Check if the packet passes the pass and active
+		 * filters. The filter instructions are constructed
+		 * assuming a four-byte PPP header on each packet.
+		 */
+ if (ppp->pass_filter || ppp->active_filter) {
+ if (skb_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto err;
+
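+			/*
+			 * The skb currently starts at the 2-byte
+			 * protocol field; push two zero bytes so the
+			 * filter sees the four-byte PPP header it
+			 * expects.
+			 */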
+ *skb_push(skb, 2) = 0;
+ if (ppp->pass_filter &&
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
+ if (ppp->debug & 1)
+					netdev_printk(KERN_DEBUG, ppp->dev,
+						      "PPP: inbound frame not passed\n");
+ kfree_skb(skb);
+ return;
+ }
+ if (!(ppp->active_filter &&
+ sk_run_filter(skb, ppp->active_filter) == 0))
+ ppp->last_recv = jiffies;
+ __skb_pull(skb, 2);
+ } else
+#endif /* CONFIG_PPP_FILTER */
+ ppp->last_recv = jiffies;
+
+ if ((ppp->dev->flags & IFF_UP) == 0 ||
+ ppp->npmode[npi] != NPMODE_PASS) {
+ kfree_skb(skb);
+ } else {
+ /* chop off protocol */
+ skb_pull_rcsum(skb, 2);
+ skb->dev = ppp->dev;
+ skb->protocol = htons(npindex_to_ethertype[npi]);
+ skb_reset_mac_header(skb);
+ netif_rx(skb);
+ }
+ }
+ return;
+
+ err:
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
+}
+
+static struct sk_buff *
+ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+ int proto = PPP_PROTO(skb);
+ struct sk_buff *ns;
+ int len;
+
+	/* Until we fix all the decompressors, we need to make sure
+	 * the data portion is linear.
+	 */
+ if (!pskb_may_pull(skb, skb->len))
+ goto err;
+
+ if (proto == PPP_COMP) {
+ int obuff_size;
+
+		switch (ppp->rcomp->compress_proto) {
+ case CI_MPPE:
+ obuff_size = ppp->mru + PPP_HDRLEN + 1;
+ break;
+ default:
+ obuff_size = ppp->mru + PPP_HDRLEN;
+ break;
+ }
+
+ ns = dev_alloc_skb(obuff_size);
+ if (!ns) {
+ netdev_err(ppp->dev, "ppp_decompress_frame: "
+ "no memory\n");
+ goto err;
+ }
+ /* the decompressor still expects the A/C bytes in the hdr */
+ len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
+ skb->len + 2, ns->data, obuff_size);
+ if (len < 0) {
+ /* Pass the compressed frame to pppd as an
+ error indication. */
+ if (len == DECOMP_FATALERROR)
+ ppp->rstate |= SC_DC_FERROR;
+ kfree_skb(ns);
+ goto err;
+ }
+
+ kfree_skb(skb);
+ skb = ns;
+ skb_put(skb, len);
+ skb_pull(skb, 2); /* pull off the A/C bytes */
+
+ } else {
+ /* Uncompressed frame - pass to decompressor so it
+ can update its dictionary if necessary. */
+ if (ppp->rcomp->incomp)
+ ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
+ skb->len + 2);
+ }
+
+ return skb;
+
+ err:
+ ppp->rstate |= SC_DC_ERROR;
+ ppp_receive_error(ppp);
+ return skb;
+}
+
+#ifdef CONFIG_PPP_MULTILINK
+/*
+ * Receive a multilink frame.
+ * We put it on the reconstruction queue and then pull off
+ * as many completed frames as we can.
+ */
+static void
+ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+ u32 mask, seq;
+ struct channel *ch;
+	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
+
+ if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
+ goto err; /* no good, throw it away */
+
+ /* Decode sequence number and begin/end bits */
+ if (ppp->flags & SC_MP_SHORTSEQ) {
+ seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
+ mask = 0xfff;
+ } else {
+		seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
+ mask = 0xffffff;
+ }
+ PPP_MP_CB(skb)->BEbits = skb->data[2];
+ skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
+
+ /*
+ * Do protocol ID decompression on the first fragment of each packet.
+ */
+ if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
+ *skb_push(skb, 1) = 0;
+
+ /*
+ * Expand sequence number to 32 bits, making it as close
+ * as possible to ppp->minseq.
+ */
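+	/*
+	 * Example with short sequence numbers (mask = 0xfff): if
+	 * minseq = 0x10ffe and the 12-bit value received is 0x003,
+	 * OR-ing in the high bits gives 0x10003; since that is more
+	 * than half the window behind minseq, mask + 1 is added and
+	 * the sequence number becomes 0x11003.
+	 */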
+ seq |= ppp->minseq & ~mask;
+ if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
+ seq += mask + 1;
+ else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
+ seq -= mask + 1; /* should never happen */
+ PPP_MP_CB(skb)->sequence = seq;
+ pch->lastseq = seq;
+
+ /*
+ * If this packet comes before the next one we were expecting,
+ * drop it.
+ */
+ if (seq_before(seq, ppp->nextseq)) {
+ kfree_skb(skb);
+ ++ppp->dev->stats.rx_dropped;
+ ppp_receive_error(ppp);
+ return;
+ }
+
+ /*
+ * Reevaluate minseq, the minimum over all channels of the
+ * last sequence number received on each channel. Because of
+ * the increasing sequence number rule, we know that any fragment
+ * before `minseq' which hasn't arrived is never going to arrive.
+ * The list of channels can't change because we have the receive
+ * side of the ppp unit locked.
+ */
+ list_for_each_entry(ch, &ppp->channels, clist) {
+ if (seq_before(ch->lastseq, seq))
+ seq = ch->lastseq;
+ }
+ if (seq_before(ppp->minseq, seq))
+ ppp->minseq = seq;
+
+ /* Put the fragment on the reconstruction queue */
+ ppp_mp_insert(ppp, skb);
+
+ /* If the queue is getting long, don't wait any longer for packets
+ before the start of the queue. */
+ if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
+ struct sk_buff *mskb = skb_peek(&ppp->mrq);
+ if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
+ ppp->minseq = PPP_MP_CB(mskb)->sequence;
+ }
+
+ /* Pull completed packets off the queue and receive them. */
+ while ((skb = ppp_mp_reconstruct(ppp))) {
+ if (pskb_may_pull(skb, 2))
+ ppp_receive_nonmp_frame(ppp, skb);
+ else {
+ ++ppp->dev->stats.rx_length_errors;
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
+ }
+ }
+
+ return;
+
+ err:
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
+}
+
+/*
+ * Insert a fragment on the MP reconstruction queue.
+ * The queue is ordered by increasing sequence number.
+ */
+static void
+ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+ struct sk_buff_head *list = &ppp->mrq;
+ u32 seq = PPP_MP_CB(skb)->sequence;
+
+ /* N.B. we don't need to lock the list lock because we have the
+ ppp unit receive-side lock. */
+ skb_queue_walk(list, p) {
+ if (seq_before(seq, PPP_MP_CB(p)->sequence))
+ break;
+ }
+ __skb_queue_before(list, p, skb);
+}
+
+/*
+ * Reconstruct a packet from the MP fragment queue.
+ * We go through increasing sequence numbers until we find a
+ * complete packet, or we get to the sequence number for a fragment
+ * which hasn't arrived but might still do so.
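+ *
+ * Fragments carry begin (B) and end (E) flags: a complete packet is
+ * an unbroken run of consecutive sequence numbers starting at a
+ * B-flagged fragment and ending at an E-flagged one.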
+ */
+static struct sk_buff *
+ppp_mp_reconstruct(struct ppp *ppp)
+{
+ u32 seq = ppp->nextseq;
+ u32 minseq = ppp->minseq;
+ struct sk_buff_head *list = &ppp->mrq;
+ struct sk_buff *p, *tmp;
+ struct sk_buff *head, *tail;
+ struct sk_buff *skb = NULL;
+ int lost = 0, len = 0;
+
+ if (ppp->mrru == 0) /* do nothing until mrru is set */
+ return NULL;
+ head = list->next;
+ tail = NULL;
+ skb_queue_walk_safe(list, p, tmp) {
+ again:
+ if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
+ /* this can't happen, anyway ignore the skb */
+ netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+ "seq %u < %u\n",
+ PPP_MP_CB(p)->sequence, seq);
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ continue;
+ }
+ if (PPP_MP_CB(p)->sequence != seq) {
+ /* Fragment `seq' is missing. If it is after
+ minseq, it might arrive later, so stop here. */
+ if (seq_after(seq, minseq))
+ break;
+ /* Fragment `seq' is lost, keep going. */
+ lost = 1;
+			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
+				minseq + 1 : PPP_MP_CB(p)->sequence;
+ goto again;
+ }
+
+ /*
+ * At this point we know that all the fragments from
+ * ppp->nextseq to seq are either present or lost.
+ * Also, there are no complete packets in the queue
+ * that have no missing fragments and end before this
+ * fragment.
+ */
+
+ /* B bit set indicates this fragment starts a packet */
+ if (PPP_MP_CB(p)->BEbits & B) {
+ head = p;
+ lost = 0;
+ len = 0;
+ }
+
+ len += p->len;
+
+ /* Got a complete packet yet? */
+ if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
+ (PPP_MP_CB(head)->BEbits & B)) {
+ if (len > ppp->mrru + 2) {
+ ++ppp->dev->stats.rx_length_errors;
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "PPP: reconstructed packet is too long (%d)\n",
+					      len);
+ } else {
+ tail = p;
+ break;
+ }
+ ppp->nextseq = seq + 1;
+ }
+
+ /*
+ * If this is the ending fragment of a packet,
+ * and we haven't found a complete valid packet yet,
+ * we can discard up to and including this fragment.
+ */
+ if (PPP_MP_CB(p)->BEbits & E) {
+ struct sk_buff *tmp2;
+
+ skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+ head = skb_peek(list);
+ if (!head)
+ break;
+ }
+ ++seq;
+ }
+
+ /* If we have a complete packet, copy it all into one skb. */
+ if (tail != NULL) {
+ /* If we have discarded any fragments,
+ signal a receive error. */
+ if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
+ if (ppp->debug & 1)
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ " missed pkts %u..%u\n",
+ ppp->nextseq,
+					      PPP_MP_CB(head)->sequence - 1);
+ ++ppp->dev->stats.rx_dropped;
+ ppp_receive_error(ppp);
+ }
+
+ skb = head;
+ if (head != tail) {
+ struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+ p = skb_queue_next(list, head);
+ __skb_unlink(skb, list);
+ skb_queue_walk_from_safe(list, p, tmp) {
+ __skb_unlink(p, list);
+ *fragpp = p;
+ p->next = NULL;
+ fragpp = &p->next;
+
+ skb->len += p->len;
+ skb->data_len += p->len;
+ skb->truesize += p->len;
+
+ if (p == tail)
+ break;
+ }
+ } else {
+ __skb_unlink(skb, list);
+ }
+
+ ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
+ }
+
+ return skb;
+}
+#endif /* CONFIG_PPP_MULTILINK */
+
+/*
+ * Channel interface.
+ */
+
+/* Create a new, unattached ppp channel. */
+int ppp_register_channel(struct ppp_channel *chan)
+{
+ return ppp_register_net_channel(current->nsproxy->net_ns, chan);
+}
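+
+/*
+ * A channel driver (ppp_async, for instance) fills in a struct
+ * ppp_channel before registering it. A minimal sketch, with
+ * hypothetical names:
+ *
+ *	chan->private = my_state;
+ *	chan->ops = &my_channel_ops;	(provides start_xmit and ioctl)
+ *	chan->mtu = 1500;
+ *	chan->hdrlen = 0;
+ *	err = ppp_register_channel(chan);
+ */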
+
+/* Create a new, unattached ppp channel for specified net. */
+int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
+{
+ struct channel *pch;
+ struct ppp_net *pn;
+
+ pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ pn = ppp_pernet(net);
+
+ pch->ppp = NULL;
+ pch->chan = chan;
+ pch->chan_net = net;
+ chan->ppp = pch;
+ init_ppp_file(&pch->file, CHANNEL);
+ pch->file.hdrlen = chan->hdrlen;
+#ifdef CONFIG_PPP_MULTILINK
+ pch->lastseq = -1;
+#endif /* CONFIG_PPP_MULTILINK */
+ init_rwsem(&pch->chan_sem);
+ spin_lock_init(&pch->downl);
+ rwlock_init(&pch->upl);
+
+ spin_lock_bh(&pn->all_channels_lock);
+ pch->file.index = ++pn->last_channel_index;
+ list_add(&pch->list, &pn->new_channels);
+ atomic_inc(&channel_count);
+ spin_unlock_bh(&pn->all_channels_lock);
+
+ return 0;
+}
+
+/*
+ * Return the index of a channel.
+ */
+int ppp_channel_index(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+
+ if (pch)
+ return pch->file.index;
+ return -1;
+}
+
+/*
+ * Return the PPP unit number to which a channel is connected.
+ */
+int ppp_unit_number(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ int unit = -1;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp)
+ unit = pch->ppp->file.index;
+ read_unlock_bh(&pch->upl);
+ }
+ return unit;
+}
+
+/*
+ * Return the PPP device interface name of a channel.
+ */
+char *ppp_dev_name(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ char *name = NULL;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ name = pch->ppp->dev->name;
+ read_unlock_bh(&pch->upl);
+ }
+ return name;
+}
+
+/*
+ * Disconnect a channel from the generic layer.
+ * This must be called in process context.
+ */
+void
+ppp_unregister_channel(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ struct ppp_net *pn;
+
+ if (!pch)
+ return; /* should never happen */
+
+ chan->ppp = NULL;
+
+ /*
+	 * This ensures that we have returned from any calls into the
+	 * channel's start_xmit or ioctl routine before we proceed.
+ */
+ down_write(&pch->chan_sem);
+ spin_lock_bh(&pch->downl);
+ pch->chan = NULL;
+ spin_unlock_bh(&pch->downl);
+ up_write(&pch->chan_sem);
+ ppp_disconnect_channel(pch);
+
+ pn = ppp_pernet(pch->chan_net);
+ spin_lock_bh(&pn->all_channels_lock);
+ list_del(&pch->list);
+ spin_unlock_bh(&pn->all_channels_lock);
+
+ pch->file.dead = 1;
+ wake_up_interruptible(&pch->file.rwait);
+ if (atomic_dec_and_test(&pch->file.refcnt))
+ ppp_destroy_channel(pch);
+}
+
+/*
+ * Callback from a channel when it can accept more to transmit.
+ * This should be called at BH/softirq level, not interrupt level.
+ */
+void
+ppp_output_wakeup(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+
+ if (!pch)
+ return;
+ ppp_channel_push(pch);
+}
+
+/*
+ * Compression control.
+ */
+
+/* Process the PPPIOCSCOMPRESS ioctl. */
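+/*
+ * Userspace (pppd) hands us a struct ppp_option_data: a pointer to
+ * the raw CCP option bytes (ccp_option[0] is the option type and
+ * ccp_option[1] its length), the total length, and a transmit flag
+ * selecting the compressor or the decompressor side.
+ */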
+static int
+ppp_set_compress(struct ppp *ppp, unsigned long arg)
+{
+ int err;
+ struct compressor *cp, *ocomp;
+ struct ppp_option_data data;
+ void *state, *ostate;
+ unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
+
+ err = -EFAULT;
+ if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
+ (data.length <= CCP_MAX_OPTION_LENGTH &&
+ copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
+ goto out;
+ err = -EINVAL;
+ if (data.length > CCP_MAX_OPTION_LENGTH ||
+ ccp_option[1] < 2 || ccp_option[1] > data.length)
+ goto out;
+
+ cp = try_then_request_module(
+ find_compressor(ccp_option[0]),
+ "ppp-compress-%d", ccp_option[0]);
+ if (!cp)
+ goto out;
+
+ err = -ENOBUFS;
+ if (data.transmit) {
+ state = cp->comp_alloc(ccp_option, data.length);
+ if (state) {
+ ppp_xmit_lock(ppp);
+ ppp->xstate &= ~SC_COMP_RUN;
+ ocomp = ppp->xcomp;
+ ostate = ppp->xc_state;
+ ppp->xcomp = cp;
+ ppp->xc_state = state;
+ ppp_xmit_unlock(ppp);
+ if (ostate) {
+ ocomp->comp_free(ostate);
+ module_put(ocomp->owner);
+ }
+ err = 0;
+ } else
+ module_put(cp->owner);
+
+ } else {
+ state = cp->decomp_alloc(ccp_option, data.length);
+ if (state) {
+ ppp_recv_lock(ppp);
+ ppp->rstate &= ~SC_DECOMP_RUN;
+ ocomp = ppp->rcomp;
+ ostate = ppp->rc_state;
+ ppp->rcomp = cp;
+ ppp->rc_state = state;
+ ppp_recv_unlock(ppp);
+ if (ostate) {
+ ocomp->decomp_free(ostate);
+ module_put(ocomp->owner);
+ }
+ err = 0;
+ } else
+ module_put(cp->owner);
+ }
+
+ out:
+ return err;
+}
+
+/*
+ * Look at a CCP packet and update our state accordingly.
+ * We assume the caller has the xmit or recv path locked.
+ */
+static void
+ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
+{
+ unsigned char *dp;
+ int len;
+
+ if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
+ return; /* no header */
+ dp = skb->data + 2;
+
+ switch (CCP_CODE(dp)) {
+ case CCP_CONFREQ:
+
+ /* A ConfReq starts negotiation of compression
+ * in one direction of transmission,
+ * and hence brings it down...but which way?
+ *
+ * Remember:
+ * A ConfReq indicates what the sender would like to receive
+ */
+		if (inbound)
+			/* the peer is proposing what we should send */
+			ppp->xstate &= ~SC_COMP_RUN;
+		else
+			/* we are proposing what the peer should send */
+ ppp->rstate &= ~SC_DECOMP_RUN;
+
+ break;
+
+ case CCP_TERMREQ:
+ case CCP_TERMACK:
+ /*
+ * CCP is going down, both directions of transmission
+ */
+ ppp->rstate &= ~SC_DECOMP_RUN;
+ ppp->xstate &= ~SC_COMP_RUN;
+ break;
+
+ case CCP_CONFACK:
+ if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
+ break;
+ len = CCP_LENGTH(dp);
+ if (!pskb_may_pull(skb, len + 2))
+ return; /* too short */
+ dp += CCP_HDRLEN;
+ len -= CCP_HDRLEN;
+ if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
+ break;
+ if (inbound) {
+ /* we will start receiving compressed packets */
+ if (!ppp->rc_state)
+ break;
+ if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
+ ppp->file.index, 0, ppp->mru, ppp->debug)) {
+ ppp->rstate |= SC_DECOMP_RUN;
+ ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
+ }
+ } else {
+ /* we will soon start sending compressed packets */
+ if (!ppp->xc_state)
+ break;
+ if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
+ ppp->file.index, 0, ppp->debug))
+ ppp->xstate |= SC_COMP_RUN;
+ }
+ break;
+
+ case CCP_RESETACK:
+ /* reset the [de]compressor */
+ if ((ppp->flags & SC_CCP_UP) == 0)
+ break;
+ if (inbound) {
+ if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
+ ppp->rcomp->decomp_reset(ppp->rc_state);
+ ppp->rstate &= ~SC_DC_ERROR;
+ }
+ } else {
+ if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
+ ppp->xcomp->comp_reset(ppp->xc_state);
+ }
+ break;
+ }
+}
+
+/* Free up compression resources. */
+static void
+ppp_ccp_closed(struct ppp *ppp)
+{
+ void *xstate, *rstate;
+ struct compressor *xcomp, *rcomp;
+
+ ppp_lock(ppp);
+ ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
+ ppp->xstate = 0;
+ xcomp = ppp->xcomp;
+ xstate = ppp->xc_state;
+ ppp->xc_state = NULL;
+ ppp->rstate = 0;
+ rcomp = ppp->rcomp;
+ rstate = ppp->rc_state;
+ ppp->rc_state = NULL;
+ ppp_unlock(ppp);
+
+ if (xstate) {
+ xcomp->comp_free(xstate);
+ module_put(xcomp->owner);
+ }
+ if (rstate) {
+ rcomp->decomp_free(rstate);
+ module_put(rcomp->owner);
+ }
+}
+
+/* List of compressors. */
+static LIST_HEAD(compressor_list);
+static DEFINE_SPINLOCK(compressor_list_lock);
+
+struct compressor_entry {
+ struct list_head list;
+ struct compressor *comp;
+};
+
+static struct compressor_entry *
+find_comp_entry(int proto)
+{
+ struct compressor_entry *ce;
+
+ list_for_each_entry(ce, &compressor_list, list) {
+ if (ce->comp->compress_proto == proto)
+ return ce;
+ }
+ return NULL;
+}
+
+/* Register a compressor */
+int
+ppp_register_compressor(struct compressor *cp)
+{
+ struct compressor_entry *ce;
+	int ret;
+
+	spin_lock(&compressor_list_lock);
+ ret = -EEXIST;
+ if (find_comp_entry(cp->compress_proto))
+ goto out;
+ ret = -ENOMEM;
+ ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
+ if (!ce)
+ goto out;
+ ret = 0;
+ ce->comp = cp;
+ list_add(&ce->list, &compressor_list);
+ out:
+ spin_unlock(&compressor_list_lock);
+ return ret;
+}
+
+/* Unregister a compressor */
+void
+ppp_unregister_compressor(struct compressor *cp)
+{
+ struct compressor_entry *ce;
+
+ spin_lock(&compressor_list_lock);
+ ce = find_comp_entry(cp->compress_proto);
+ if (ce && ce->comp == cp) {
+ list_del(&ce->list);
+ kfree(ce);
+ }
+ spin_unlock(&compressor_list_lock);
+}
+
+/* Find a compressor. */
+static struct compressor *
+find_compressor(int type)
+{
+ struct compressor_entry *ce;
+ struct compressor *cp = NULL;
+
+ spin_lock(&compressor_list_lock);
+ ce = find_comp_entry(type);
+ if (ce) {
+ cp = ce->comp;
+ if (!try_module_get(cp->owner))
+ cp = NULL;
+ }
+ spin_unlock(&compressor_list_lock);
+ return cp;
+}
+
+/*
+ * Miscellaneous stuff.
+ */
+
+static void
+ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
+{
+ struct slcompress *vj = ppp->vj;
+
+ memset(st, 0, sizeof(*st));
+ st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
+ st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
+ st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
+ st->p.ppp_opackets = ppp->dev->stats.tx_packets;
+ st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
+ st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
+ if (!vj)
+ return;
+ st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
+ st->vj.vjs_compressed = vj->sls_o_compressed;
+ st->vj.vjs_searches = vj->sls_o_searches;
+ st->vj.vjs_misses = vj->sls_o_misses;
+ st->vj.vjs_errorin = vj->sls_i_error;
+ st->vj.vjs_tossed = vj->sls_i_tossed;
+ st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
+ st->vj.vjs_compressedin = vj->sls_i_compressed;
+}
+
+/*
+ * Stuff for handling the lists of ppp units and channels
+ * and for initialization.
+ */
+
+/*
+ * Create a new ppp interface unit. Fails if it can't allocate memory
+ * or if there is already a unit with the requested number.
+ * unit == -1 means allocate a new number.
+ */
+static struct ppp *
+ppp_create_interface(struct net *net, int unit, int *retp)
+{
+ struct ppp *ppp;
+ struct ppp_net *pn;
+ struct net_device *dev = NULL;
+ int ret = -ENOMEM;
+ int i;
+
+ dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
+ if (!dev)
+ goto out1;
+
+ pn = ppp_pernet(net);
+
+ ppp = netdev_priv(dev);
+ ppp->dev = dev;
+ ppp->mru = PPP_MRU;
+ init_ppp_file(&ppp->file, INTERFACE);
+ ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+ for (i = 0; i < NUM_NP; ++i)
+ ppp->npmode[i] = NPMODE_PASS;
+ INIT_LIST_HEAD(&ppp->channels);
+ spin_lock_init(&ppp->rlock);
+ spin_lock_init(&ppp->wlock);
+#ifdef CONFIG_PPP_MULTILINK
+ ppp->minseq = -1;
+ skb_queue_head_init(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+
+	/*
+	 * Don't forget to set the network namespace
+	 * this device belongs to.
+	 */
+ dev_net_set(dev, net);
+
+ mutex_lock(&pn->all_ppp_mutex);
+
+ if (unit < 0) {
+ unit = unit_get(&pn->units_idr, ppp);
+ if (unit < 0) {
+ ret = unit;
+ goto out2;
+ }
+ } else {
+ ret = -EEXIST;
+ if (unit_find(&pn->units_idr, unit))
+ goto out2; /* unit already exists */
+		/*
+		 * If the caller asked for a specific unit number,
+		 * try to satisfy the request; otherwise the caller
+		 * should ask us for a new unit number instead.
+		 *
+		 * NOTE: returning -EEXIST here is not strictly fair,
+		 * but at least pppd will then ask us to allocate a
+		 * new unit, so the user ends up happy.
+		 */
+ unit = unit_set(&pn->units_idr, ppp, unit);
+ if (unit < 0)
+ goto out2;
+ }
+
+ /* Initialize the new ppp unit */
+ ppp->file.index = unit;
+ sprintf(dev->name, "ppp%d", unit);
+
+ ret = register_netdev(dev);
+ if (ret != 0) {
+ unit_put(&pn->units_idr, unit);
+ netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+ dev->name, ret);
+ goto out2;
+ }
+
+ ppp->ppp_net = net;
+
+ atomic_inc(&ppp_unit_count);
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ *retp = 0;
+ return ppp;
+
+out2:
+ mutex_unlock(&pn->all_ppp_mutex);
+ free_netdev(dev);
+out1:
+ *retp = ret;
+ return NULL;
+}
+
+/*
+ * Initialize a ppp_file structure.
+ */
+static void
+init_ppp_file(struct ppp_file *pf, int kind)
+{
+ pf->kind = kind;
+ skb_queue_head_init(&pf->xq);
+ skb_queue_head_init(&pf->rq);
+ atomic_set(&pf->refcnt, 1);
+ init_waitqueue_head(&pf->rwait);
+}
+
+/*
+ * Take down a ppp interface unit - called when the owning file
+ * (the one that created the unit) is closed or detached.
+ */
+static void ppp_shutdown_interface(struct ppp *ppp)
+{
+ struct ppp_net *pn;
+
+ pn = ppp_pernet(ppp->ppp_net);
+ mutex_lock(&pn->all_ppp_mutex);
+
+ /* This will call dev_close() for us. */
+ ppp_lock(ppp);
+ if (!ppp->closing) {
+ ppp->closing = 1;
+ ppp_unlock(ppp);
+ unregister_netdev(ppp->dev);
+ unit_put(&pn->units_idr, ppp->file.index);
+ } else
+ ppp_unlock(ppp);
+
+ ppp->file.dead = 1;
+ ppp->owner = NULL;
+ wake_up_interruptible(&ppp->file.rwait);
+
+ mutex_unlock(&pn->all_ppp_mutex);
+}
+
+/*
+ * Free the memory used by a ppp unit. This is only called once
+ * there are no channels connected to the unit and no file structs
+ * that reference the unit.
+ */
+static void ppp_destroy_interface(struct ppp *ppp)
+{
+ atomic_dec(&ppp_unit_count);
+
+ if (!ppp->file.dead || ppp->n_channels) {
+ /* "can't happen" */
+ netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+ "but dead=%d n_channels=%d !\n",
+ ppp, ppp->file.dead, ppp->n_channels);
+ return;
+ }
+
+ ppp_ccp_closed(ppp);
+ if (ppp->vj) {
+ slhc_free(ppp->vj);
+ ppp->vj = NULL;
+ }
+ skb_queue_purge(&ppp->file.xq);
+ skb_queue_purge(&ppp->file.rq);
+#ifdef CONFIG_PPP_MULTILINK
+ skb_queue_purge(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+ kfree(ppp->pass_filter);
+ ppp->pass_filter = NULL;
+ kfree(ppp->active_filter);
+ ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
+
+ kfree_skb(ppp->xmit_pending);
+
+ free_netdev(ppp->dev);
+}
+
+/*
+ * Locate an existing ppp unit.
+ * The caller should have locked the all_ppp_mutex.
+ */
+static struct ppp *
+ppp_find_unit(struct ppp_net *pn, int unit)
+{
+ return unit_find(&pn->units_idr, unit);
+}
+
+/*
+ * Locate an existing ppp channel.
+ * The caller should have locked the all_channels_lock.
+ * First we look in the new_channels list, then in the
+ * all_channels list. If found in the new_channels list,
+ * we move it to the all_channels list. This is for speed
+ * when we have a lot of channels in use.
+ */
+static struct channel *
+ppp_find_channel(struct ppp_net *pn, int unit)
+{
+ struct channel *pch;
+
+ list_for_each_entry(pch, &pn->new_channels, list) {
+ if (pch->file.index == unit) {
+ list_move(&pch->list, &pn->all_channels);
+ return pch;
+ }
+ }
+
+ list_for_each_entry(pch, &pn->all_channels, list) {
+ if (pch->file.index == unit)
+ return pch;
+ }
+
+ return NULL;
+}
+
+/*
+ * Connect a PPP channel to a PPP interface unit.
+ */
+static int
+ppp_connect_channel(struct channel *pch, int unit)
+{
+ struct ppp *ppp;
+ struct ppp_net *pn;
+ int ret = -ENXIO;
+ int hdrlen;
+
+ pn = ppp_pernet(pch->chan_net);
+
+ mutex_lock(&pn->all_ppp_mutex);
+ ppp = ppp_find_unit(pn, unit);
+ if (!ppp)
+ goto out;
+ write_lock_bh(&pch->upl);
+ ret = -EINVAL;
+ if (pch->ppp)
+ goto outl;
+
+ ppp_lock(ppp);
+ if (pch->file.hdrlen > ppp->file.hdrlen)
+ ppp->file.hdrlen = pch->file.hdrlen;
+ hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
+ if (hdrlen > ppp->dev->hard_header_len)
+ ppp->dev->hard_header_len = hdrlen;
+ list_add_tail(&pch->clist, &ppp->channels);
+ ++ppp->n_channels;
+ pch->ppp = ppp;
+ atomic_inc(&ppp->file.refcnt);
+ ppp_unlock(ppp);
+ ret = 0;
+
+ outl:
+ write_unlock_bh(&pch->upl);
+ out:
+ mutex_unlock(&pn->all_ppp_mutex);
+ return ret;
+}
+
+/*
+ * Disconnect a channel from its ppp unit.
+ */
+static int
+ppp_disconnect_channel(struct channel *pch)
+{
+ struct ppp *ppp;
+ int err = -EINVAL;
+
+ write_lock_bh(&pch->upl);
+ ppp = pch->ppp;
+ pch->ppp = NULL;
+ write_unlock_bh(&pch->upl);
+ if (ppp) {
+ /* remove it from the ppp unit's list */
+ ppp_lock(ppp);
+ list_del(&pch->clist);
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
+ ppp_unlock(ppp);
+ if (atomic_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+ err = 0;
+ }
+ return err;
+}
+
+/*
+ * Free up the resources used by a ppp channel.
+ */
+static void ppp_destroy_channel(struct channel *pch)
+{
+ atomic_dec(&channel_count);
+
+ if (!pch->file.dead) {
+ /* "can't happen" */
+ pr_err("ppp: destroying undead channel %p !\n", pch);
+ return;
+ }
+ skb_queue_purge(&pch->file.xq);
+ skb_queue_purge(&pch->file.rq);
+ kfree(pch);
+}
+
+static void __exit ppp_cleanup(void)
+{
+ /* should never happen */
+ if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
+ pr_err("PPP: removing module but units remain!\n");
+ unregister_chrdev(PPP_MAJOR, "ppp");
+ device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
+ class_destroy(ppp_class);
+ unregister_pernet_device(&ppp_net_ops);
+}
+
+/*
+ * Units handling. Caller must protect concurrent access
+ * by holding all_ppp_mutex
+ */
+
+static int __unit_alloc(struct idr *p, void *ptr, int n)
+{
+ int unit, err;
+
+again:
+ if (!idr_pre_get(p, GFP_KERNEL)) {
+ pr_err("PPP: No free memory for idr\n");
+ return -ENOMEM;
+ }
+
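+	/*
+	 * idr_pre_get() only preallocates memory; idr_get_new_above()
+	 * may still return -EAGAIN if that memory was consumed by a
+	 * concurrent allocation, in which case we preload and retry.
+	 */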
+ err = idr_get_new_above(p, ptr, n, &unit);
+ if (err < 0) {
+ if (err == -EAGAIN)
+ goto again;
+ return err;
+ }
+
+ return unit;
+}
+
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+ int unit;
+
+ unit = __unit_alloc(p, ptr, n);
+ if (unit < 0)
+ return unit;
+ else if (unit != n) {
+ idr_remove(p, unit);
+ return -EINVAL;
+ }
+
+ return unit;
+}
+
+/* get new free unit number and associate pointer with it */
+static int unit_get(struct idr *p, void *ptr)
+{
+ return __unit_alloc(p, ptr, 0);
+}
+
+/* put unit number back to a pool */
+static void unit_put(struct idr *p, int n)
+{
+ idr_remove(p, n);
+}
+
+/* get pointer associated with the number */
+static void *unit_find(struct idr *p, int n)
+{
+ return idr_find(p, n);
+}
+
+/* Module/initialization stuff */
+
+module_init(ppp_init);
+module_exit(ppp_cleanup);
+
+EXPORT_SYMBOL(ppp_register_net_channel);
+EXPORT_SYMBOL(ppp_register_channel);
+EXPORT_SYMBOL(ppp_unregister_channel);
+EXPORT_SYMBOL(ppp_channel_index);
+EXPORT_SYMBOL(ppp_unit_number);
+EXPORT_SYMBOL(ppp_dev_name);
+EXPORT_SYMBOL(ppp_input);
+EXPORT_SYMBOL(ppp_input_error);
+EXPORT_SYMBOL(ppp_output_wakeup);
+EXPORT_SYMBOL(ppp_register_compressor);
+EXPORT_SYMBOL(ppp_unregister_compressor);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS("devname:ppp");
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
- iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+ txq->time_stamp = jiffies;
+
+ iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
+ DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
if (rt2800usb_txdone_entry_check(entry, reg))
break;
+ entry = NULL;
}
- if (!entry || rt2x00queue_empty(queue))
- break;
-
- rt2800_txdone_entry(entry, reg,
- rt2800usb_get_txwi(entry));
+ if (entry)
- rt2800_txdone_entry(entry, reg);
++ rt2800_txdone_entry(entry, reg,
++ rt2800usb_get_txwi(entry));
}
}
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
goto nofrags;
- skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- get_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);