From: David S. Miller Date: Sat, 20 Aug 2011 17:39:12 +0000 (-0700) Subject: Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=823dcd2506fa369aeb8cbd26da5663efe2fda9a9;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git Merge branch 'master' of /linux/kernel/git/davem/net --- 823dcd2506fa369aeb8cbd26da5663efe2fda9a9 diff --cc drivers/net/ethernet/amd/pcnet32.c index e19c1a73c955,000000000000..c90fe917090b mode 100644,000000..100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@@ -1,2937 -1,0 +1,2937 @@@ +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ +/* + * Copyright 1996-1999 Thomas Bogendoerfer + * + * Derived from the lance driver written 1993,1994,1995 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver is for PCnet32 and PCnetPCI based ethercards + */ +/************************************************************************** + * 23 Oct, 2000. + * Fixed a few bugs, related to running the controller in 32bit mode. + * + * Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. + * + *************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "pcnet32" +#define DRV_VERSION "1.35" +#define DRV_RELDATE "21.Apr.2008" +#define PFX DRV_NAME ": " + +static const char *const version = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/crc32.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/moduleparam.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/uaccess.h> + +#include <asm/dma.h> +#include <asm/irq.h> + +/* + * PCI device identifiers for "new style" Linux PCI Device Drivers + */ +static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, + + /* + * Adapters that were sold with IBM's RS/6000 or pSeries hardware have + * the incorrect vendor id. + */ + { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE), + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, }, + + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); + +static int cards_found; + +/* + * VLB I/O addresses + */ - static unsigned int pcnet32_portlist[] __initdata = ++static unsigned int pcnet32_portlist[] = + { 0x300, 0x320, 0x340, 0x360, 0 }; + +static int pcnet32_debug; +static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ +static int pcnet32vlb; /* check for VLB cards ? 
*/ + +static struct net_device *pcnet32_dev; + +static int max_interrupt_work = 2; +static int rx_copybreak = 200; + +#define PCNET32_PORT_AUI 0x00 +#define PCNET32_PORT_10BT 0x01 +#define PCNET32_PORT_GPSI 0x02 +#define PCNET32_PORT_MII 0x03 + +#define PCNET32_PORT_PORTSEL 0x03 +#define PCNET32_PORT_ASEL 0x04 +#define PCNET32_PORT_100 0x40 +#define PCNET32_PORT_FD 0x80 + +#define PCNET32_DMA_MASK 0xffffffff + +#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ)) +#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4)) + +/* + * table to translate option values from tulip + * to internal options + */ +static const unsigned char options_mapping[] = { + PCNET32_PORT_ASEL, /* 0 Auto-select */ + PCNET32_PORT_AUI, /* 1 BNC/AUI */ + PCNET32_PORT_AUI, /* 2 AUI/BNC */ + PCNET32_PORT_ASEL, /* 3 not supported */ + PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ + PCNET32_PORT_ASEL, /* 5 not supported */ + PCNET32_PORT_ASEL, /* 6 not supported */ + PCNET32_PORT_ASEL, /* 7 not supported */ + PCNET32_PORT_ASEL, /* 8 not supported */ + PCNET32_PORT_MII, /* 9 MII 10baseT */ + PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ + PCNET32_PORT_MII, /* 11 MII (autosel) */ + PCNET32_PORT_10BT, /* 12 10BaseT */ + PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ + /* 14 MII 100BaseTx-FD */ + PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, + PCNET32_PORT_ASEL /* 15 not supported */ +}; + +static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { + "Loopback test (offline)" +}; + +#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test) + +#define PCNET32_NUM_REGS 136 + +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS]; +static int full_duplex[MAX_UNITS]; +static int homepna[MAX_UNITS]; + +/* + * Theory of Operation + * + * This driver uses the same software structure as the normal lance + * driver. So look for a verbose description in lance.c. The differences + * to the normal lance driver is the use of the 32bit mode of PCnet32 + * and PCnetPCI chips. Because these chips are 32bit chips, there is no + * 16MB limitation and we don't need bounce buffers. + */ + +/* + * Set the number of Tx and Rx buffers, using Log_2(# buffers). + * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. + * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). + */ +#ifndef PCNET32_LOG_TX_BUFFERS +#define PCNET32_LOG_TX_BUFFERS 4 +#define PCNET32_LOG_RX_BUFFERS 5 +#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */ +#define PCNET32_LOG_MAX_RX_BUFFERS 9 +#endif + +#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) +#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS)) + +#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) +#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) + +#define PKT_BUF_SKB 1544 +/* actual buffer length after being aligned */ +#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) +/* chip wants twos complement of the (aligned) buffer length */ +#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) + +/* Offsets from base I/O address. 
*/ +#define PCNET32_WIO_RDP 0x10 +#define PCNET32_WIO_RAP 0x12 +#define PCNET32_WIO_RESET 0x14 +#define PCNET32_WIO_BDP 0x16 + +#define PCNET32_DWIO_RDP 0x10 +#define PCNET32_DWIO_RAP 0x14 +#define PCNET32_DWIO_RESET 0x18 +#define PCNET32_DWIO_BDP 0x1C + +#define PCNET32_TOTAL_SIZE 0x20 + +#define CSR0 0 +#define CSR0_INIT 0x1 +#define CSR0_START 0x2 +#define CSR0_STOP 0x4 +#define CSR0_TXPOLL 0x8 +#define CSR0_INTEN 0x40 +#define CSR0_IDON 0x0100 +#define CSR0_NORMAL (CSR0_START | CSR0_INTEN) +#define PCNET32_INIT_LOW 1 +#define PCNET32_INIT_HIGH 2 +#define CSR3 3 +#define CSR4 4 +#define CSR5 5 +#define CSR5_SUSPEND 0x0001 +#define CSR15 15 +#define PCNET32_MC_FILTER 8 + +#define PCNET32_79C970A 0x2621 + +/* The PCNET32 Rx and Tx ring descriptors. */ +struct pcnet32_rx_head { + __le32 base; + __le16 buf_length; /* two`s complement of length */ + __le16 status; + __le32 msg_length; + __le32 reserved; +}; + +struct pcnet32_tx_head { + __le32 base; + __le16 length; /* two`s complement of length */ + __le16 status; + __le32 misc; + __le32 reserved; +}; + +/* The PCNET32 32-Bit initialization block, described in databook. */ +struct pcnet32_init_block { + __le16 mode; + __le16 tlen_rlen; + u8 phys_addr[6]; + __le16 reserved; + __le32 filter[2]; + /* Receive and transmit ring base, along with extra bits. */ + __le32 rx_ring; + __le32 tx_ring; +}; + +/* PCnet32 access functions */ +struct pcnet32_access { + u16 (*read_csr) (unsigned long, int); + void (*write_csr) (unsigned long, int, u16); + u16 (*read_bcr) (unsigned long, int); + void (*write_bcr) (unsigned long, int, u16); + u16 (*read_rap) (unsigned long); + void (*write_rap) (unsigned long, u16); + void (*reset) (unsigned long); +}; + +/* + * The first field of pcnet32_private is read by the ethernet device + * so the structure should be allocated using pci_alloc_consistent(). + */ +struct pcnet32_private { + struct pcnet32_init_block *init_block; + /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ + struct pcnet32_rx_head *rx_ring; + struct pcnet32_tx_head *tx_ring; + dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, + returned by pci_alloc_consistent */ + struct pci_dev *pci_dev; + const char *name; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + dma_addr_t *tx_dma_addr; + dma_addr_t *rx_dma_addr; + struct pcnet32_access a; + spinlock_t lock; /* Guard lock */ + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int rx_ring_size; /* current rx ring size */ + unsigned int tx_ring_size; /* current tx ring size */ + unsigned int rx_mod_mask; /* rx ring modular mask */ + unsigned int tx_mod_mask; /* tx ring modular mask */ + unsigned short rx_len_bits; + unsigned short tx_len_bits; + dma_addr_t rx_ring_dma_addr; + dma_addr_t tx_ring_dma_addr; + unsigned int dirty_rx, /* ring entries to be freed. 
*/ + dirty_tx; + + struct net_device *dev; + struct napi_struct napi; + char tx_full; + char phycount; /* number of phys found */ + int options; + unsigned int shared_irq:1, /* shared irq possible */ + dxsuflo:1, /* disable transmit stop on uflo */ + mii:1; /* mii port available */ + struct net_device *next; + struct mii_if_info mii_if; + struct timer_list watchdog_timer; + u32 msg_enable; /* debug message level */ + + /* each bit indicates an available PHY */ + u32 phymask; + unsigned short chip_version; /* which variant this is */ + + /* saved registers during ethtool blink */ + u16 save_regs[4]; +}; + +static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); +static int pcnet32_probe1(unsigned long, int, struct pci_dev *); +static int pcnet32_open(struct net_device *); +static int pcnet32_init_ring(struct net_device *); +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *, + struct net_device *); +static void pcnet32_tx_timeout(struct net_device *dev); +static irqreturn_t pcnet32_interrupt(int, void *); +static int pcnet32_close(struct net_device *); +static struct net_device_stats *pcnet32_get_stats(struct net_device *); +static void pcnet32_load_multicast(struct net_device *dev); +static void pcnet32_set_multicast_list(struct net_device *); +static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); +static void pcnet32_watchdog(struct net_device *); +static int mdio_read(struct net_device *dev, int phy_id, int reg_num); +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, + int val); +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); +static void pcnet32_ethtool_test(struct net_device *dev, + struct ethtool_test *eth_test, u64 * data); +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); +static int pcnet32_get_regs_len(struct net_device *dev); +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *ptr); +static void pcnet32_purge_tx_ring(struct net_device *dev); +static int pcnet32_alloc_ring(struct net_device *dev, const char *name); +static void pcnet32_free_ring(struct net_device *dev); +static void pcnet32_check_media(struct net_device *dev, int verbose); + +static u16 pcnet32_wio_read_csr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RDP); +} + +static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_RDP); +} + +static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_BDP); +} + +static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_BDP); +} + +static u16 pcnet32_wio_read_rap(unsigned long addr) +{ + return inw(addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_write_rap(unsigned long addr, u16 val) +{ + outw(val, addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_reset(unsigned long addr) +{ + inw(addr + PCNET32_WIO_RESET); +} + +static int pcnet32_wio_check(unsigned long addr) +{ + outw(88, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RAP) == 88; +} + +static struct pcnet32_access pcnet32_wio = { + .read_csr = pcnet32_wio_read_csr, + .write_csr = pcnet32_wio_write_csr, + .read_bcr = pcnet32_wio_read_bcr, + .write_bcr = pcnet32_wio_write_bcr, + .read_rap = pcnet32_wio_read_rap, + .write_rap = 
pcnet32_wio_write_rap, + .reset = pcnet32_wio_reset +}; + +static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return inl(addr + PCNET32_DWIO_RDP) & 0xffff; +} + +static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_RDP); +} + +static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return inl(addr + PCNET32_DWIO_BDP) & 0xffff; +} + +static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_BDP); +} + +static u16 pcnet32_dwio_read_rap(unsigned long addr) +{ + return inl(addr + PCNET32_DWIO_RAP) & 0xffff; +} + +static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) +{ + outl(val, addr + PCNET32_DWIO_RAP); +} + +static void pcnet32_dwio_reset(unsigned long addr) +{ + inl(addr + PCNET32_DWIO_RESET); +} + +static int pcnet32_dwio_check(unsigned long addr) +{ + outl(88, addr + PCNET32_DWIO_RAP); + return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88; +} + +static struct pcnet32_access pcnet32_dwio = { + .read_csr = pcnet32_dwio_read_csr, + .write_csr = pcnet32_dwio_write_csr, + .read_bcr = pcnet32_dwio_read_bcr, + .write_bcr = pcnet32_dwio_write_bcr, + .read_rap = pcnet32_dwio_read_rap, + .write_rap = pcnet32_dwio_write_rap, + .reset = pcnet32_dwio_reset +}; + +static void pcnet32_netif_stop(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + dev->trans_start = jiffies; /* prevent tx timeout */ + napi_disable(&lp->napi); + netif_tx_disable(dev); +} + +static void pcnet32_netif_start(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + ulong ioaddr = dev->base_addr; + u16 val; + + netif_wake_queue(dev); + val = lp->a.read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a.write_csr(ioaddr, CSR3, val); + napi_enable(&lp->napi); +} + +/* + * Allocate space for the new sized tx ring. + * Free old resources + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. 
+ */ +static void pcnet32_realloc_tx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_tx_head *new_tx_ring; + struct sk_buff **new_skb_list; + + pcnet32_purge_tx_ring(dev); + + new_tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + (1 << size), + &new_ring_dma_addr); + if (new_tx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return; + } + memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); + + new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), + GFP_ATOMIC); + if (!new_dma_addr_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_tx_ring; + } + + new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_lists; + } + + kfree(lp->tx_skbuff); + kfree(lp->tx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, lp->tx_ring, + lp->tx_ring_dma_addr); + + lp->tx_ring_size = (1 << size); + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->tx_len_bits = (size << 12); + lp->tx_ring = new_tx_ring; + lp->tx_ring_dma_addr = new_ring_dma_addr; + lp->tx_dma_addr = new_dma_addr_list; + lp->tx_skbuff = new_skb_list; + return; + +free_new_lists: + kfree(new_dma_addr_list); +free_new_tx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + (1 << size), + new_tx_ring, + new_ring_dma_addr); +} + +/* + * Allocate space for the new sized rx ring. + * Re-use old receive buffers. + * alloc extra buffers + * free unneeded buffers + * free unneeded buffers + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. 
+ */ +static void pcnet32_realloc_rx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_rx_head *new_rx_ring; + struct sk_buff **new_skb_list; + int new, overlap; + + new_rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + (1 << size), + &new_ring_dma_addr); + if (new_rx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return; + } + memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); + + new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), + GFP_ATOMIC); + if (!new_dma_addr_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_rx_ring; + } + + new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_lists; + } + + /* first copy the current receive buffers */ + overlap = min(size, lp->rx_ring_size); + for (new = 0; new < overlap; new++) { + new_rx_ring[new] = lp->rx_ring[new]; + new_dma_addr_list[new] = lp->rx_dma_addr[new]; + new_skb_list[new] = lp->rx_skbuff[new]; + } + /* now allocate any new buffers needed */ + for (; new < size; new++) { + struct sk_buff *rx_skbuff; + new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB); + rx_skbuff = new_skb_list[new]; + if (!rx_skbuff) { + /* keep the original lists and buffers */ + netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n", + __func__); + goto free_all_new; + } + skb_reserve(rx_skbuff, NET_IP_ALIGN); + + new_dma_addr_list[new] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); + new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); + new_rx_ring[new].status = cpu_to_le16(0x8000); + } + /* and free any unneeded buffers */ + for (; new < lp->rx_ring_size; new++) { + if (lp->rx_skbuff[new]) { + pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_skbuff[new]); + } + } + + kfree(lp->rx_skbuff); + kfree(lp->rx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + + lp->rx_ring_size = (1 << size); + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->rx_len_bits = (size << 4); + lp->rx_ring = new_rx_ring; + lp->rx_ring_dma_addr = new_ring_dma_addr; + lp->rx_dma_addr = new_dma_addr_list; + lp->rx_skbuff = new_skb_list; + return; + +free_all_new: + while (--new >= lp->rx_ring_size) { + if (new_skb_list[new]) { + pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(new_skb_list[new]); + } + } + kfree(new_skb_list); +free_new_lists: + kfree(new_dma_addr_list); +free_new_rx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + (1 << size), + new_rx_ring, + new_ring_dma_addr); +} + +static void pcnet32_purge_rx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + /* free all allocated skbuffs */ + for (i = 0; i < lp->rx_ring_size; i++) { + lp->rx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->rx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(lp->rx_skbuff[i]); + } + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; + } +} + +#ifdef 
CONFIG_NET_POLL_CONTROLLER +static void pcnet32_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + pcnet32_interrupt(0, dev); + enable_irq(dev->irq); +} +#endif + +static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + mii_ethtool_gset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + r = 0; + } + return r; +} + +static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_ethtool_sset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; +} + +static void pcnet32_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + if (lp->pci_dev) + strcpy(info->bus_info, pci_name(lp->pci_dev)); + else + sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); +} + +static u32 pcnet32_get_link(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r; + + spin_lock_irqsave(&lp->lock, flags); + if (lp->mii) { + r = mii_link_ok(&lp->mii_if); + } else if (lp->chip_version >= PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ + r = 1; + } + spin_unlock_irqrestore(&lp->lock, flags); + + return r; +} + +static u32 pcnet32_get_msglevel(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void pcnet32_set_msglevel(struct net_device *dev, u32 value) +{ + struct pcnet32_private *lp = netdev_priv(dev); + lp->msg_enable = value; +} + +static int pcnet32_nway_reset(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_nway_restart(&lp->mii_if); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; +} + +static void pcnet32_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + ering->tx_max_pending = TX_MAX_RING_SIZE; + ering->tx_pending = lp->tx_ring_size; + ering->rx_max_pending = RX_MAX_RING_SIZE; + ering->rx_pending = lp->rx_ring_size; +} + +static int pcnet32_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + unsigned int size; + ulong ioaddr = dev->base_addr; + int i; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(dev)) + pcnet32_netif_stop(dev); + + spin_lock_irqsave(&lp->lock, flags); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); + + /* set the minimum ring size to 4, to allow the loopback test to work + * unchanged. 
+ */ + for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { + if (size <= (1 << i)) + break; + } + if ((1 << i) != lp->tx_ring_size) + pcnet32_realloc_tx_ring(dev, lp, i); + + size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); + for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { + if (size <= (1 << i)) + break; + } + if ((1 << i) != lp->rx_ring_size) + pcnet32_realloc_rx_ring(dev, lp, i); + + lp->napi.weight = lp->rx_ring_size / 2; + + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); + } + + spin_unlock_irqrestore(&lp->lock, flags); + + netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", + lp->rx_ring_size, lp->tx_ring_size); + + return 0; +} + +static void pcnet32_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); +} + +static int pcnet32_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return PCNET32_TEST_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void pcnet32_ethtool_test(struct net_device *dev, + struct ethtool_test *test, u64 * data) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int rc; + + if (test->flags == ETH_TEST_FL_OFFLINE) { + rc = pcnet32_loopback_test(dev, data); + if (rc) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Loopback test failed\n"); + test->flags |= ETH_TEST_FL_FAILED; + } else + netif_printk(lp, hw, KERN_DEBUG, dev, + "Loopback test passed\n"); + } else + netif_printk(lp, hw, KERN_DEBUG, dev, + "No tests to run (specify 'Offline' on ethtool)\n"); +} /* end pcnet32_ethtool_test */ + +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; /* access to registers */ + ulong ioaddr = dev->base_addr; /* card base I/O address */ + struct sk_buff *skb; /* sk buff */ + int x, i; /* counters */ + int numbuffs = 4; /* number of TX/RX buffers and descs */ + u16 status = 0x8300; /* TX ring status */ + __le16 teststatus; /* test of ring status */ + int rc; /* return code */ + int size; /* size of packets */ + unsigned char *packet; /* source packet data */ + static const int data_len = 60; /* length of source packets */ + unsigned long flags; + unsigned long ticks; + + rc = 1; /* default to fail */ + + if (netif_running(dev)) + pcnet32_netif_stop(dev); + + spin_lock_irqsave(&lp->lock, flags); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); + + /* Reset the PCNET32 */ + lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + + /* switch pcnet32 to 32bit mode */ + lp->a.write_bcr(ioaddr, 20, 2); + + /* purge & init rings but don't actually restart */ + pcnet32_restart(dev, 0x0000); + + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + + /* Initialize Transmit buffers. 
*/ + size = data_len + 15; + for (x = 0; x < numbuffs; x++) { + skb = dev_alloc_skb(size); + if (!skb) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Cannot allocate skb at line: %d!\n", + __LINE__); + goto clean_up; + } + packet = skb->data; + skb_put(skb, size); /* create space for data */ + lp->tx_skbuff[x] = skb; + lp->tx_ring[x].length = cpu_to_le16(-skb->len); + lp->tx_ring[x].misc = 0; + + /* put DA and SA into the skb */ + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + /* type */ + *packet++ = 0x08; + *packet++ = 0x06; + /* packet number */ + *packet++ = x; + /* fill packet with data */ + for (i = 0; i < data_len; i++) + *packet++ = i; + + lp->tx_dma_addr[x] = + pci_map_single(lp->pci_dev, skb->data, skb->len, + PCI_DMA_TODEVICE); + lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[x].status = cpu_to_le16(status); + } + + x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */ + a->write_bcr(ioaddr, 32, x | 0x0002); + + /* set int loopback in CSR15 */ + x = a->read_csr(ioaddr, CSR15) & 0xfffc; + lp->a.write_csr(ioaddr, CSR15, x | 0x0044); + + teststatus = cpu_to_le16(0x8000); + lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ + + /* Check status of descriptors */ + for (x = 0; x < numbuffs; x++) { + ticks = 0; + rmb(); + while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { + spin_unlock_irqrestore(&lp->lock, flags); + msleep(1); + spin_lock_irqsave(&lp->lock, flags); + rmb(); + ticks++; + } + if (ticks == 200) { + netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x); + break; + } + } + + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + wmb(); + if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { + netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n"); + + for (x = 0; x < numbuffs; x++) { + netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x); + skb = lp->rx_skbuff[x]; + for (i = 0; i < size; i++) + pr_cont(" %02x", *(skb->data + i)); + pr_cont("\n"); + } + } + + x = 0; + rc = 0; + while (x < numbuffs && !rc) { + skb = lp->rx_skbuff[x]; + packet = lp->tx_skbuff[x]->data; + for (i = 0; i < size; i++) { + if (*(skb->data + i) != packet[i]) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Error in compare! 
%2x - %02x %02x\n", + i, *(skb->data + i), packet[i]); + rc = 1; + break; + } + } + x++; + } + +clean_up: + *data1 = rc; + pcnet32_purge_tx_ring(dev); + + x = a->read_csr(ioaddr, CSR15); + a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */ + + x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ + a->write_bcr(ioaddr, 32, (x & ~0x0002)); + + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); + } else { + pcnet32_purge_rx_ring(dev); + lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + } + spin_unlock_irqrestore(&lp->lock, flags); + + return rc; +} /* end pcnet32_loopback_test */ + +static int pcnet32_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + int i; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* Save the current value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + lp->save_regs[i - 4] = a->read_bcr(ioaddr, i); + spin_unlock_irqrestore(&lp->lock, flags); + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + case ETHTOOL_ID_OFF: + /* Blink the led */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); + spin_unlock_irqrestore(&lp->lock, flags); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore the original value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + a->write_bcr(ioaddr, i, lp->save_regs[i - 4]); + spin_unlock_irqrestore(&lp->lock, flags); + } + return 0; +} + +/* + * lp->lock must be held. + */ +static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, + int can_sleep) +{ + int csr5; + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + + /* really old chips have to be stopped. */ + if (lp->chip_version < PCNET32_79C970A) + return 0; + + /* set SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); + + /* poll waiting for bit to be set */ + ticks = 0; + while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { + spin_unlock_irqrestore(&lp->lock, *flags); + if (can_sleep) + msleep(1); + else + mdelay(1); + spin_lock_irqsave(&lp->lock, *flags); + ticks++; + if (ticks > 200) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Error getting into suspend!\n"); + return 0; + } + } + return 1; +} + +/* + * process one receive descriptor entry + */ + +static void pcnet32_rx_entry(struct net_device *dev, + struct pcnet32_private *lp, + struct pcnet32_rx_head *rxp, + int entry) +{ + int status = (short)le16_to_cpu(rxp->status) >> 8; + int rx_in_place = 0; + struct sk_buff *skb; + short pkt_len; + + if (status != 0x03) { /* There was an error. */ + /* + * There is a tricky error noted by John Murphy, + * to Russ Nelson: Even with full-sized + * buffers it's possible for a jabber packet to use two + * buffers, with only the last correctly noting the error. + */ + if (status & 0x01) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet. 
*/ + if (status & 0x20) + dev->stats.rx_frame_errors++; + if (status & 0x10) + dev->stats.rx_over_errors++; + if (status & 0x08) + dev->stats.rx_crc_errors++; + if (status & 0x04) + dev->stats.rx_fifo_errors++; + return; + } + + pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; + + /* Discard oversize frames. */ + if (unlikely(pkt_len > PKT_BUF_SIZE)) { + netif_err(lp, drv, dev, "Impossible packet size %d!\n", + pkt_len); + dev->stats.rx_errors++; + return; + } + if (pkt_len < 60) { + netif_err(lp, rx_err, dev, "Runt packet!\n"); + dev->stats.rx_errors++; + return; + } + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + + newskb = dev_alloc_skb(PKT_BUF_SKB); + if (newskb) { + skb_reserve(newskb, NET_IP_ALIGN); + skb = lp->rx_skbuff[entry]; + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[entry], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[entry] = newskb; + lp->rx_dma_addr[entry] = + pci_map_single(lp->pci_dev, + newskb->data, + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); + rx_in_place = 1; + } else + skb = NULL; + } else + skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN); + + if (skb == NULL) { + netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n"); + dev->stats.rx_dropped++; + return; + } + if (!rx_in_place) { + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pkt_len); /* Make room */ + pci_dma_sync_single_for_cpu(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, + (unsigned char *)(lp->rx_skbuff[entry]->data), + pkt_len); + pci_dma_sync_single_for_device(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + } + dev->stats.rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + dev->stats.rx_packets++; +} + +static int pcnet32_rx(struct net_device *dev, int budget) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int entry = lp->cur_rx & lp->rx_mod_mask; + struct pcnet32_rx_head *rxp = &lp->rx_ring[entry]; + int npackets = 0; + + /* If we own the next entry, it's a new packet. Send it up. */ + while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) { + pcnet32_rx_entry(dev, lp, rxp, entry); + npackets += 1; + /* + * The docs say that the buffer length isn't touched, but Andrew + * Boyd of QNX reports that some revs of the 79C965 clear it. + */ + rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE); + wmb(); /* Make sure owner changes after others are visible */ + rxp->status = cpu_to_le16(0x8000); + entry = (++lp->cur_rx) & lp->rx_mod_mask; + rxp = &lp->rx_ring[entry]; + } + + return npackets; +} + +static int pcnet32_tx(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned int dirty_tx = lp->dirty_tx; + int delta; + int must_restart = 0; + + while (dirty_tx != lp->cur_tx) { + int entry = dirty_tx & lp->tx_mod_mask; + int status = (short)le16_to_cpu(lp->tx_ring[entry].status); + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x4000) { + /* There was a major error, log it. 
*/ + int err_status = le32_to_cpu(lp->tx_ring[entry].misc); + dev->stats.tx_errors++; + netif_err(lp, tx_err, dev, + "Tx error status=%04x err_status=%08x\n", + status, err_status); + if (err_status & 0x04000000) + dev->stats.tx_aborted_errors++; + if (err_status & 0x08000000) + dev->stats.tx_carrier_errors++; + if (err_status & 0x10000000) + dev->stats.tx_window_errors++; +#ifndef DO_DXSUFLO + if (err_status & 0x40000000) { + dev->stats.tx_fifo_errors++; + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); + must_restart = 1; + } +#else + if (err_status & 0x40000000) { + dev->stats.tx_fifo_errors++; + if (!lp->dxsuflo) { /* If controller doesn't recover ... */ + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); + must_restart = 1; + } + } +#endif + } else { + if (status & 0x1800) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + + /* We must free the original skb */ + if (lp->tx_skbuff[entry]) { + pci_unmap_single(lp->pci_dev, + lp->tx_dma_addr[entry], + lp->tx_skbuff[entry]-> + len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + lp->tx_dma_addr[entry] = 0; + } + dirty_tx++; + } + + delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); + if (delta > lp->tx_ring_size) { + netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n", + dirty_tx, lp->cur_tx, lp->tx_full); + dirty_tx += lp->tx_ring_size; + delta -= lp->tx_ring_size; + } + + if (lp->tx_full && + netif_queue_stopped(dev) && + delta < lp->tx_ring_size - 2) { + /* The ring is no longer full, clear tbusy. */ + lp->tx_full = 0; + netif_wake_queue(dev); + } + lp->dirty_tx = dirty_tx; + + return must_restart; +} + +static int pcnet32_poll(struct napi_struct *napi, int budget) +{ + struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); + struct net_device *dev = lp->dev; + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + int work_done; + u16 val; + + work_done = pcnet32_rx(dev, budget); + + spin_lock_irqsave(&lp->lock, flags); + if (pcnet32_tx(dev)) { + /* reset the chip to clear the error condition, then restart */ + lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + pcnet32_restart(dev, CSR0_START); + netif_wake_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + + if (work_done < budget) { + spin_lock_irqsave(&lp->lock, flags); + + __napi_complete(napi); + + /* clear interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a.write_csr(ioaddr, CSR3, val); + + /* Set interrupt enable. 
*/ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); + + spin_unlock_irqrestore(&lp->lock, flags); + } + return work_done; +} + +#define PCNET32_REGS_PER_PHY 32 +#define PCNET32_MAX_PHYS 32 +static int pcnet32_get_regs_len(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int j = lp->phycount * PCNET32_REGS_PER_PHY; + + return (PCNET32_NUM_REGS + j) * sizeof(u16); +} + +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *ptr) +{ + int i, csr0; + u16 *buff = ptr; + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + + csr0 = a->read_csr(ioaddr, CSR0); + if (!(csr0 & CSR0_STOP)) /* If not stopped */ + pcnet32_suspend(dev, &flags, 1); + + /* read address PROM */ + for (i = 0; i < 16; i += 2) + *buff++ = inw(ioaddr + i); + + /* read control and status registers */ + for (i = 0; i < 90; i++) + *buff++ = a->read_csr(ioaddr, i); + + *buff++ = a->read_csr(ioaddr, 112); + *buff++ = a->read_csr(ioaddr, 114); + + /* read bus configuration registers */ + for (i = 0; i < 30; i++) + *buff++ = a->read_bcr(ioaddr, i); + + *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ + + for (i = 31; i < 36; i++) + *buff++ = a->read_bcr(ioaddr, i); + + /* read mii phy registers */ + if (lp->mii) { + int j; + for (j = 0; j < PCNET32_MAX_PHYS; j++) { + if (lp->phymask & (1 << j)) { + for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { + lp->a.write_bcr(ioaddr, 33, + (j << 5) | i); + *buff++ = lp->a.read_bcr(ioaddr, 34); + } + } + } + } + + if (!(csr0 & CSR0_STOP)) { /* If not stopped */ + int csr5; + + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static const struct ethtool_ops pcnet32_ethtool_ops = { + .get_settings = pcnet32_get_settings, + .set_settings = pcnet32_set_settings, + .get_drvinfo = pcnet32_get_drvinfo, + .get_msglevel = pcnet32_get_msglevel, + .set_msglevel = pcnet32_set_msglevel, + .nway_reset = pcnet32_nway_reset, + .get_link = pcnet32_get_link, + .get_ringparam = pcnet32_get_ringparam, + .set_ringparam = pcnet32_set_ringparam, + .get_strings = pcnet32_get_strings, + .self_test = pcnet32_ethtool_test, + .set_phys_id = pcnet32_set_phys_id, + .get_regs_len = pcnet32_get_regs_len, + .get_regs = pcnet32_get_regs, + .get_sset_count = pcnet32_get_sset_count, +}; + +/* only probes for non-PCI devices, the rest are handled by + * pci_register_driver via pcnet32_probe_pci */ + +static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist) +{ + unsigned int *port, ioaddr; + + /* search for PCnet32 VLB cards at known addresses */ + for (port = pcnet32_portlist; (ioaddr = *port); port++) { + if (request_region + (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { + /* check if there is really a pcnet chip on that ioaddr */ + if ((inb(ioaddr + 14) == 0x57) && + (inb(ioaddr + 15) == 0x57)) { + pcnet32_probe1(ioaddr, 0, NULL); + } else { + release_region(ioaddr, PCNET32_TOTAL_SIZE); + } + } + } +} + +static int __devinit +pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned long ioaddr; + int err; + + err = pci_enable_device(pdev); + if (err < 0) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("failed to enable device -- err=%d\n", err); + return err; + } + pci_set_master(pdev); + + ioaddr = pci_resource_start(pdev, 0); + if (!ioaddr) { + if 
(pcnet32_debug & NETIF_MSG_PROBE) + pr_err("card has no PCI IO resources, aborting\n"); + return -ENODEV; + } + + if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("architecture does not support 32bit PCI busmaster DMA\n"); + return -ENODEV; + } + if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("io address range already allocated\n"); + return -EBUSY; + } + + err = pcnet32_probe1(ioaddr, 1, pdev); + if (err < 0) + pci_disable_device(pdev); + + return err; +} + +static const struct net_device_ops pcnet32_netdev_ops = { + .ndo_open = pcnet32_open, + .ndo_stop = pcnet32_close, + .ndo_start_xmit = pcnet32_start_xmit, + .ndo_tx_timeout = pcnet32_tx_timeout, + .ndo_get_stats = pcnet32_get_stats, + .ndo_set_rx_mode = pcnet32_set_multicast_list, + .ndo_do_ioctl = pcnet32_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = pcnet32_poll_controller, +#endif +}; + +/* pcnet32_probe1 + * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. + * pdev will be NULL when called from pcnet32_probe_vlbus. + */ +static int __devinit +pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) +{ + struct pcnet32_private *lp; + int i, media; + int fdx, mii, fset, dxsuflo; + int chip_version; + char *chipname; + struct net_device *dev; + struct pcnet32_access *a = NULL; + u8 promaddr[6]; + int ret = -ENODEV; + + /* reset the chip */ + pcnet32_wio_reset(ioaddr); + + /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ + if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { + a = &pcnet32_wio; + } else { + pcnet32_dwio_reset(ioaddr); + if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && + pcnet32_dwio_check(ioaddr)) { + a = &pcnet32_dwio; + } else { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("No access methods\n"); + goto err_release_region; + } + } + + chip_version = + a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); + if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) + pr_info(" PCnet chip version is %#x\n", chip_version); + if ((chip_version & 0xfff) != 0x003) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("Unsupported chip version\n"); + goto err_release_region; + } + + /* initialize variables */ + fdx = mii = fset = dxsuflo = 0; + chip_version = (chip_version >> 12) & 0xffff; + + switch (chip_version) { + case 0x2420: + chipname = "PCnet/PCI 79C970"; /* PCI */ + break; + case 0x2430: + if (shared) + chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ + else + chipname = "PCnet/32 79C965"; /* 486/VL bus */ + break; + case 0x2621: + chipname = "PCnet/PCI II 79C970A"; /* PCI */ + fdx = 1; + break; + case 0x2623: + chipname = "PCnet/FAST 79C971"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2624: + chipname = "PCnet/FAST+ 79C972"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2625: + chipname = "PCnet/FAST III 79C973"; /* PCI */ + fdx = 1; + mii = 1; + break; + case 0x2626: + chipname = "PCnet/Home 79C978"; /* PCI */ + fdx = 1; + /* + * This is based on specs published at www.amd.com. This section + * assumes that a card with a 79C978 wants to go into standard + * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, + * and the module option homepna=1 can select this instead. 
+ */ + media = a->read_bcr(ioaddr, 49); + media &= ~3; /* default to 10Mb ethernet */ + if (cards_found < MAX_UNITS && homepna[cards_found]) + media |= 1; /* switch to home wiring mode */ + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_DEBUG PFX "media set to %sMbit mode\n", + (media & 1) ? "1" : "10"); + a->write_bcr(ioaddr, 49, media); + break; + case 0x2627: + chipname = "PCnet/FAST III 79C975"; /* PCI */ + fdx = 1; + mii = 1; + break; + case 0x2628: + chipname = "PCnet/PRO 79C976"; + fdx = 1; + mii = 1; + break; + default: + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("PCnet version %#x, no PCnet32 chip\n", + chip_version); + goto err_release_region; + } + + /* + * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit + * starting until the packet is loaded. Strike one for reliability, lose + * one for latency - although on PCI this isn't a big loss. Older chips + * have FIFO's smaller than a packet, so you can't do this. + * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. + */ + + if (fset) { + a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); + a->write_csr(ioaddr, 80, + (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); + dxsuflo = 1; + } + + dev = alloc_etherdev(sizeof(*lp)); + if (!dev) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("Memory allocation failed\n"); + ret = -ENOMEM; + goto err_release_region; + } + + if (pdev) + SET_NETDEV_DEV(dev, &pdev->dev); + + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("%s at %#3lx,", chipname, ioaddr); + + /* In most chips, after a chip reset, the ethernet address is read from the + * station address PROM at the base address and programmed into the + * "Physical Address Registers" CSR12-14. + * As a precautionary measure, we read the PROM values and complain if + * they disagree with the CSRs. If they miscompare, and the PROM addr + * is valid, then the PROM addr is used. + */ + for (i = 0; i < 3; i++) { + unsigned int val; + val = a->read_csr(ioaddr, i + 12) & 0x0ffff; + /* There may be endianness issues here. 
*/ + dev->dev_addr[2 * i] = val & 0x0ff; + dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; + } + + /* read PROM address and compare with CSR address */ + for (i = 0; i < 6; i++) + promaddr[i] = inb(ioaddr + i); + + if (memcmp(promaddr, dev->dev_addr, 6) || + !is_valid_ether_addr(dev->dev_addr)) { + if (is_valid_ether_addr(promaddr)) { + if (pcnet32_debug & NETIF_MSG_PROBE) { + pr_cont(" warning: CSR address invalid,\n"); + pr_info(" using instead PROM address of"); + } + memcpy(dev->dev_addr, promaddr, 6); + } + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ + if (!is_valid_ether_addr(dev->perm_addr)) + memset(dev->dev_addr, 0, ETH_ALEN); + + if (pcnet32_debug & NETIF_MSG_PROBE) { + pr_cont(" %pM", dev->dev_addr); + + /* Version 0x2623 and 0x2624 */ + if (((chip_version + 1) & 0xfffe) == 0x2624) { + i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ + pr_info(" tx_start_pt(0x%04x):", i); + switch (i >> 10) { + case 0: + pr_cont(" 20 bytes,"); + break; + case 1: + pr_cont(" 64 bytes,"); + break; + case 2: + pr_cont(" 128 bytes,"); + break; + case 3: + pr_cont("~220 bytes,"); + break; + } + i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ + pr_cont(" BCR18(%x):", i & 0xffff); + if (i & (1 << 5)) + pr_cont("BurstWrEn "); + if (i & (1 << 6)) + pr_cont("BurstRdEn "); + if (i & (1 << 7)) + pr_cont("DWordIO "); + if (i & (1 << 11)) + pr_cont("NoUFlow "); + i = a->read_bcr(ioaddr, 25); + pr_info(" SRAMSIZE=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 26); + pr_cont(" SRAM_BND=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 27); + if (i & (1 << 14)) + pr_cont("LowLatRx"); + } + } + + dev->base_addr = ioaddr; + lp = netdev_priv(dev); + /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ + lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block), + &lp->init_dma_addr); + if (!lp->init_block) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("Consistent memory allocation failed\n"); + ret = -ENOMEM; + goto err_free_netdev; + } + lp->pci_dev = pdev; + + lp->dev = dev; + + spin_lock_init(&lp->lock); + + lp->name = chipname; + lp->shared_irq = shared; + lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ + lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); + lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); + lp->mii_if.full_duplex = fdx; + lp->mii_if.phy_id_mask = 0x1f; + lp->mii_if.reg_num_mask = 0x1f; + lp->dxsuflo = dxsuflo; + lp->mii = mii; + lp->chip_version = chip_version; + lp->msg_enable = pcnet32_debug; + if ((cards_found >= MAX_UNITS) || + (options[cards_found] >= sizeof(options_mapping))) + lp->options = PCNET32_PORT_ASEL; + else + lp->options = options_mapping[options[cards_found]]; + lp->mii_if.dev = dev; + lp->mii_if.mdio_read = mdio_read; + lp->mii_if.mdio_write = mdio_write; + + /* napi.weight is used in both the napi and non-napi cases */ + lp->napi.weight = lp->rx_ring_size / 2; + + netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); + + if (fdx && !(lp->options & PCNET32_PORT_ASEL) && + ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) + lp->options |= PCNET32_PORT_FD; + + lp->a = *a; + + /* prior to register_netdev, dev->name is not yet correct */ + if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { + ret = -ENOMEM; + goto err_free_ring; + } + /* 
detect special T1/E1 WAN card by checking for MAC address */ + if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && + dev->dev_addr[2] == 0x75) + lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; + + lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ + lp->init_block->tlen_rlen = + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->filter[0] = 0x00000000; + lp->init_block->filter[1] = 0x00000000; + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); + + /* switch pcnet32 to 32bit mode */ + a->write_bcr(ioaddr, 20, 2); + + a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); + a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); + + if (pdev) { /* use the IRQ provided by PCI */ + dev->irq = pdev->irq; + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(" assigned IRQ %d\n", dev->irq); + } else { + unsigned long irq_mask = probe_irq_on(); + + /* + * To auto-IRQ we enable the initialization-done and DMA error + * interrupts. For ISA boards we get a DMA error, but VLB and PCI + * boards will work. + */ + /* Trigger an initialization just for the interrupt. */ + a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT); + mdelay(1); + + dev->irq = probe_irq_off(irq_mask); + if (!dev->irq) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(", failed to detect IRQ line\n"); + ret = -ENODEV; + goto err_free_ring; + } + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(", probed IRQ %d\n", dev->irq); + } + + /* Set the mii phy_id so that we can query the link state */ + if (lp->mii) { + /* lp->phycount and lp->phymask are set to 0 by memset above */ + + lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; + /* scan for PHYs */ + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + unsigned short id1, id2; + + id1 = mdio_read(dev, i, MII_PHYSID1); + if (id1 == 0xffff) + continue; + id2 = mdio_read(dev, i, MII_PHYSID2); + if (id2 == 0xffff) + continue; + if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) + continue; /* 79C971 & 79C972 have phantom phy at id 31 */ + lp->phycount++; + lp->phymask |= (1 << i); + lp->mii_if.phy_id = i; + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("Found PHY %04x:%04x at address %d\n", + id1, id2, i); + } + lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); + if (lp->phycount > 1) + lp->options |= PCNET32_PORT_MII; + } + + init_timer(&lp->watchdog_timer); + lp->watchdog_timer.data = (unsigned long)dev; + lp->watchdog_timer.function = (void *)&pcnet32_watchdog; + + /* The PCNET32-specific entries in the device structure. */ + dev->netdev_ops = &pcnet32_netdev_ops; + dev->ethtool_ops = &pcnet32_ethtool_ops; + dev->watchdog_timeo = (5 * HZ); + + /* Fill in the generic fields of the device structure. 
*/ + if (register_netdev(dev)) + goto err_free_ring; + + if (pdev) { + pci_set_drvdata(pdev, dev); + } else { + lp->next = pcnet32_dev; + pcnet32_dev = dev; + } + + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("%s: registered as %s\n", dev->name, lp->name); + cards_found++; + + /* enable LED writes */ + a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); + + return 0; + +err_free_ring: + pcnet32_free_ring(dev); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); +err_free_netdev: + free_netdev(dev); +err_release_region: + release_region(ioaddr, PCNET32_TOTAL_SIZE); + return ret; +} + +/* if any allocation fails, caller must also call pcnet32_free_ring */ +static int pcnet32_alloc_ring(struct net_device *dev, const char *name) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + lp->tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, + &lp->tx_ring_dma_addr); + if (lp->tx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return -ENOMEM; + } + + lp->rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, + &lp->rx_ring_dma_addr); + if (lp->rx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return -ENOMEM; + } + + lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), + GFP_ATOMIC); + if (!lp->tx_dma_addr) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), + GFP_ATOMIC); + if (!lp->rx_dma_addr) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!lp->tx_skbuff) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!lp->rx_skbuff) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + return 0; +} + +static void pcnet32_free_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + kfree(lp->tx_skbuff); + lp->tx_skbuff = NULL; + + kfree(lp->rx_skbuff); + lp->rx_skbuff = NULL; + + kfree(lp->tx_dma_addr); + lp->tx_dma_addr = NULL; + + kfree(lp->rx_dma_addr); + lp->rx_dma_addr = NULL; + + if (lp->tx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, lp->tx_ring, + lp->tx_ring_dma_addr); + lp->tx_ring = NULL; + } + + if (lp->rx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + lp->rx_ring = NULL; + } +} + +static int pcnet32_open(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct pci_dev *pdev = lp->pci_dev; + unsigned long ioaddr = dev->base_addr; + u16 val; + int i; + int rc; + unsigned long flags; + + if (request_irq(dev->irq, pcnet32_interrupt, + lp->shared_irq ? 
IRQF_SHARED : 0, dev->name, + (void *)dev)) { + return -EAGAIN; + } + + spin_lock_irqsave(&lp->lock, flags); + /* Check for a valid station address */ + if (!is_valid_ether_addr(dev->dev_addr)) { + rc = -EINVAL; + goto err_free_irq; + } + + /* Reset the PCNET32 */ + lp->a.reset(ioaddr); + + /* switch pcnet32 to 32bit mode */ + lp->a.write_bcr(ioaddr, 20, 2); + + netif_printk(lp, ifup, KERN_DEBUG, dev, + "%s() irq %d tx/rx rings %#x/%#x init %#x\n", + __func__, dev->irq, (u32) (lp->tx_ring_dma_addr), + (u32) (lp->rx_ring_dma_addr), + (u32) (lp->init_dma_addr)); + + /* set/reset autoselect bit */ + val = lp->a.read_bcr(ioaddr, 2) & ~2; + if (lp->options & PCNET32_PORT_ASEL) + val |= 2; + lp->a.write_bcr(ioaddr, 2, val); + + /* handle full duplex setting */ + if (lp->mii_if.full_duplex) { + val = lp->a.read_bcr(ioaddr, 9) & ~3; + if (lp->options & PCNET32_PORT_FD) { + val |= 1; + if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) + val |= 2; + } else if (lp->options & PCNET32_PORT_ASEL) { + /* workaround of xSeries250, turn on for 79C975 only */ + if (lp->chip_version == 0x2627) + val |= 3; + } + lp->a.write_bcr(ioaddr, 9, val); + } + + /* set/reset GPSI bit in test register */ + val = lp->a.read_csr(ioaddr, 124) & ~0x10; + if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) + val |= 0x10; + lp->a.write_csr(ioaddr, 124, val); + + /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ + if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && + (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || + pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { + if (lp->options & PCNET32_PORT_ASEL) { + lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; + netif_printk(lp, link, KERN_DEBUG, dev, + "Setting 100Mb-Full Duplex\n"); + } + } + if (lp->phycount < 2) { + /* + * 24 Jun 2004 according AMD, in order to change the PHY, + * DANAS (or DISPM for 79C976) must be set; then select the speed, + * duplex, and/or enable auto negotiation, and clear DANAS + */ + if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { + lp->a.write_bcr(ioaddr, 32, + lp->a.read_bcr(ioaddr, 32) | 0x0080); + /* disable Auto Negotiation, set 10Mpbs, HD */ + val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; + if (lp->options & PCNET32_PORT_FD) + val |= 0x10; + if (lp->options & PCNET32_PORT_100) + val |= 0x08; + lp->a.write_bcr(ioaddr, 32, val); + } else { + if (lp->options & PCNET32_PORT_ASEL) { + lp->a.write_bcr(ioaddr, 32, + lp->a.read_bcr(ioaddr, + 32) | 0x0080); + /* enable auto negotiate, setup, disable fd */ + val = lp->a.read_bcr(ioaddr, 32) & ~0x98; + val |= 0x20; + lp->a.write_bcr(ioaddr, 32, val); + } + } + } else { + int first_phy = -1; + u16 bmcr; + u32 bcr9; + struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + + /* + * There is really no good other way to handle multiple PHYs + * other than turning off all automatics + */ + val = lp->a.read_bcr(ioaddr, 2); + lp->a.write_bcr(ioaddr, 2, val & ~2); + val = lp->a.read_bcr(ioaddr, 32); + lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ + + if (!(lp->options & PCNET32_PORT_ASEL)) { + /* setup ecmd */ + ecmd.port = PORT_MII; + ecmd.transceiver = XCVR_INTERNAL; + ecmd.autoneg = AUTONEG_DISABLE; + ethtool_cmd_speed_set(&ecmd, + (lp->options & PCNET32_PORT_100) ? 
+ SPEED_100 : SPEED_10);
+ bcr9 = lp->a.read_bcr(ioaddr, 9);
+
+ if (lp->options & PCNET32_PORT_FD) {
+ ecmd.duplex = DUPLEX_FULL;
+ bcr9 |= (1 << 0);
+ } else {
+ ecmd.duplex = DUPLEX_HALF;
+ bcr9 &= ~(1 << 0);
+ }
+ lp->a.write_bcr(ioaddr, 9, bcr9);
+ }
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (lp->phymask & (1 << i)) {
+ /* isolate all but the first PHY */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ if (first_phy == -1) {
+ first_phy = i;
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ } else {
+ mdio_write(dev, i, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+ }
+ /* use mii_ethtool_sset to setup PHY */
+ lp->mii_if.phy_id = i;
+ ecmd.phy_address = i;
+ if (lp->options & PCNET32_PORT_ASEL) {
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ ecmd.autoneg = AUTONEG_ENABLE;
+ }
+ mii_ethtool_sset(&lp->mii_if, &ecmd);
+ }
+ }
+ lp->mii_if.phy_id = first_phy;
+ netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
+ }
+
+#ifdef DO_DXSUFLO
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a.read_csr(ioaddr, CSR3);
+ val |= 0x40;
+ lp->a.write_csr(ioaddr, CSR3, val);
+ }
+#endif
+
+ lp->init_block->mode =
+ cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+
+ if (pcnet32_init_ring(dev)) {
+ rc = -ENOMEM;
+ goto err_free_ring;
+ }
+
+ napi_enable(&lp->napi);
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+ lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
+
+ netif_start_queue(dev);
+
+ if (lp->chip_version >= PCNET32_79C970A) {
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
+ }
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
+
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+ i,
+ (u32) (lp->init_dma_addr),
+ lp->a.read_csr(ioaddr, CSR0));
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0; /* Always succeed */
+
+err_free_ring:
+ /* free any allocated skbuffs */
+ pcnet32_purge_rx_ring(dev);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a.write_bcr(ioaddr, 20, 4);
+
+err_free_irq:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ free_irq(dev->irq, dev);
+ return rc;
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. 
dplatt@3do.com + */ + +static void pcnet32_purge_tx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->tx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[i]); + } + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; + } +} + +/* Initialize the PCNET32 Rx and Tx rings. */ +static int pcnet32_init_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + lp->tx_full = 0; + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_rx = lp->dirty_tx = 0; + + for (i = 0; i < lp->rx_ring_size; i++) { + struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; + if (rx_skbuff == NULL) { + lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB); + rx_skbuff = lp->rx_skbuff[i]; + if (!rx_skbuff) { + /* there is not much we can do at this point */ + netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n", + __func__); + return -1; + } + skb_reserve(rx_skbuff, NET_IP_ALIGN); + } + + rmb(); + if (lp->rx_dma_addr[i] == 0) + lp->rx_dma_addr[i] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); + lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); + wmb(); /* Make sure owner changes after all others are visible */ + lp->rx_ring[i].status = cpu_to_le16(0x8000); + } + /* The Tx buffer address is filled in as needed, but we do need to clear + * the upper ownership bit. */ + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + lp->tx_ring[i].base = 0; + lp->tx_dma_addr[i] = 0; + } + + lp->init_block->tlen_rlen = + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); + wmb(); /* Make sure all changes are visible */ + return 0; +} + +/* the pcnet32 has been issued a stop or reset. Wait for the stop bit + * then flush the pending transmit operations, re-initialize the ring, + * and tell the chip to initialize. + */ +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + int i; + + /* wait for stop */ + for (i = 0; i < 100; i++) + if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP) + break; + + if (i >= 100) + netif_err(lp, drv, dev, "%s timed out waiting for stop\n", + __func__); + + pcnet32_purge_tx_ring(dev); + if (pcnet32_init_ring(dev)) + return; + + /* ReInit Ring */ + lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); + i = 0; + while (i++ < 1000) + if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) + break; + + lp->a.write_csr(ioaddr, CSR0, csr0_bits); +} + +static void pcnet32_tx_timeout(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr, flags; + + spin_lock_irqsave(&lp->lock, flags); + /* Transmitter timeout, serious problems. 
*/ + if (pcnet32_debug & NETIF_MSG_DRV) + pr_err("%s: transmit timed out, status %4.4x, resetting\n", + dev->name, lp->a.read_csr(ioaddr, CSR0)); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); + dev->stats.tx_errors++; + if (netif_msg_tx_err(lp)) { + int i; + printk(KERN_DEBUG + " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", + lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", + lp->cur_rx); + for (i = 0; i < lp->rx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", + le32_to_cpu(lp->rx_ring[i].base), + (-le16_to_cpu(lp->rx_ring[i].buf_length)) & + 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), + le16_to_cpu(lp->rx_ring[i].status)); + for (i = 0; i < lp->tx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", + le32_to_cpu(lp->tx_ring[i].base), + (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, + le32_to_cpu(lp->tx_ring[i].misc), + le16_to_cpu(lp->tx_ring[i].status)); + printk("\n"); + } + pcnet32_restart(dev, CSR0_NORMAL); + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + u16 status; + int entry; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + + netif_printk(lp, tx_queued, KERN_DEBUG, dev, + "%s() called, csr0 %4.4x\n", + __func__, lp->a.read_csr(ioaddr, CSR0)); + + /* Default status -- will not enable Successful-TxDone + * interrupt when that option is available to us. + */ + status = 0x8300; + + /* Fill in a Tx ring entry */ + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & lp->tx_mod_mask; + + /* Caution: the write order is important here, set the status + * with the "ownership" bits last. */ + + lp->tx_ring[entry].length = cpu_to_le16(-skb->len); + + lp->tx_ring[entry].misc = 0x00000000; + + lp->tx_skbuff[entry] = skb; + lp->tx_dma_addr[entry] = + pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[entry].status = cpu_to_le16(status); + + lp->cur_tx++; + dev->stats.tx_bytes += skb->len; + + /* Trigger an immediate send poll. */ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); + + if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { + lp->tx_full = 1; + netif_stop_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; +} + +/* The PCNET32 interrupt handler. */ +static irqreturn_t +pcnet32_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct pcnet32_private *lp; + unsigned long ioaddr; + u16 csr0; + int boguscnt = max_interrupt_work; + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + + spin_lock(&lp->lock); + + csr0 = lp->a.read_csr(ioaddr, CSR0); + while ((csr0 & 0x8f00) && --boguscnt >= 0) { + if (csr0 == 0xffff) + break; /* PCMCIA remove happened */ + /* Acknowledge all of the current interrupt sources ASAP. */ + lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f); + + netif_printk(lp, intr, KERN_DEBUG, dev, + "interrupt csr0=%#2.2x new csr=%#2.2x\n", + csr0, lp->a.read_csr(ioaddr, CSR0)); + + /* Log misc errors. */ + if (csr0 & 0x4000) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & 0x1000) { + /* + * This happens when our receive ring is full. 
This + * shouldn't be a problem as we will see normal rx + * interrupts for the frames in the receive ring. But + * there are some PCI chipsets (I can reproduce this + * on SP3G with Intel saturn chipset) which have + * sometimes problems and will fill up the receive + * ring with error descriptors. In this situation we + * don't get a rx interrupt, but a missed frame + * interrupt sooner or later. + */ + dev->stats.rx_errors++; /* Missed a Rx frame. */ + } + if (csr0 & 0x0800) { + netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", + csr0); + /* unlike for the lance, there is no restart needed */ + } + if (napi_schedule_prep(&lp->napi)) { + u16 val; + /* set interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val |= 0x5f00; + lp->a.write_csr(ioaddr, CSR3, val); + + __napi_schedule(&lp->napi); + break; + } + csr0 = lp->a.read_csr(ioaddr, CSR0); + } + + netif_printk(lp, intr, KERN_DEBUG, dev, + "exiting interrupt, csr0=%#4.4x\n", + lp->a.read_csr(ioaddr, CSR0)); + + spin_unlock(&lp->lock); + + return IRQ_HANDLED; +} + +static int pcnet32_close(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + + del_timer_sync(&lp->watchdog_timer); + + netif_stop_queue(dev); + napi_disable(&lp->napi); + + spin_lock_irqsave(&lp->lock, flags); + + dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); + + netif_printk(lp, ifdown, KERN_DEBUG, dev, + "Shutting down ethercard, status was %2.2x\n", + lp->a.read_csr(ioaddr, CSR0)); + + /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); + + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ + lp->a.write_bcr(ioaddr, 20, 4); + + spin_unlock_irqrestore(&lp->lock, flags); + + free_irq(dev->irq, dev); + + spin_lock_irqsave(&lp->lock, flags); + + pcnet32_purge_rx_ring(dev); + pcnet32_purge_tx_ring(dev); + + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; +} + +static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); + spin_unlock_irqrestore(&lp->lock, flags); + + return &dev->stats; +} + +/* taken from the sunlance driver, which it took from the depca driver */ +static void pcnet32_load_multicast(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + volatile struct pcnet32_init_block *ib = lp->init_block; + volatile __le16 *mcast_table = (__le16 *)ib->filter; + struct netdev_hw_addr *ha; + unsigned long ioaddr = dev->base_addr; + int i; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = cpu_to_le32(~0U); + ib->filter[1] = cpu_to_le32(~0U); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); + } + for (i = 0; i < 4; i++) + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, + 
le16_to_cpu(mcast_table[i])); +} + +/* + * Set or clear the multicast filter for this adaptor. + */ +static void pcnet32_set_multicast_list(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr, flags; + struct pcnet32_private *lp = netdev_priv(dev); + int csr15, suspended; + + spin_lock_irqsave(&lp->lock, flags); + suspended = pcnet32_suspend(dev, &flags, 0); + csr15 = lp->a.read_csr(ioaddr, CSR15); + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. */ + netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); + lp->init_block->mode = + cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << + 7); + lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); + } else { + lp->init_block->mode = + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); + lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); + pcnet32_load_multicast(dev); + } + + if (suspended) { + int csr5; + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = lp->a.read_csr(ioaddr, CSR5); + lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } else { + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); + pcnet32_restart(dev, CSR0_NORMAL); + netif_wake_queue(dev); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* This routine assumes that the lp->lock is held */ +static int mdio_read(struct net_device *dev, int phy_id, int reg_num) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + u16 val_out; + + if (!lp->mii) + return 0; + + lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + val_out = lp->a.read_bcr(ioaddr, 34); + + return val_out; +} + +/* This routine assumes that the lp->lock is held */ +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + if (!lp->mii) + return; + + lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + lp->a.write_bcr(ioaddr, 34, val); +} + +static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int rc; + unsigned long flags; + + /* SIOC[GS]MIIxxx ioctls */ + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int pcnet32_check_otherphy(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct mii_if_info mii = lp->mii_if; + u16 bmcr; + int i; + + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + if (i == lp->mii_if.phy_id) + continue; /* skip active phy */ + if (lp->phymask & (1 << i)) { + mii.phy_id = i; + if (mii_link_ok(&mii)) { + /* found PHY with active link */ + netif_info(lp, link, dev, "Using PHY number %d\n", + i); + + /* isolate inactive phy */ + bmcr = + mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); + mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, + bmcr | BMCR_ISOLATE); + + /* de-isolate new phy */ + bmcr = mdio_read(dev, i, MII_BMCR); + mdio_write(dev, i, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* set new phy address */ + lp->mii_if.phy_id = i; + return 1; + } + } + } + return 0; +} + +/* + * Show the status of the media. Similar to mii_check_media however it + * correctly shows the link speed for all (tested) pcnet32 variants. + * Devices with no mii just report link state without speed. + * + * Caller is assumed to hold and release the lp->lock. 
+ */ + +static void pcnet32_check_media(struct net_device *dev, int verbose) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int curr_link; + int prev_link = netif_carrier_ok(dev) ? 1 : 0; + u32 bcr9; + + if (lp->mii) { + curr_link = mii_link_ok(&lp->mii_if); + } else { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } + if (!curr_link) { + if (prev_link || verbose) { + netif_carrier_off(dev); + netif_info(lp, link, dev, "link down\n"); + } + if (lp->phycount > 1) { + curr_link = pcnet32_check_otherphy(dev); + prev_link = 0; + } + } else if (verbose || !prev_link) { + netif_carrier_on(dev); + if (lp->mii) { + if (netif_msg_link(lp)) { + struct ethtool_cmd ecmd = { + .cmd = ETHTOOL_GSET }; + mii_ethtool_gset(&lp->mii_if, &ecmd); + netdev_info(dev, "link up, %uMbps, %s-duplex\n", + ethtool_cmd_speed(&ecmd), + (ecmd.duplex == DUPLEX_FULL) + ? "full" : "half"); + } + bcr9 = lp->a.read_bcr(dev->base_addr, 9); + if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { + if (lp->mii_if.full_duplex) + bcr9 |= (1 << 0); + else + bcr9 &= ~(1 << 0); + lp->a.write_bcr(dev->base_addr, 9, bcr9); + } + } else { + netif_info(lp, link, dev, "link up\n"); + } + } +} + +/* + * Check for loss of link and link establishment. + * Can not use mii_check_media because it does nothing if mode is forced. + */ + +static void pcnet32_watchdog(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + + /* Print the link status if it has changed */ + spin_lock_irqsave(&lp->lock, flags); + pcnet32_check_media(dev, 0); + spin_unlock_irqrestore(&lp->lock, flags); + + mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); +} + +static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) { + netif_device_detach(dev); + pcnet32_close(dev); + } + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int pcnet32_pm_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (netif_running(dev)) { + pcnet32_open(dev); + netif_device_attach(dev); + } + return 0; +} + +static void __devexit pcnet32_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct pcnet32_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + pcnet32_free_ring(dev); + release_region(dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); + free_netdev(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +static struct pci_driver pcnet32_driver = { + .name = DRV_NAME, + .probe = pcnet32_probe_pci, + .remove = __devexit_p(pcnet32_remove_one), + .id_table = pcnet32_pci_tbl, + .suspend = pcnet32_pm_suspend, + .resume = pcnet32_pm_resume, +}; + +/* An additional parameter that may be passed in... 
*/ +static int debug = -1; +static int tx_start_pt = -1; +static int pcnet32_have_pci; + +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, DRV_NAME " debug level"); +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(max_interrupt_work, + DRV_NAME " maximum events handled per interrupt"); +module_param(rx_copybreak, int, 0); +MODULE_PARM_DESC(rx_copybreak, + DRV_NAME " copy breakpoint for copy-only-tiny-frames"); +module_param(tx_start_pt, int, 0); +MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); +module_param(pcnet32vlb, int, 0); +MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)"); +module_param_array(options, int, NULL, 0); +MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)"); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); +/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ +module_param_array(homepna, int, NULL, 0); +MODULE_PARM_DESC(homepna, + DRV_NAME + " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); + +MODULE_AUTHOR("Thomas Bogendoerfer"); +MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); +MODULE_LICENSE("GPL"); + +#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int __init pcnet32_init_module(void) +{ + pr_info("%s", version); + + pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); + + if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) + tx_start = tx_start_pt; + + /* find the PCI devices */ + if (!pci_register_driver(&pcnet32_driver)) + pcnet32_have_pci = 1; + + /* should we find any remaining VLbus devices ? */ + if (pcnet32vlb) + pcnet32_probe_vlbus(pcnet32_portlist); + + if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) + pr_info("%d cards_found\n", cards_found); + + return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; +} + +static void __exit pcnet32_cleanup_module(void) +{ + struct net_device *next_dev; + + while (pcnet32_dev) { + struct pcnet32_private *lp = netdev_priv(pcnet32_dev); + next_dev = lp->next; + unregister_netdev(pcnet32_dev); + pcnet32_free_ring(pcnet32_dev); + release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); + free_netdev(pcnet32_dev); + pcnet32_dev = next_dev; + } + + if (pcnet32_have_pci) + pci_unregister_driver(&pcnet32_driver); +} + +module_init(pcnet32_init_module); +module_exit(pcnet32_cleanup_module); + +/* + * Local variables: + * c-indent-level: 4 + * tab-width: 8 + * End: + */ diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 23b37dd79df3,000000000000..93bff08c87ad mode 100644,000000..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@@ -1,3577 -1,0 +1,3598 @@@ +/* bnx2x_cmn.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnx2x_cmn.h" +#include "bnx2x_init.h" +#include "bnx2x_sp.h" + + + +/** + * bnx2x_bz_fp - zero content of the fastpath structure. + * + * @bp: driver handle + * @index: fastpath index to be zeroed + * + * Makes sure the contents of the bp->fp[index].napi is kept + * intact. + */ +static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) +{ + struct bnx2x_fastpath *fp = &bp->fp[index]; + struct napi_struct orig_napi = fp->napi; + /* bzero bnx2x_fastpath contents */ + memset(fp, 0, sizeof(*fp)); + + /* Restore the NAPI object as it has been already initialized */ + fp->napi = orig_napi; + + fp->bp = bp; + fp->index = index; + if (IS_ETH_FP(fp)) + fp->max_cos = bp->max_cos; + else + /* Special queues support only one CoS */ + fp->max_cos = 1; + + /* + * set the tpa flag for each queue. The tpa flag determines the queue + * minimal size so it must be set prior to queue memory allocation + */ + fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); + +#ifdef BCM_CNIC - /* We don't want TPA on FCoE, FWD and OOO L2 rings */ - bnx2x_fcoe(bp, disable_tpa) = 1; ++ /* We don't want TPA on an FCoE L2 ring */ ++ if (IS_FCOE_FP(fp)) ++ fp->disable_tpa = 1; +#endif +} + +/** + * bnx2x_move_fp - move content of the fastpath structure. + * + * @bp: driver handle + * @from: source FP index + * @to: destination FP index + * + * Makes sure the contents of the bp->fp[to].napi is kept + * intact. + */ +static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) +{ + struct bnx2x_fastpath *from_fp = &bp->fp[from]; + struct bnx2x_fastpath *to_fp = &bp->fp[to]; + struct napi_struct orig_napi = to_fp->napi; + /* Move bnx2x_fastpath contents */ + memcpy(to_fp, from_fp, sizeof(*to_fp)); + to_fp->index = to; + + /* Restore the NAPI object as it has been already initialized */ + to_fp->napi = orig_napi; +} + +int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ + +/* free skb in the packet ring at pos idx + * return idx of last bd freed + */ +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, + u16 idx) +{ + struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_bd *tx_data_bd; + struct sk_buff *skb = tx_buf->skb; + u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; + int nbd; + + /* prefetch skb end pointer to speedup dev_kfree_skb() */ + prefetch(&skb->end); + + DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", + txdata->txq_index, idx, tx_buf, skb); + + /* unmap first bd */ + DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); + tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); + + + nbd = le16_to_cpu(tx_start_bd->nbd) - 1; +#ifdef BNX2X_STOP_ON_ERROR + if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { + BNX2X_ERR("BAD nbd!\n"); + bnx2x_panic(); + } +#endif + new_cons = nbd + tx_buf->first_bd; + + /* Get the next bd */ + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + /* Skip a parse bd... 
*/ + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + /* ...and the TSO split header bd since they have no mapping */ + if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* now free frags */ + while (nbd > 0) { + + DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + if (--nbd) + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* release skb */ + WARN_ON(!skb); + dev_kfree_skb_any(skb); + tx_buf->first_bd = 0; + tx_buf->skb = NULL; + + return new_cons; +} + +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) +{ + struct netdev_queue *txq; + u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -1; +#endif + + txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + sw_cons = txdata->tx_pkt_cons; + + while (sw_cons != hw_cons) { + u16 pkt_cons; + + pkt_cons = TX_BD(sw_cons); + + DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " + " pkt_cons %u\n", + txdata->txq_index, hw_cons, sw_cons, pkt_cons); + + bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); + sw_cons++; + } + + txdata->tx_pkt_cons = sw_cons; + txdata->tx_bd_cons = bd_cons; + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. + * On the other hand we need an rmb() here to ensure the proper + * ordering of bit testing in the following + * netif_tx_queue_stopped(txq) call. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq))) { + /* Taking tx_lock() is needed to prevent reenabling the queue + * while it's empty. 
This could have happen if rx_action() gets + * suspended in bnx2x_tx_int() after the condition before + * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): + * + * stops the queue->sees fresh tx_bd_cons->releases the queue-> + * sends some packets consuming the whole queue again-> + * stops the queue + */ + + __netif_tx_lock(txq, smp_processor_id()); + + if ((netif_tx_queue_stopped(txq)) && + (bp->state == BNX2X_STATE_OPEN) && + (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)) + netif_tx_wake_queue(txq); + + __netif_tx_unlock(txq); + } + return 0; +} + +static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, + u16 idx) +{ + u16 last_max = fp->last_max_sge; + + if (SUB_S16(idx, last_max) > 0) + fp->last_max_sge = idx; +} + +static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, + struct eth_fast_path_rx_cqe *fp_cqe) +{ + struct bnx2x *bp = fp->bp; + u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - + le16_to_cpu(fp_cqe->len_on_bd)) >> + SGE_PAGE_SHIFT; + u16 last_max, last_elem, first_elem; + u16 delta = 0; + u16 i; + + if (!sge_len) + return; + + /* First mark all used pages */ + for (i = 0; i < sge_len; i++) + BIT_VEC64_CLEAR_BIT(fp->sge_mask, + RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); + + DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", + sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); + + /* Here we assume that the last SGE index is the biggest */ + prefetch((void *)(fp->sge_mask)); + bnx2x_update_last_max_sge(fp, + le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); + + last_max = RX_SGE(fp->last_max_sge); + last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; + first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; + + /* If ring is not full */ + if (last_elem + 1 != first_elem) + last_elem++; + + /* Now update the prod */ + for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { + if (likely(fp->sge_mask[i])) + break; + + fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; + delta += BIT_VEC64_ELEM_SZ; + } + + if (delta > 0) { + fp->rx_sge_prod += delta; + /* clear page-end entries */ + bnx2x_clear_sge_mask_next_elems(fp); + } + + DP(NETIF_MSG_RX_STATUS, + "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", + fp->last_max_sge, fp->rx_sge_prod); +} + +static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, + struct sk_buff *skb, u16 cons, u16 prod, + struct eth_fast_path_rx_cqe *cqe) +{ + struct bnx2x *bp = fp->bp; + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + dma_addr_t mapping; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + + /* print error if current state != stop */ + if (tpa_info->tpa_state != BNX2X_TPA_STOP) + BNX2X_ERR("start of bin not in stop [%d]\n", queue); + + /* Try to map an empty skb from the aggregation info */ + mapping = dma_map_single(&bp->pdev->dev, + first_buf->skb->data, + fp->rx_buf_size, DMA_FROM_DEVICE); + /* + * ...if it fails - move the skb from the consumer to the producer + * and set the current aggregation state as ERROR to drop it + * when TPA_STOP arrives. 
+ */ + + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + /* Move the BD from the consumer to the producer */ + bnx2x_reuse_rx_skb(fp, cons, prod); + tpa_info->tpa_state = BNX2X_TPA_ERROR; + return; + } + + /* move empty skb from pool to prod */ + prod_rx_buf->skb = first_buf->skb; + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + /* point prod_bd to new skb */ + prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + /* move partial skb from cons to pool (don't unmap yet) */ + *first_buf = *cons_rx_buf; + + /* mark bin state as START */ + tpa_info->parsing_flags = + le16_to_cpu(cqe->pars_flags.flags); + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + tpa_info->tpa_state = BNX2X_TPA_START; + tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); + tpa_info->placement_offset = cqe->placement_offset; + +#ifdef BNX2X_STOP_ON_ERROR + fp->tpa_queue_used |= (1 << queue); +#ifdef _ASM_GENERIC_INT_L64_H + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", +#else + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", +#endif + fp->tpa_queue_used); +#endif +} + +/* Timestamp option length allowed for TPA aggregation: + * + * nop nop kind length echo val + */ +#define TPA_TSTAMP_OPT_LEN 12 +/** + * bnx2x_set_lro_mss - calculate the approximate value of the MSS + * + * @bp: driver handle + * @parsing_flags: parsing flags from the START CQE + * @len_on_bd: total length of the first packet for the + * aggregation. + * + * Approximate value of the MSS for this aggregation calculated using + * the first packet of it. + */ +static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, + u16 len_on_bd) +{ + /* + * TPA arrgregation won't have either IP options or TCP options + * other than timestamp or IPv6 extension headers. + */ + u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); + + if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == + PRS_FLAG_OVERETH_IPV6) + hdrs_len += sizeof(struct ipv6hdr); + else /* IPv4 */ + hdrs_len += sizeof(struct iphdr); + + + /* Check if there was a TCP timestamp, if there is it's will + * always be 12 bytes length: nop nop kind length echo val. + * + * Otherwise FW would close the aggregation. + */ + if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) + hdrs_len += TPA_TSTAMP_OPT_LEN; + + return len_on_bd - hdrs_len; +} + +static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 queue, struct sk_buff *skb, + struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) +{ + struct sw_rx_page *rx_pg, old_rx_pg; + u32 i, frag_len, frag_size, pages; + int err; + int j; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + u16 len_on_bd = tpa_info->len_on_bd; + + frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; + pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; + + /* This is needed in order to enable forwarding support */ + if (frag_size) + skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, + tpa_info->parsing_flags, len_on_bd); + +#ifdef BNX2X_STOP_ON_ERROR + if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { + BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", + pages, cqe_idx); + BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); + bnx2x_panic(); + return -EINVAL; + } +#endif + + /* Run through the SGL and compose the fragmented skb */ + for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { + u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); + + /* FW gives the indices of the SGE as if the ring is an array + (meaning that "next" element will consume 2 indices) */ + frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); + rx_pg = &fp->rx_page_ring[sge_idx]; + old_rx_pg = *rx_pg; + + /* If we fail to allocate a substitute page, we simply stop + where we are and drop the whole packet */ + err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); + if (unlikely(err)) { + fp->eth_q_stats.rx_skb_alloc_failed++; + return err; + } + + /* Unmap the page as we r going to pass it to the stack */ + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + + skb->data_len += frag_len; + skb->truesize += frag_len; + skb->len += frag_len; + + frag_size -= frag_len; + } + + return 0; +} + +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 queue, struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) +{ + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + struct sw_rx_bd *rx_buf = &tpa_info->first_buf; + u8 pad = tpa_info->placement_offset; + u16 len = tpa_info->len_on_bd; + struct sk_buff *skb = rx_buf->skb; + /* alloc new skb */ + struct sk_buff *new_skb; + u8 old_tpa_state = tpa_info->tpa_state; + + tpa_info->tpa_state = BNX2X_TPA_STOP; + + /* If we there was an error during the handling of the TPA_START - + * drop this aggregation. + */ + if (old_tpa_state == BNX2X_TPA_ERROR) + goto drop; + + /* Try to allocate the new skb */ + new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + + /* Unmap skb in the pool anyway, as we are going to change + pool entry status to BNX2X_TPA_STOP even if new skb allocation + fails. */ + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + + if (likely(new_skb)) { + prefetch(skb); + prefetch(((char *)(skb)) + L1_CACHE_BYTES); + +#ifdef BNX2X_STOP_ON_ERROR + if (pad + len > fp->rx_buf_size) { + BNX2X_ERR("skb_put is about to fail... " + "pad %d len %d rx_buf_size %d\n", + pad, len, fp->rx_buf_size); + bnx2x_panic(); + return; + } +#endif + + skb_reserve(skb, pad); + skb_put(skb, len); + + skb->protocol = eth_type_trans(skb, bp->dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { + if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) + __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); + napi_gro_receive(&fp->napi, skb); + } else { + DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" + " - dropping packet!\n"); + dev_kfree_skb_any(skb); + } + + + /* put new skb in bin */ + rx_buf->skb = new_skb; + + return; + } + +drop: + /* drop the packet and keep the buffer in the bin */ + DP(NETIF_MSG_RX_STATUS, + "Failed to allocate or map a new skb - dropping packet!\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; +} + +/* Set Toeplitz hash value in the skb using the value from the + * CQE (calculated by HW). 
+ */ +static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, + struct sk_buff *skb) +{ + /* Set Toeplitz hash from CQE */ + if ((bp->dev->features & NETIF_F_RXHASH) && + (cqe->fast_path_cqe.status_flags & + ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) + skb->rxhash = + le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); +} + +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) +{ + struct bnx2x *bp = fp->bp; + u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; + u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; + int rx_pkt = 0; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + + /* CQ "next element" is of the size of the regular element, + that's why it's ok here */ + hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); + if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + hw_comp_cons++; + + bd_cons = fp->rx_bd_cons; + bd_prod = fp->rx_bd_prod; + bd_prod_fw = bd_prod; + sw_comp_cons = fp->rx_comp_cons; + sw_comp_prod = fp->rx_comp_prod; + + /* Memory barrier necessary as speculative reads of the rx + * buffer can be ahead of the index in the status block + */ + rmb(); + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", + fp->index, hw_comp_cons, sw_comp_cons); + + while (sw_comp_cons != hw_comp_cons) { + struct sw_rx_bd *rx_buf = NULL; + struct sk_buff *skb; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; + u8 cqe_fp_flags; + enum eth_rx_cqe_type cqe_fp_type; + u16 len, pad; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + + comp_ring_cons = RCQ_BD(sw_comp_cons); + bd_prod = RX_BD(bd_prod); + bd_cons = RX_BD(bd_cons); + + /* Prefetch the page containing the BD descriptor + at producer's index. It will be needed when new skb is + allocated */ + prefetch((void *)(PAGE_ALIGN((unsigned long) + (&fp->rx_desc_ring[bd_prod])) - + PAGE_SIZE + 1)); + + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; + cqe_fp_flags = cqe_fp->type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + + DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" + " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), + cqe_fp_flags, cqe_fp->status_flags, + le32_to_cpu(cqe_fp->rss_hash_result), + le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); + + /* is this a slowpath msg? 
*/ + if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { + bnx2x_sp_event(fp, cqe); + goto next_cqe; + + /* this is an rx packet */ + } else { + rx_buf = &fp->rx_buf_ring[bd_cons]; + skb = rx_buf->skb; + prefetch(skb); + + if (!CQE_TYPE_FAST(cqe_fp_type)) { +#ifdef BNX2X_STOP_ON_ERROR + /* sanity check */ + if (fp->disable_tpa && + (CQE_TYPE_START(cqe_fp_type) || + CQE_TYPE_STOP(cqe_fp_type))) + BNX2X_ERR("START/STOP packet while " + "disable_tpa type %x\n", + CQE_TYPE(cqe_fp_type)); +#endif + + if (CQE_TYPE_START(cqe_fp_type)) { + u16 queue = cqe_fp->queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_start on queue %d\n", + queue); + + bnx2x_tpa_start(fp, queue, skb, + bd_cons, bd_prod, + cqe_fp); + + /* Set Toeplitz hash for LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); + + goto next_rx; + + } else { + u16 queue = + cqe->end_agg_cqe.queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + bnx2x_tpa_stop(bp, fp, queue, + &cqe->end_agg_cqe, + comp_ring_cons); +#ifdef BNX2X_STOP_ON_ERROR + if (bp->panic) + return 0; +#endif + + bnx2x_update_sge_prod(fp, cqe_fp); + goto next_cqe; + } + } + /* non TPA */ + len = le16_to_cpu(cqe_fp->pkt_len); + pad = cqe_fp->placement_offset; + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); + prefetch(((char *)(skb)) + L1_CACHE_BYTES); + + /* is this an error packet? */ + if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + DP(NETIF_MSG_RX_ERR, + "ERROR flags %x rx packet %u\n", + cqe_fp_flags, sw_comp_cons); + fp->eth_q_stats.rx_err_discard_pkt++; + goto reuse_rx; + } + + /* Since we don't have a jumbo ring + * copy small packets if mtu > 1500 + */ + if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && + (len <= RX_COPY_THRESH)) { + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(bp->dev, len + pad); + if (new_skb == NULL) { + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped " + "because of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; + goto reuse_rx; + } + + /* aligned copy */ + skb_copy_from_linear_data_offset(skb, pad, + new_skb->data + pad, len); + skb_reserve(new_skb, pad); + skb_put(new_skb, len); + + bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); + + skb = new_skb; + + } else + if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, + DMA_FROM_DEVICE); + skb_reserve(skb, pad); + skb_put(skb, len); + + } else { + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped because " + "of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; +reuse_rx: + bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); + goto next_rx; + } + + skb->protocol = eth_type_trans(skb, bp->dev); + + /* Set Toeplitz hash for a none-LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); + + skb_checksum_none_assert(skb); + + if (bp->dev->features & NETIF_F_RXCSUM) { + + if (likely(BNX2X_RX_CSUM_OK(cqe))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + fp->eth_q_stats.hw_csum_err++; + } + } + + skb_record_rx_queue(skb, fp->index); + + if (le16_to_cpu(cqe_fp->pars_flags.flags) & + PARSING_FLAGS_VLAN) + __vlan_hwaccel_put_tag(skb, + le16_to_cpu(cqe_fp->vlan_tag)); + napi_gro_receive(&fp->napi, skb); + + +next_rx: + rx_buf->skb = NULL; + + bd_cons = NEXT_RX_IDX(bd_cons); + bd_prod = NEXT_RX_IDX(bd_prod); + bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); + rx_pkt++; +next_cqe: + sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); + sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + + if (rx_pkt == budget) + break; + } /* while */ + + 
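+ /*
+  * Editor's illustrative sketch (not part of the original patch): the
+  * NEXT_RX_IDX()/NEXT_RCQ_IDX() macros used in the loop above advance a
+  * ring index while skipping the "next page" descriptors kept at page
+  * boundaries. Ignoring that page-skip detail, the wrap-around itself is
+  * just a power-of-two mask, roughly (names hypothetical):
+  *
+  *	static inline u16 ring_next(u16 idx, u16 ring_mask)
+  *	{
+  *		return (idx + 1) & ring_mask;
+  *	}
+  */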
fp->rx_bd_cons = bd_cons;
+ fp->rx_bd_prod = bd_prod_fw;
+ fp->rx_comp_cons = sw_comp_cons;
+ fp->rx_comp_prod = sw_comp_prod;
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+ fp->rx_sge_prod);
+
+ fp->rx_pkt += rx_pkt;
+ fp->rx_calls++;
+
+ return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct bnx2x_fastpath *fp = fp_cookie;
+ struct bnx2x *bp = fp->bp;
+ u8 cos;
+
+ DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
+ "[fp %d fw_sd %d igusb %d]\n",
+ fp->index, fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ /* Handle Rx and Tx according to MSI-X vector */
+ prefetch(fp->rx_cons_sb);
+
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+
+ return IRQ_HANDLED;
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+ mutex_lock(&bp->port.phy_mutex);
+
+ if (bp->port.need_hw_lock)
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+ if (bp->port.need_hw_lock)
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+ mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* calculates MF speed according to the current line speed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
+ */
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ }
+ }
+
+ return line_speed;
+}
+
+/**
+ * bnx2x_fill_report_data - fill link report data to report
+ *
+ * @bp: driver handle
+ * @data: link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static inline void bnx2x_fill_report_data(struct bnx2x *bp,
+ struct bnx2x_link_report_data *data)
+{
+ u16 line_speed = bnx2x_get_mf_speed(bp);
+
+ memset(data, 0, sizeof(*data));
+
+ /* Fill the report data: effective line speed */
+ data->line_speed = line_speed;
+
+ /* Link is down */
+ if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+
+ /* Full DUPLEX */
+ if (bp->link_vars.duplex == DUPLEX_FULL)
+ __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
+
+ /* Rx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+
+ /* Tx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+}
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Calls the __bnx2x_link_report() under the same locking scheme
+ * as the link/PHY state managing code to ensure consistent link
+ * reporting.
+ */
+
+void bnx2x_link_report(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ __bnx2x_link_report(bp);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Non-atomic implementation. 
+ * Should be called under the phy_lock. + */ +void __bnx2x_link_report(struct bnx2x *bp) +{ + struct bnx2x_link_report_data cur_data; + + /* reread mf_cfg */ + if (!CHIP_IS_E1(bp)) + bnx2x_read_mf_cfg(bp); + + /* Read the current link report info */ + bnx2x_fill_report_data(bp, &cur_data); + + /* Don't report link down or exactly the same link status twice */ + if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || + (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &bp->last_reported_link.link_report_flags) && + test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags))) + return; + + bp->link_cnt++; + + /* We are going to report a new link parameters now - + * remember the current data for the next time. + */ + memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); + + if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags)) { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC Link is Down\n"); + return; + } else { + const char *duplex; + const char *flow; + + netif_carrier_on(bp->dev); + + if (test_and_clear_bit(BNX2X_LINK_REPORT_FD, + &cur_data.link_report_flags)) + duplex = "full"; + else + duplex = "half"; + + /* Handle the FC at the end so that only these flags would be + * possibly set. This way we may easily check if there is no FC + * enabled. + */ + if (cur_data.link_report_flags) { + if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags)) { + if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) + flow = "ON - receive & transmit"; + else + flow = "ON - receive"; + } else { + flow = "ON - transmit"; + } + } else { + flow = "none"; + } + netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", + cur_data.line_speed, duplex, flow); + } +} + +void bnx2x_init_rx_rings(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + int max_agg_queues = CHIP_IS_E1(bp) ? 
ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H_E2; + u16 ring_prod; + int i, j; + + /* Allocate TPA resources */ + for_each_rx_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + DP(NETIF_MSG_IFUP, + "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); + + if (!fp->disable_tpa) { + /* Fill the per-aggregtion pool */ + for (i = 0; i < max_agg_queues; i++) { + struct bnx2x_agg_info *tpa_info = + &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = + &tpa_info->first_buf; + + first_buf->skb = netdev_alloc_skb(bp->dev, + fp->rx_buf_size); + if (!first_buf->skb) { + BNX2X_ERR("Failed to allocate TPA " + "skb pool for queue[%d] - " + "disabling TPA on this " + "queue!\n", j); + bnx2x_free_tpa_pool(bp, fp, i); + fp->disable_tpa = 1; + break; + } + dma_unmap_addr_set(first_buf, mapping, 0); + tpa_info->tpa_state = BNX2X_TPA_STOP; + } + + /* "next page" elements initialization */ + bnx2x_set_next_page_sgl(fp); + + /* set SGEs bit mask */ + bnx2x_init_sge_ring_bit_mask(fp); + + /* Allocate SGEs and initialize the ring elements */ + for (i = 0, ring_prod = 0; + i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { + + if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { + BNX2X_ERR("was only able to allocate " + "%d rx sges\n", i); + BNX2X_ERR("disabling TPA for " + "queue[%d]\n", j); + /* Cleanup already allocated elements */ + bnx2x_free_rx_sge_range(bp, fp, + ring_prod); + bnx2x_free_tpa_pool(bp, fp, + max_agg_queues); + fp->disable_tpa = 1; + ring_prod = 0; + break; + } + ring_prod = NEXT_SGE_IDX(ring_prod); + } + + fp->rx_sge_prod = ring_prod; + } + } + + for_each_rx_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + fp->rx_bd_cons = 0; + + /* Activate BD ring */ + /* Warning! + * this will generate an interrupt (to the TSTORM) + * must only be done after chip is initialized + */ + bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, + fp->rx_sge_prod); + + if (j != 0) + continue; + + if (CHIP_IS_E1(bp)) { + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), + U64_LO(fp->rx_comp_mapping)); + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, + U64_HI(fp->rx_comp_mapping)); + } + } +} + +static void bnx2x_free_tx_skbs(struct bnx2x *bp) +{ + int i; + u8 cos; + + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + u16 bd_cons = txdata->tx_bd_cons; + u16 sw_prod = txdata->tx_pkt_prod; + u16 sw_cons = txdata->tx_pkt_cons; + + while (sw_cons != sw_prod) { + bd_cons = bnx2x_free_tx_pkt(bp, txdata, + TX_BD(sw_cons)); + sw_cons++; + } + } + } +} + +static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + int i; + + /* ring wasn't allocated */ + if (fp->rx_buf_ring == NULL) + return; + + for (i = 0; i < NUM_RX_BD; i++) { + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; + struct sk_buff *skb = rx_buf->skb; + + if (skb == NULL) + continue; + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + + rx_buf->skb = NULL; + dev_kfree_skb(skb); + } +} + +static void bnx2x_free_rx_skbs(struct bnx2x *bp) +{ + int j; + + for_each_rx_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + bnx2x_free_rx_bds(fp); + + if (!fp->disable_tpa) + bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 
+ ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H_E2); + } +} + +void bnx2x_free_skbs(struct bnx2x *bp) +{ + bnx2x_free_tx_skbs(bp); + bnx2x_free_rx_skbs(bp); +} + +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) +{ + /* load old values */ + u32 mf_cfg = bp->mf_config[BP_VN(bp)]; + + if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { + /* leave all but MAX value */ + mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; + + /* set new MAX value */ + mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) + & FUNC_MF_CFG_MAX_BW_MASK; + + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); + } +} + +/** + * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors + * + * @bp: driver handle + * @nvecs: number of vectors to be released + */ +static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) +{ + int i, offset = 0; + + if (nvecs == offset) + return; + free_irq(bp->msix_table[offset].vector, bp->dev); + DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", + bp->msix_table[offset].vector); + offset++; +#ifdef BCM_CNIC + if (nvecs == offset) + return; + offset++; +#endif + + for_each_eth_queue(bp, i) { + if (nvecs == offset) + return; + DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " + "irq\n", i, bp->msix_table[offset].vector); + + free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); + } +} + +void bnx2x_free_irq(struct bnx2x *bp) +{ + if (bp->flags & USING_MSIX_FLAG) + bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + + CNIC_PRESENT + 1); + else if (bp->flags & USING_MSI_FLAG) + free_irq(bp->pdev->irq, bp->dev); + else + free_irq(bp->pdev->irq, bp->dev); +} + +int bnx2x_enable_msix(struct bnx2x *bp) +{ + int msix_vec = 0, i, rc, req_cnt; + + bp->msix_table[msix_vec].entry = msix_vec; + DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", + bp->msix_table[0].entry); + msix_vec++; + +#ifdef BCM_CNIC + bp->msix_table[msix_vec].entry = msix_vec; + DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n", + bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); + msix_vec++; +#endif + /* We need separate vectors for ETH queues only (not FCoE) */ + for_each_eth_queue(bp, i) { + bp->msix_table[msix_vec].entry = msix_vec; + DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " + "(fastpath #%u)\n", msix_vec, msix_vec, i); + msix_vec++; + } + + req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; + + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); + + /* + * reconfigure number of tx/rx queues according to available + * MSI-X vectors + */ + if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { + /* how less vectors we will have? 
*/ + int diff = req_cnt - rc; + + DP(NETIF_MSG_IFUP, + "Trying to use less MSI-X vectors: %d\n", rc); + + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); + + if (rc) { + DP(NETIF_MSG_IFUP, + "MSI-X is not attainable rc %d\n", rc); + return rc; + } + /* + * decrease number of queues by number of unallocated entries + */ + bp->num_queues -= diff; + + DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", + bp->num_queues); + } else if (rc) { + /* fall to INTx if not enough memory */ + if (rc == -ENOMEM) + bp->flags |= DISABLE_MSI_FLAG; + DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); + return rc; + } + + bp->flags |= USING_MSIX_FLAG; + + return 0; +} + +static int bnx2x_req_msix_irqs(struct bnx2x *bp) +{ + int i, rc, offset = 0; + + rc = request_irq(bp->msix_table[offset++].vector, + bnx2x_msix_sp_int, 0, + bp->dev->name, bp->dev); + if (rc) { + BNX2X_ERR("request sp irq failed\n"); + return -EBUSY; + } + +#ifdef BCM_CNIC + offset++; +#endif + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + bp->dev->name, i); + + rc = request_irq(bp->msix_table[offset].vector, + bnx2x_msix_fp_int, 0, fp->name, fp); + if (rc) { + BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, + bp->msix_table[offset].vector, rc); + bnx2x_free_msix_irqs(bp, offset); + return -EBUSY; + } + + offset++; + } + + i = BNX2X_NUM_ETH_QUEUES(bp); + offset = 1 + CNIC_PRESENT; + netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" + " ... fp[%d] %d\n", + bp->msix_table[0].vector, + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + + return 0; +} + +int bnx2x_enable_msi(struct bnx2x *bp) +{ + int rc; + + rc = pci_enable_msi(bp->pdev); + if (rc) { + DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); + return -1; + } + bp->flags |= USING_MSI_FLAG; + + return 0; +} + +static int bnx2x_req_irq(struct bnx2x *bp) +{ + unsigned long flags; + int rc; + + if (bp->flags & USING_MSI_FLAG) + flags = 0; + else + flags = IRQF_SHARED; + + rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, + bp->dev->name, bp->dev); + return rc; +} + +static inline int bnx2x_setup_irqs(struct bnx2x *bp) +{ + int rc = 0; + if (bp->flags & USING_MSIX_FLAG) { + rc = bnx2x_req_msix_irqs(bp); + if (rc) + return rc; + } else { + bnx2x_ack_int(bp); + rc = bnx2x_req_irq(bp); + if (rc) { + BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); + return rc; + } + if (bp->flags & USING_MSI_FLAG) { + bp->dev->irq = bp->pdev->irq; + netdev_info(bp->dev, "using MSI IRQ %d\n", + bp->pdev->irq); + } + } + + return 0; +} + +static inline void bnx2x_napi_enable(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue(bp, i) + napi_enable(&bnx2x_fp(bp, i, napi)); +} + +static inline void bnx2x_napi_disable(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue(bp, i) + napi_disable(&bnx2x_fp(bp, i, napi)); +} + +void bnx2x_netif_start(struct bnx2x *bp) +{ + if (netif_running(bp->dev)) { + bnx2x_napi_enable(bp); + bnx2x_int_enable(bp); + if (bp->state == BNX2X_STATE_OPEN) + netif_tx_wake_all_queues(bp->dev); + } +} + +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) +{ + bnx2x_int_disable_sync(bp, disable_hw); + bnx2x_napi_disable(bp); +} + +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + struct bnx2x *bp = netdev_priv(dev); ++ +#ifdef BCM_CNIC - if (NO_FCOE(bp)) - return skb_tx_hash(dev, skb); - else { ++ if (!NO_FCOE(bp)) { + struct ethhdr *hdr = (struct ethhdr *)skb->data; + u16 ether_type = ntohs(hdr->h_proto); + + /* 
Skip VLAN tag if present */ + if (ether_type == ETH_P_8021Q) { + struct vlan_ethhdr *vhdr = + (struct vlan_ethhdr *)skb->data; + + ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); + } + + /* If ethertype is FCoE or FIP - use FCoE ring */ + if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) + return bnx2x_fcoe_tx(bp, txq_index); + } +#endif - /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring - */ ++ /* select a non-FCoE queue */ + return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); +} + +void bnx2x_set_num_queues(struct bnx2x *bp) +{ + switch (bp->multi_mode) { + case ETH_RSS_MODE_DISABLED: + bp->num_queues = 1; + break; + case ETH_RSS_MODE_REGULAR: + bp->num_queues = bnx2x_calc_num_queues(bp); + break; + + default: + bp->num_queues = 1; + break; + } + + /* Add special queues */ + bp->num_queues += NON_ETH_CONTEXT_USE; +} + ++/** ++ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues ++ * ++ * @bp: Driver handle ++ * ++ * We currently support for at most 16 Tx queues for each CoS thus we will ++ * allocate a multiple of 16 for ETH L2 rings according to the value of the ++ * bp->max_cos. ++ * ++ * If there is an FCoE L2 queue the appropriate Tx queue will have the next ++ * index after all ETH L2 indices. ++ * ++ * If the actual number of Tx queues (for each CoS) is less than 16 then there ++ * will be the holes at the end of each group of 16 ETh L2 indices (0..15, ++ * 16..31,...) with indicies that are not coupled with any real Tx queue. ++ * ++ * The proper configuration of skb->queue_mapping is handled by ++ * bnx2x_select_queue() and __skb_tx_hash(). ++ * ++ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() ++ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). ++ */ +static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) +{ + int rc, tx, rx; + + tx = MAX_TXQS_PER_COS * bp->max_cos; + rx = BNX2X_NUM_ETH_QUEUES(bp); + +/* account for fcoe queue */ +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) { + rx += FCOE_PRESENT; + tx += FCOE_PRESENT; + } +#endif + + rc = netif_set_real_num_tx_queues(bp->dev, tx); + if (rc) { + BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc); + return rc; + } + rc = netif_set_real_num_rx_queues(bp->dev, rx); + if (rc) { + BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc); + return rc; + } + + DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n", + tx, rx); + + return rc; +} + +static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) +{ + int i; + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Always use a mini-jumbo MTU for the FCoE L2 ring */ + if (IS_FCOE_IDX(i)) + /* + * Although there are no IP frames expected to arrive to + * this ring we still want to add an + * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer + * overrun attack. 
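+			 * The FCoE ring therefore gets the mini-jumbo MTU plus
+			 * the same alignment and padding overhead that the
+			 * regular ETH rings use below.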
+ */ + fp->rx_buf_size = + BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + + BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; + else + fp->rx_buf_size = + bp->dev->mtu + ETH_OVREHEAD + + BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; + } +} + +static inline int bnx2x_init_rss_pf(struct bnx2x *bp) +{ + int i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); + + /* + * Prepare the inital contents fo the indirection table if RSS is + * enabled + */ + if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { + for (i = 0; i < sizeof(ind_table); i++) + ind_table[i] = + bp->fp->cl_id + (i % num_eth_queues); + } + + /* + * For 57710 and 57711 SEARCHER configuration (rss_keys) is + * per-port, so if explicit configuration is needed , do it only + * for a PMF. + * + * For 57712 and newer on the other hand it's a per-function + * configuration. + */ + return bnx2x_config_rss_pf(bp, ind_table, + bp->port.pmf || !CHIP_IS_E1x(bp)); +} + +int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) +{ + struct bnx2x_config_rss_params params = {0}; + int i; + + /* Although RSS is meaningless when there is a single HW queue we + * still need it enabled in order to have HW Rx hash generated. + * + * if (!is_eth_multi(bp)) + * bp->multi_mode = ETH_RSS_MODE_DISABLED; + */ + + params.rss_obj = &bp->rss_conf_obj; + + __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); + + /* RSS mode */ + switch (bp->multi_mode) { + case ETH_RSS_MODE_DISABLED: + __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); + break; + case ETH_RSS_MODE_REGULAR: + __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); + break; + case ETH_RSS_MODE_VLAN_PRI: + __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); + break; + case ETH_RSS_MODE_E1HOV_PRI: + __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); + break; + case ETH_RSS_MODE_IP_DSCP: + __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); + break; + default: + BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); + return -EINVAL; + } + + /* If RSS is enabled */ + if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { + /* RSS configuration */ + __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); + + /* Hash bits */ + params.rss_result_mask = MULTI_MASK; + + memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); + + if (config_hash) { + /* RSS keys */ + for (i = 0; i < sizeof(params.rss_key) / 4; i++) + params.rss_key[i] = random32(); + + __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); + } + } + + return bnx2x_config_rss(bp, ¶ms); +} + +static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) +{ + struct bnx2x_func_state_params func_params = {0}; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_HW_INIT; + + func_params.params.hw_init.load_phase = load_code; + + return bnx2x_func_state_change(bp, &func_params); +} + +/* + * Cleans the object that have internal lists without sending + * ramrods. Should be run when interrutps are disabled. 
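+ * The cleanup is driver-only (RAMROD_DRV_CLR_ONLY): the ETH MAC and
+ * UC list MACs are deleted first, then the multicast object is
+ * drained with a DEL command.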
+ */ +static void bnx2x_squeeze_objects(struct bnx2x *bp) +{ + int rc; + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + struct bnx2x_mcast_ramrod_params rparam = {0}; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; + + /***************** Cleanup MACs' object first *************************/ + + /* Wait for completion of requested */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Perform a dry cleanup */ + __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); + + /* Clean ETH primary MAC */ + __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) + BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); + + /* Cleanup UC list */ + vlan_mac_flags = 0; + __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) + BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); + + /***************** Now clean mcast object *****************************/ + rparam.mcast_obj = &bp->mcast_obj; + __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); + + /* Add a DEL command... */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) + BNX2X_ERR("Failed to add a new DEL command to a multi-cast " + "object: %d\n", rc); + + /* ...and wait until all pending commands are cleared */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + while (rc != 0) { + if (rc < 0) { + BNX2X_ERR("Failed to clean multi-cast object: %d\n", + rc); + return; + } + + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + } +} + +#ifndef BNX2X_STOP_ON_ERROR +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + goto label; \ + } while (0) +#else +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + (bp)->panic = 1; \ + return -EBUSY; \ + } while (0) +#endif + +/* must be called with rtnl_lock */ +int bnx2x_nic_load(struct bnx2x *bp, int load_mode) +{ + int port = BP_PORT(bp); + u32 load_code; + int i, rc; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -EPERM; +#endif + + bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; + + /* Set the initial link reported state to link down */ + bnx2x_acquire_phy_lock(bp); + memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); + __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &bp->last_reported_link.link_report_flags); + bnx2x_release_phy_lock(bp); + + /* must be called before memory allocation and HW init */ + bnx2x_ilt_set_info(bp); + + /* + * Zero fastpath structures preserving invariants like napi, which are + * allocated only once, fp index, max_cos, bp pointer. + * Also set fp->disable_tpa. + */ + for_each_queue(bp, i) + bnx2x_bz_fp(bp, i); + + + /* Set the receive queues buffer size */ + bnx2x_set_rx_buf_size(bp); + + if (bnx2x_alloc_mem(bp)) + return -ENOMEM; + + /* As long as bnx2x_alloc_mem() may possibly update + * bp->num_queues, bnx2x_set_real_num_queues() should always + * come after it. + */ + rc = bnx2x_set_real_num_queues(bp); + if (rc) { + BNX2X_ERR("Unable to set real_num_queues\n"); + LOAD_ERROR_EXIT(bp, load_error0); + } + + /* configure multi cos mappings in kernel. + * this configuration may be overriden by a multi class queue discipline + * or by a dcbx negotiation result. 
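+	 * bnx2x_setup_tc() below declares bp->max_cos traffic classes and
+	 * maps each class onto its range of transmission queues.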
+ */ + bnx2x_setup_tc(bp->dev, bp->max_cos); + + bnx2x_napi_enable(bp); + + /* Send LOAD_REQUEST command to MCP + * Returns the type of LOAD command: + * if it is the first port to be initialized + * common blocks should be initialized, otherwise - not + */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + LOAD_ERROR_EXIT(bp, load_error1); + } + if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { + rc = -EBUSY; /* other port in diagnostic mode */ + LOAD_ERROR_EXIT(bp, load_error1); + } + + } else { + int path = BP_PATH(bp); + + DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + load_count[path][0]++; + load_count[path][1 + port]++; + DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 1) + load_code = FW_MSG_CODE_DRV_LOAD_COMMON; + else if (load_count[path][1 + port] == 1) + load_code = FW_MSG_CODE_DRV_LOAD_PORT; + else + load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; + } + + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { + bp->port.pmf = 1; + /* + * We need the barrier to ensure the ordering between the + * writing to bp->port.pmf here and reading it from the + * bnx2x_periodic_task(). + */ + smp_mb(); + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + } else + bp->port.pmf = 0; + + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + + /* Init Function state controlling object */ + bnx2x__init_func_obj(bp); + + /* Initialize HW */ + rc = bnx2x_init_hw(bp, load_code); + if (rc) { + BNX2X_ERR("HW init failed, aborting\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + + /* Connect to IRQs */ + rc = bnx2x_setup_irqs(bp); + if (rc) { + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + + /* Setup NIC internals and enable interrupts */ + bnx2x_nic_init(bp, load_code); + + /* Init per-function objects */ + bnx2x_init_bp_objs(bp); + + if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && + (bp->common.shmem2_base)) { + if (SHMEM2_HAS(bp, dcc_support)) + SHMEM2_WR(bp, dcc_support, + (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | + SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + } + + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; + rc = bnx2x_func_start(bp); + if (rc) { + BNX2X_ERR("Function start failed!\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error3); + } + + /* Send LOAD_DONE command to MCP */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + LOAD_ERROR_EXIT(bp, load_error3); + } + } + + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } + +#ifdef BCM_CNIC + /* Enable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); +#endif + + for_each_nondefault_queue(bp, i) { + rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); + } + + rc = bnx2x_init_rss_pf(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); + + /* Now when Clients are configured we are ready to work */ + bp->state = 
BNX2X_STATE_OPEN; + + /* Configure a ucast MAC */ + rc = bnx2x_set_eth_mac(bp, true); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); + + if (bp->pending_max) { + bnx2x_update_max_mf_config(bp, bp->pending_max); + bp->pending_max = 0; + } + + if (bp->port.pmf) + bnx2x_initial_phy_init(bp, load_mode); + + /* Start fast path */ + + /* Initialize Rx filter. */ + netif_addr_lock_bh(bp->dev); + bnx2x_set_rx_mode(bp->dev); + netif_addr_unlock_bh(bp->dev); + + /* Start the Tx */ + switch (load_mode) { + case LOAD_NORMAL: + /* Tx queue should be only reenabled */ + netif_tx_wake_all_queues(bp->dev); + break; + + case LOAD_OPEN: + netif_tx_start_all_queues(bp->dev); + smp_mb__after_clear_bit(); + break; + + case LOAD_DIAG: + bp->state = BNX2X_STATE_DIAG; + break; + + default: + break; + } + + if (!bp->port.pmf) + bnx2x__link_status_update(bp); + + /* start the timer */ + mod_timer(&bp->timer, jiffies + bp->current_interval); + +#ifdef BCM_CNIC + bnx2x_setup_cnic_irq_info(bp); + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); +#endif + bnx2x_inc_load_cnt(bp); + + /* Wait for all pending SP commands to complete */ + if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { + BNX2X_ERR("Timeout waiting for SP elements to complete\n"); + bnx2x_nic_unload(bp, UNLOAD_CLOSE); + return -EBUSY; + } + + bnx2x_dcbx_init(bp); + return 0; + +#ifndef BNX2X_STOP_ON_ERROR +load_error4: +#ifdef BCM_CNIC + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); +#endif +load_error3: + bnx2x_int_disable_sync(bp, 1); + + /* Clean queueable objects */ + bnx2x_squeeze_objects(bp); + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + + /* Release IRQs */ + bnx2x_free_irq(bp); +load_error2: + if (!BP_NOMCP(bp)) { + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + } + + bp->port.pmf = 0; +load_error1: + bnx2x_napi_disable(bp); +load_error0: + bnx2x_free_mem(bp); + + return rc; +#endif /* ! BNX2X_STOP_ON_ERROR */ +} + +/* must be called with rtnl_lock */ +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) +{ + int i; + bool global = false; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR)) { + /* We can get here if the driver has been unloaded + * during parity error recovery and is either waiting for a + * leader to complete or for other functions to unload and + * then ifdown has been issued. In this case we want to + * unload and let other functions to complete a recovery + * process. + */ + bp->recovery_state = BNX2X_RECOVERY_DONE; + bp->is_leader = 0; + bnx2x_release_leader_lock(bp); + smp_mb(); + + DP(NETIF_MSG_HW, "Releasing a leadership...\n"); + + return -EINVAL; + } + + /* + * It's important to set the bp->state to the value different from + * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() + * may restart the Tx from the NAPI context (see bnx2x_tx_int()). 
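+	 * The state change is therefore published (smp_mb()) before
+	 * bnx2x_tx_disable() stops the Tx queues below.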
+ */ + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; + smp_mb(); + + /* Stop Tx */ + bnx2x_tx_disable(bp); + +#ifdef BCM_CNIC + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); +#endif + + bp->rx_mode = BNX2X_RX_MODE_NONE; + + del_timer_sync(&bp->timer); + + /* Set ALWAYS_ALIVE bit in shmem */ + bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + + bnx2x_drv_pulse(bp); + + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + /* Cleanup the chip if needed */ + if (unload_mode != UNLOAD_RECOVERY) + bnx2x_chip_cleanup(bp, unload_mode); + else { + /* Send the UNLOAD_REQUEST to the MCP */ + bnx2x_send_unload_req(bp, unload_mode); + + /* + * Prevent transactions to host from the functions on the + * engine that doesn't reset global blocks in case of global + * attention once gloabl blocks are reset and gates are opened + * (the engine which leader will perform the recovery + * last). + */ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_disable(bp); + + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + + /* Release IRQs */ + bnx2x_free_irq(bp); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp); + } + + /* + * At this stage no more interrupts will arrive so we may safly clean + * the queueable objects here in case they failed to get cleaned so far. + */ + bnx2x_squeeze_objects(bp); + + /* There should be no more pending SP commands at this stage */ + bp->sp_state = 0; + + bp->port.pmf = 0; + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + + /* Check if there are pending parity attentions. If there are - set + * RECOVERY_IN_PROGRESS. + */ + if (bnx2x_chk_parity_attn(bp, &global, false)) { + bnx2x_set_reset_in_progress(bp); + + /* Set RESET_IS_GLOBAL if needed */ + if (global) + bnx2x_set_reset_global(bp); + } + + + /* The last driver must disable a "close the gate" if there is no + * parity attention or "process kill" pending. + */ + if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) + bnx2x_disable_close_the_gate(bp); + + return 0; +} + +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) +{ + u16 pmcsr; + + /* If there is no power capability, silently succeed */ + if (!bp->pm_cap) { + DP(NETIF_MSG_HW, "No power capability. Breaking.\n"); + return 0; + } + + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); + + switch (state) { + case PCI_D0: + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | + PCI_PM_CTRL_PME_STATUS)); + + if (pmcsr & PCI_PM_CTRL_STATE_MASK) + /* delay required during transition out of D3hot */ + msleep(20); + break; + + case PCI_D3hot: + /* If there are other clients above don't + shut down the power */ + if (atomic_read(&bp->pdev->enable_cnt) != 1) + return 0; + /* Don't shut down the power for emulation and FPGA */ + if (CHIP_REV_IS_SLOW(bp)) + return 0; + + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pmcsr |= 3; + + if (bp->wol) + pmcsr |= PCI_PM_CTRL_PME_ENABLE; + + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + pmcsr); + + /* No more memory access after this point until + * device is brought back to D0. 
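+		 * PME stays armed across D3hot only when WoL was requested
+		 * above.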
+ */ + break; + + default: + return -EINVAL; + } + return 0; +} + +/* + * net_device service functions + */ +int bnx2x_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0; + u8 cos; + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + + while (1) { +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) { + napi_complete(napi); + return 0; + } +#endif + + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + bnx2x_tx_int(bp, &fp->txdata[cos]); + + + if (bnx2x_has_rx_work(fp)) { + work_done += bnx2x_rx_int(fp, budget - work_done); + + /* must not complete if we consumed full budget */ + if (work_done >= budget) + break; + } + + /* Fall out from the NAPI loop if needed */ + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { +#ifdef BCM_CNIC + /* No need to update SB for FCoE L2 ring as long as + * it's connected to the default SB and the SB + * has been updated when NAPI was scheduled. + */ + if (IS_FCOE_FP(fp)) { + napi_complete(napi); + break; + } +#endif + + bnx2x_update_fpsb_idx(fp); + /* bnx2x_has_rx_work() reads the status block, + * thus we need to ensure that status block indices + * have been actually read (bnx2x_update_fpsb_idx) + * prior to this check (bnx2x_has_rx_work) so that + * we won't write the "newer" value of the status block + * to IGU (if there was a DMA right after + * bnx2x_has_rx_work and if there is no rmb, the memory + * reading (bnx2x_update_fpsb_idx) may be postponed + * to right before bnx2x_ack_sb). In this case there + * will never be another interrupt until there is + * another update of the status block, while there + * is still unhandled work. + */ + rmb(); + + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + napi_complete(napi); + /* Re-enable interrupts */ + DP(NETIF_MSG_HW, + "Update index to %d\n", fp->fp_hc_idx); + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, + le16_to_cpu(fp->fp_hc_idx), + IGU_INT_ENABLE, 1); + break; + } + } + } + + return work_done; +} + +/* we split the first BD into headers and data BDs + * to ease the pain of our fellow microcode engineers + * we use one mapping for both BDs + * So far this has only been observed to happen + * in Other Operating Systems(TM) + */ +static noinline u16 bnx2x_tx_split(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, + struct sw_tx_bd *tx_buf, + struct eth_tx_start_bd **tx_bd, u16 hlen, + u16 bd_prod, int nbd) +{ + struct eth_tx_start_bd *h_tx_bd = *tx_bd; + struct eth_tx_bd *d_tx_bd; + dma_addr_t mapping; + int old_len = le16_to_cpu(h_tx_bd->nbytes); + + /* first fix first BD */ + h_tx_bd->nbd = cpu_to_le16(nbd); + h_tx_bd->nbytes = cpu_to_le16(hlen); + + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " + "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, + h_tx_bd->addr_lo, h_tx_bd->nbd); + + /* now get a new data BD + * (after the pbd) and fill it */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + + mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), + le32_to_cpu(h_tx_bd->addr_lo)) + hlen; + + d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); + + /* this marks the BD as one that has no individual mapping */ + tx_buf->flags |= BNX2X_TSO_SPLIT_BD; + + DP(NETIF_MSG_TX_QUEUED, + "TSO split data size is %d (%x:%x)\n", + d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); + + /* update tx_bd */ + *tx_bd = (struct eth_tx_start_bd 
*)d_tx_bd; + + return bd_prod; +} + +static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) +{ + if (fix > 0) + csum = (u16) ~csum_fold(csum_sub(csum, + csum_partial(t_header - fix, fix, 0))); + + else if (fix < 0) + csum = (u16) ~csum_fold(csum_add(csum, + csum_partial(t_header, -fix, 0))); + + return swab16(csum); +} + +static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) +{ + u32 rc; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + rc = XMIT_PLAIN; + + else { + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { + rc = XMIT_CSUM_V6; + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + + } else { + rc = XMIT_CSUM_V4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + } + } + + if (skb_is_gso_v6(skb)) + rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; + else if (skb_is_gso(skb)) + rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; + + return rc; +} + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) +/* check if packet requires linearization (packet is too fragmented) + no need to check fragmentation if page size > 8K (there will be no + violation to FW restrictions) */ +static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, + u32 xmit_type) +{ + int to_copy = 0; + int hlen = 0; + int first_bd_sz = 0; + + /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ + if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { + + if (xmit_type & XMIT_GSO) { + unsigned short lso_mss = skb_shinfo(skb)->gso_size; + /* Check if LSO packet needs to be copied: + 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ + int wnd_size = MAX_FETCH_BD - 3; + /* Number of windows to check */ + int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; + int wnd_idx = 0; + int frag_idx = 0; + u32 wnd_sum = 0; + + /* Headers length */ + hlen = (int)(skb_transport_header(skb) - skb->data) + + tcp_hdrlen(skb); + + /* Amount of data (w/o headers) on linear part of SKB*/ + first_bd_sz = skb_headlen(skb) - hlen; + + wnd_sum = first_bd_sz; + + /* Calculate the first sum - it's special */ + for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) + wnd_sum += + skb_shinfo(skb)->frags[frag_idx].size; + + /* If there was data on linear skb data - check it */ + if (first_bd_sz > 0) { + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + goto exit_lbl; + } + + wnd_sum -= first_bd_sz; + } + + /* Others are easier: run through the frag list and + check all windows */ + for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { + wnd_sum += + skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; + + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + break; + } + wnd_sum -= + skb_shinfo(skb)->frags[wnd_idx].size; + } + } else { + /* in non-LSO too fragmented packet should always + be linearized */ + to_copy = 1; + } + } + +exit_lbl: + if (unlikely(to_copy)) + DP(NETIF_MSG_TX_QUEUED, + "Linearization IS REQUIRED for %s packet. " + "num_frags %d hlen %d first_bd_sz %d\n", + (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", + skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); + + return to_copy; +} +#endif + +static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) +{ + *parsing_data |= (skb_shinfo(skb)->gso_size << + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & + ETH_TX_PARSE_BD_E2_LSO_MSS; + if ((xmit_type & XMIT_GSO_V6) && + (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; +} + +/** + * bnx2x_set_pbd_gso - update PBD in GSO case. 
+ * + * @skb: packet skb + * @pbd: parse BD + * @xmit_type: xmit flags + */ +static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) +{ + pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); + pbd->tcp_flags = pbd_tcp_flags(skb); + + if (xmit_type & XMIT_GSO_V4) { + pbd->ip_id = swab16(ip_hdr(skb)->id); + pbd->tcp_pseudo_csum = + swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + } else + pbd->tcp_pseudo_csum = + swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN; +} + +/** + * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @parsing_data: data to be updated + * @xmit_type: xmit flags + * + * 57712 related + */ +static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) +{ + *parsing_data |= + ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; + + if (xmit_type & XMIT_CSUM_TCP) { + *parsing_data |= ((tcp_hdrlen(skb) / 4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + + return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; + } else + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. + */ + return skb_transport_header(skb) + + sizeof(struct udphdr) - skb->data; +} + +static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) +{ + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; + + if (xmit_type & XMIT_CSUM_V4) + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IP_CSUM; + else + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IPV6; + + if (!(xmit_type & XMIT_CSUM_TCP)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; +} + +/** + * bnx2x_set_pbd_csum - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @pbd: parse BD to be updated + * @xmit_type: xmit flags + */ +static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) +{ + u8 hlen = (skb_network_header(skb) - skb->data) >> 1; + + /* for now NS flag is not used in Linux */ + pbd->global_data = + (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); + + pbd->ip_hlen_w = (skb_transport_header(skb) - + skb_network_header(skb)) >> 1; + + hlen += pbd->ip_hlen_w; + + /* We support checksum offload for TCP and UDP only */ + if (xmit_type & XMIT_CSUM_TCP) + hlen += tcp_hdrlen(skb) / 2; + else + hlen += sizeof(struct udphdr) / 2; + + pbd->total_hlen_w = cpu_to_le16(hlen); + hlen = hlen*2; + + if (xmit_type & XMIT_CSUM_TCP) { + pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); + + } else { + s8 fix = SKB_CS_OFF(skb); /* signed! 
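+		 * bnx2x_csum_fix() folds |fix| bytes adjacent to the
+		 * transport header in or out of the checksum.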
*/ + + DP(NETIF_MSG_TX_QUEUED, + "hlen %d fix %d csum before fix %x\n", + le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); + + /* HW bug: fixup the CSUM */ + pbd->tcp_pseudo_csum = + bnx2x_csum_fix(skb_transport_header(skb), + SKB_CS(skb), fix); + + DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", + pbd->tcp_pseudo_csum); + } + + return hlen; +} + +/* called with netif_tx_lock + * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue() + */ +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + struct bnx2x_fastpath *fp; + struct netdev_queue *txq; + struct bnx2x_fp_txdata *txdata; + struct sw_tx_bd *tx_buf; + struct eth_tx_start_bd *tx_start_bd, *first_bd; + struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; + struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; + struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + u32 pbd_e2_parsing_data = 0; + u16 pkt_prod, bd_prod; + int nbd, txq_index, fp_index, txdata_index; + dma_addr_t mapping; + u32 xmit_type = bnx2x_xmit_type(bp, skb); + int i; + u8 hlen = 0; + __le16 pkt_size = 0; + struct ethhdr *eth; + u8 mac_type = UNICAST_ADDRESS; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return NETDEV_TX_BUSY; +#endif + + txq_index = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, txq_index); + + BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); + + /* decode the fastpath index and the cos index from the txq */ + fp_index = TXQ_TO_FP(txq_index); + txdata_index = TXQ_TO_COS(txq_index); + +#ifdef BCM_CNIC + /* + * Override the above for the FCoE queue: + * - FCoE fp entry is right after the ETH entries. + * - FCoE L2 queue uses bp->txdata[0] only. + */ + if (unlikely(!NO_FCOE(bp) && (txq_index == + bnx2x_fcoe_tx(bp, txq_index)))) { + fp_index = FCOE_IDX; + txdata_index = 0; + } +#endif + + /* enable this debug print to view the transmission queue being used + DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n", + txq_index, fp_index, txdata_index); */ + + /* locate the fastpath and the txdata */ + fp = &bp->fp[fp_index]; + txdata = &fp->txdata[txdata_index]; + + /* enable this debug print to view the tranmission details + DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" + " tx_data ptr %p fp pointer %p\n", + txdata->cid, fp_index, txdata_index, txdata, fp); */ + + if (unlikely(bnx2x_tx_avail(bp, txdata) < + (skb_shinfo(skb)->nr_frags + 3))) { + fp->eth_q_stats.driver_xoff++; + netif_tx_stop_queue(txq); + BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " + "protocol(%x,%x) gso type %x xmit_type %x\n", + txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, + ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); + + eth = (struct ethhdr *)skb->data; + + /* set flag according to packet type (UNICAST_ADDRESS is default)*/ + if (unlikely(is_multicast_ether_addr(eth->h_dest))) { + if (is_broadcast_ether_addr(eth->h_dest)) + mac_type = BROADCAST_ADDRESS; + else + mac_type = MULTICAST_ADDRESS; + } + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) + /* First, check if we need to linearize the skb (due to FW + restrictions). 
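+	 * The FW restriction caps the number of BDs fetched per packet
+	 * (MAX_FETCH_BD), so an skb with too many fragments has to be
+	 * copied into a linear buffer first.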
No need to check fragmentation if page size > 8K + (there will be no violation to FW restrictions) */ + if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { + /* Statistics of linearization */ + bp->lin_cnt++; + if (skb_linearize(skb) != 0) { + DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " + "silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + /* Map skb linear data for DMA */ + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " + "silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* + Please read carefully. First we use one BD which we mark as start, + then we have a parsing info BD (used for TSO or xsum), + and only then we have the rest of the TSO BDs. + (don't forget to mark the last one as last, + and to unmap only AFTER you write to the BD ...) + And above all, all pdb sizes are in words - NOT DWORDS! + */ + + /* get current pkt produced now - advance it just before sending packet + * since mapping of pages may fail and cause packet to be dropped + */ + pkt_prod = txdata->tx_pkt_prod; + bd_prod = TX_BD(txdata->tx_bd_prod); + + /* get a tx_buf and first BD + * tx_start_bd may be changed during SPLIT, + * but first_bd will always stay first + */ + tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; + tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; + first_bd = tx_start_bd; + + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, + mac_type); + + /* header nbd */ + SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); + + /* remember the first BD of the packet */ + tx_buf->first_bd = txdata->tx_bd_prod; + tx_buf->skb = skb; + tx_buf->flags = 0; + + DP(NETIF_MSG_TX_QUEUED, + "sending pkt %u @%p next_idx %u bd %u @%p\n", + pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); + + if (vlan_tx_tag_present(skb)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tx_tag_get(skb)); + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + } else + tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + + /* turn on parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + if (xmit_type & XMIT_CSUM) + bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); + + if (!CHIP_IS_E1x(bp)) { + pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; + memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); + /* Set PBD in checksum offload case */ + if (xmit_type & XMIT_CSUM) + hlen = bnx2x_set_pbd_csum_e2(bp, skb, + &pbd_e2_parsing_data, + xmit_type); + if (IS_MF_SI(bp)) { + /* + * fill in the MAC addresses in the PBD - for local + * switching + */ + bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, + &pbd_e2->src_mac_addr_mid, + &pbd_e2->src_mac_addr_lo, + eth->h_source); + bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, + &pbd_e2->dst_mac_addr_mid, + &pbd_e2->dst_mac_addr_lo, + eth->h_dest); + } + } else { + pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; + memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); + /* Set PBD in checksum offload case */ + if (xmit_type & XMIT_CSUM) + hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); + + } + + /* Setup the data pointer of the first BD of the packet */ + tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + 
nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ + tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); + pkt_size = tx_start_bd->nbytes; + + DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" + " nbytes %d flags %x vlan %x\n", + tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, + le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), + tx_start_bd->bd_flags.as_bitfield, + le16_to_cpu(tx_start_bd->vlan_or_ethertype)); + + if (xmit_type & XMIT_GSO) { + + DP(NETIF_MSG_TX_QUEUED, + "TSO packet len %d hlen %d total len %d tso size %d\n", + skb->len, hlen, skb_headlen(skb), + skb_shinfo(skb)->gso_size); + + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; + + if (unlikely(skb_headlen(skb) > hlen)) + bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, + &tx_start_bd, hlen, + bd_prod, ++nbd); + if (!CHIP_IS_E1x(bp)) + bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, + xmit_type); + else + bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); + } + + /* Set the PBD's parsing_data field if not zero + * (for the chips newer than 57711). + */ + if (pbd_e2_parsing_data) + pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); + + tx_data_bd = (struct eth_tx_bd *)tx_start_bd; + + /* Handle fragmented skb */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = dma_map_page(&bp->pdev->dev, frag->page, + frag->page_offset, frag->size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + + DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " + "dropping packet...\n"); + + /* we need unmap all buffers already mapped + * for this SKB; + * first_bd->nbd need to be properly updated + * before call to bnx2x_free_tx_pkt + */ + first_bd->nbd = cpu_to_le16(nbd); + bnx2x_free_tx_pkt(bp, txdata, + TX_BD(txdata->tx_pkt_prod)); + return NETDEV_TX_OK; + } + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + if (total_pkt_bd == NULL) + total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + + tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_data_bd->nbytes = cpu_to_le16(frag->size); + le16_add_cpu(&pkt_size, frag->size); + nbd++; + + DP(NETIF_MSG_TX_QUEUED, + "frag %d bd @%p addr (%x:%x) nbytes %d\n", + i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, + le16_to_cpu(tx_data_bd->nbytes)); + } + + DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); + + /* update with actual num BDs */ + first_bd->nbd = cpu_to_le16(nbd); + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + /* now send a tx doorbell, counting the next BD + * if the packet contains or ends with it + */ + if (TX_BD_POFF(bd_prod) < nbd) + nbd++; + + /* total_pkt_bytes should be set on the first data BD if + * it's not an LSO packet and there is more than one + * data BD. In this case pkt_size is limited by an MTU value. + * However we prefer to set it for an LSO packet (while we don't + * have to) in order to save some CPU cycles in a none-LSO + * case, when we much more care about them. 
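+	 * pkt_size was accumulated above from the linear part and every
+	 * mapped fragment.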
+ */ + if (total_pkt_bd != NULL) + total_pkt_bd->total_pkt_bytes = pkt_size; + + if (pbd_e1x) + DP(NETIF_MSG_TX_QUEUED, + "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" + " tcp_flags %x xsum %x seq %u hlen %u\n", + pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, + pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, + pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, + le16_to_cpu(pbd_e1x->total_hlen_w)); + if (pbd_e2) + DP(NETIF_MSG_TX_QUEUED, + "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", + pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, + pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, + pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, + pbd_e2->parsing_data); + DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + + txdata->tx_pkt_prod++; + /* + * Make sure that the BD data is updated before updating the producer + * since FW might read the BD right after the producer is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes packets must have BDs. + */ + wmb(); + + txdata->tx_db.data.prod += nbd; + barrier(); + + DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + + mmiowb(); + + txdata->tx_bd_prod += nbd; + + if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { + netif_tx_stop_queue(txq); + + /* paired memory barrier is in bnx2x_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons */ + smp_mb(); + + fp->eth_q_stats.driver_xoff++; + if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) + netif_tx_wake_queue(txq); + } + txdata->tx_pkt++; + + return NETDEV_TX_OK; +} + +/** + * bnx2x_setup_tc - routine to configure net_device for multi tc + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + * + * callback connected to the ndo_setup_tc function pointer + */ +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) +{ + int cos, prio, count, offset; + struct bnx2x *bp = netdev_priv(dev); + + /* setup tc must be called under rtnl lock */ + ASSERT_RTNL(); + + /* no traffic classes requested. aborting */ + if (!num_tc) { + netdev_reset_tc(dev); + return 0; + } + + /* requested to support too many traffic classes */ + if (num_tc > bp->max_cos) { + DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" + " requested: %d. 
max supported is %d\n", + num_tc, bp->max_cos); + return -EINVAL; + } + + /* declare amount of supported traffic classes */ + if (netdev_set_num_tc(dev, num_tc)) { + DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n", + num_tc); + return -EINVAL; + } + + /* configure priority to traffic class mapping */ + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", + prio, bp->prio_to_cos[prio]); + } + + + /* Use this configuration to diffrentiate tc0 from other COSes + This can be used for ets or pfc, and save the effort of setting + up a multio class queue disc or negotiating DCBX with a switch + netdev_set_prio_tc_map(dev, 0, 0); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0); + for (prio = 1; prio < 16; prio++) { + netdev_set_prio_tc_map(dev, prio, 1); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1); + } */ + + /* configure traffic class to transmission queue mapping */ + for (cos = 0; cos < bp->max_cos; cos++) { + count = BNX2X_NUM_ETH_QUEUES(bp); + offset = cos * MAX_TXQS_PER_COS; + netdev_set_tc_queue(dev, cos, count, offset); + DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n", + cos, offset, count); + } + + return 0; +} + +/* called with rtnl_lock */ +int bnx2x_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + if (!is_valid_ether_addr((u8 *)(addr->sa_data))) + return -EINVAL; + + if (netif_running(dev)) { + rc = bnx2x_set_eth_mac(bp, false); + if (rc) + return rc; + } + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (netif_running(dev)) + rc = bnx2x_set_eth_mac(bp, true); + + return rc; +} + +static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) +{ + union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); + struct bnx2x_fastpath *fp = &bp->fp[fp_index]; + u8 cos; + + /* Common */ +#ifdef BCM_CNIC + if (IS_FCOE_IDX(fp_index)) { + memset(sb, 0, sizeof(union host_hc_status_block)); + fp->status_blk_mapping = 0; + + } else { +#endif + /* status blocks */ + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_FREE(sb->e2_sb, + bnx2x_fp(bp, fp_index, + status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_FREE(sb->e1x_sb, + bnx2x_fp(bp, fp_index, + status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); +#ifdef BCM_CNIC + } +#endif + /* Rx */ + if (!skip_rx_queue(bp, fp_index)) { + bnx2x_free_rx_bds(fp); + + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), + bnx2x_fp(bp, fp_index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), + bnx2x_fp(bp, fp_index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); + + /* SGE ring */ + BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), + bnx2x_fp(bp, fp_index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + } + + /* Tx */ + if (!skip_tx_queue(bp, fp_index)) { + /* fastpath tx rings: tx_buf tx_desc */ + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + DP(BNX2X_MSG_SP, + "freeing tx memory of fp %d cos %d cid %d\n", + fp_index, cos, txdata->cid); + + BNX2X_FREE(txdata->tx_buf_ring); + BNX2X_PCI_FREE(txdata->tx_desc_ring, + txdata->tx_desc_mapping, + sizeof(union 
eth_tx_bd_types) * NUM_TX_BD); + } + } + /* end of fastpath */ +} + +void bnx2x_free_fp_mem(struct bnx2x *bp) +{ + int i; + for_each_queue(bp, i) + bnx2x_free_fp_mem_at(bp, i); +} + +static inline void set_sb_shortcuts(struct bnx2x *bp, int index) +{ + union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); + if (!CHIP_IS_E1x(bp)) { + bnx2x_fp(bp, index, sb_index_values) = + (__le16 *)status_blk.e2_sb->sb.index_values; + bnx2x_fp(bp, index, sb_running_index) = + (__le16 *)status_blk.e2_sb->sb.running_index; + } else { + bnx2x_fp(bp, index, sb_index_values) = + (__le16 *)status_blk.e1x_sb->sb.index_values; + bnx2x_fp(bp, index, sb_running_index) = + (__le16 *)status_blk.e1x_sb->sb.running_index; + } +} + +static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) +{ + union host_hc_status_block *sb; + struct bnx2x_fastpath *fp = &bp->fp[index]; + int ring_size = 0; + u8 cos; + + /* if rx_ring_size specified - use it */ + int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : + MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + + /* allocate at least number of buffers required by FW */ + rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : + MIN_RX_SIZE_TPA, + rx_ring_size); + + /* Common */ + sb = &bnx2x_fp(bp, index, status_blk); +#ifdef BCM_CNIC + if (!IS_FCOE_IDX(index)) { +#endif + /* status blocks */ + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_ALLOC(sb->e2_sb, + &bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_ALLOC(sb->e1x_sb, + &bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); +#ifdef BCM_CNIC + } +#endif + + /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to + * set shortcuts for it. + */ + if (!IS_FCOE_IDX(index)) + set_sb_shortcuts(bp, index); + + /* Tx */ + if (!skip_tx_queue(bp, index)) { + /* fastpath tx rings: tx_buf tx_desc */ + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + DP(BNX2X_MSG_SP, "allocating tx memory of " + "fp %d cos %d\n", + index, cos); + + BNX2X_ALLOC(txdata->tx_buf_ring, + sizeof(struct sw_tx_bd) * NUM_TX_BD); + BNX2X_PCI_ALLOC(txdata->tx_desc_ring, + &txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + } + } + + /* Rx */ + if (!skip_rx_queue(bp, index)) { + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), + sizeof(struct sw_rx_bd) * NUM_RX_BD); + BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), + &bnx2x_fp(bp, index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + + BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), + &bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); + + /* SGE ring */ + BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), + sizeof(struct sw_rx_page) * NUM_RX_SGE); + BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), + &bnx2x_fp(bp, index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + /* RX BD ring */ + bnx2x_set_next_page_rx_bd(fp); + + /* CQ ring */ + bnx2x_set_next_page_rx_cq(fp); + + /* BDs */ + ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); + if (ring_size < rx_ring_size) + goto alloc_mem_err; + } + + return 0; + +/* handles low memory cases */ +alloc_mem_err: + BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n", + index, ring_size); + /* FW will drop all packets if queue is not big enough, + * In these cases we disable the queue + * Min size is different for OOO, TPA and non-TPA queues + */ + if (ring_size < 
(fp->disable_tpa ? + MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { + /* release memory allocated for this queue */ + bnx2x_free_fp_mem_at(bp, index); + return -ENOMEM; + } + return 0; +} + +int bnx2x_alloc_fp_mem(struct bnx2x *bp) +{ + int i; + + /** + * 1. Allocate FP for leading - fatal if error + * 2. {CNIC} Allocate FCoE FP - fatal if error + * 3. {CNIC} Allocate OOO + FWD - disable OOO if error + * 4. Allocate RSS - fix number of queues if error + */ + + /* leading */ + if (bnx2x_alloc_fp_mem_at(bp, 0)) + return -ENOMEM; + +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) + /* FCoE */ + if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) + /* we will fail load process instead of mark + * NO_FCOE_FLAG + */ + return -ENOMEM; +#endif + + /* RSS */ + for_each_nondefault_eth_queue(bp, i) + if (bnx2x_alloc_fp_mem_at(bp, i)) + break; + + /* handle memory failures */ + if (i != BNX2X_NUM_ETH_QUEUES(bp)) { + int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; + + WARN_ON(delta < 0); +#ifdef BCM_CNIC + /** + * move non eth FPs next to last eth FP + * must be done in that order + * FCOE_IDX < FWD_IDX < OOO_IDX + */ + + /* move FCoE fp even NO_FCOE_FLAG is on */ + bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); +#endif + bp->num_queues -= delta; + BNX2X_ERR("Adjusted num of queues from %d to %d\n", + bp->num_queues + delta, bp->num_queues); + } + + return 0; +} + +void bnx2x_free_mem_bp(struct bnx2x *bp) +{ + kfree(bp->fp); + kfree(bp->msix_table); + kfree(bp->ilt); +} + +int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp; + struct msix_entry *tbl; + struct bnx2x_ilt *ilt; + int msix_table_size = 0; + + /* + * The biggest MSI-X table we might need is as a maximum number of fast + * path IGU SBs plus default SB (for PF). + */ + msix_table_size = bp->igu_sb_cnt + 1; + + /* fp array: RSS plus CNIC related L2 queues */ + fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) * + sizeof(*fp), GFP_KERNEL); + if (!fp) + goto alloc_err; + bp->fp = fp; + + /* msix table */ + tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL); + if (!tbl) + goto alloc_err; + bp->msix_table = tbl; + + /* ilt */ + ilt = kzalloc(sizeof(*ilt), GFP_KERNEL); + if (!ilt) + goto alloc_err; + bp->ilt = ilt; + + return 0; +alloc_err: + bnx2x_free_mem_bp(bp); + return -ENOMEM; + +} + +int bnx2x_reload_if_running(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (unlikely(!netif_running(dev))) + return 0; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + return bnx2x_nic_load(bp, LOAD_NORMAL); +} + +int bnx2x_get_cur_phy_idx(struct bnx2x *bp) +{ + u32 sel_phy_idx = 0; + if (bp->link_params.num_phys <= 1) + return INT_PHY; + + if (bp->link_vars.link_up) { + sel_phy_idx = EXT_PHY1; + /* In case link is SERDES, check if the EXT_PHY2 is the one */ + if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && + (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) + sel_phy_idx = EXT_PHY2; + } else { + + switch (bnx2x_phy_selection(&bp->link_params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + sel_phy_idx = EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + sel_phy_idx = EXT_PHY2; + break; + } + } + + return sel_phy_idx; + +} +int bnx2x_get_link_cfg_idx(struct bnx2x *bp) +{ + u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); + /* + * The selected actived PHY is always after swapping (in case PHY + * swapping is enabled). 
So when swapping is enabled, we need to reverse + * the configuration + */ + + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + if (sel_phy_idx == EXT_PHY1) + sel_phy_idx = EXT_PHY2; + else if (sel_phy_idx == EXT_PHY2) + sel_phy_idx = EXT_PHY1; + } + return LINK_CONFIG_IDX(sel_phy_idx); +} + +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + switch (type) { + case NETDEV_FCOE_WWNN: + *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, + cp->fcoe_wwn_node_name_lo); + break; + case NETDEV_FCOE_WWPN: + *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, + cp->fcoe_wwn_port_name_lo); + break; + default: + return -EINVAL; + } + + return 0; +} +#endif + +/* called with rtnl_lock */ +int bnx2x_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + pr_err("Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || + ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) + return -EINVAL; + + /* This does not race with packet allocation + * because the actual alloc size is + * only updated as part of load + */ + dev->mtu = new_mtu; + + return bnx2x_reload_if_running(dev); +} + +u32 bnx2x_fix_features(struct net_device *dev, u32 features) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* TPA requires Rx CSUM offloading */ + if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) + features &= ~NETIF_F_LRO; + + return features; +} + +int bnx2x_set_features(struct net_device *dev, u32 features) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 flags = bp->flags; + bool bnx2x_reload = false; + + if (features & NETIF_F_LRO) + flags |= TPA_ENABLE_FLAG; + else + flags &= ~TPA_ENABLE_FLAG; + + if (features & NETIF_F_LOOPBACK) { + if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { + bp->link_params.loopback_mode = LOOPBACK_BMAC; + bnx2x_reload = true; + } + } else { + if (bp->link_params.loopback_mode != LOOPBACK_NONE) { + bp->link_params.loopback_mode = LOOPBACK_NONE; + bnx2x_reload = true; + } + } + + if (flags ^ bp->flags) { + bp->flags = flags; + bnx2x_reload = true; + } + + if (bnx2x_reload) { + if (bp->recovery_state == BNX2X_RECOVERY_DONE) + return bnx2x_reload_if_running(dev); + /* else: bnx2x_nic_load() will be called at end of recovery */ + } + + return 0; +} + +void bnx2x_tx_timeout(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + +#ifdef BNX2X_STOP_ON_ERROR + if (!bp->panic) + bnx2x_panic(); +#endif + + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + + /* This allows the netif to be shutdown gracefully before resetting */ + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} + +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + rtnl_lock(); + + pci_save_state(pdev); + + if (!netif_running(dev)) { + rtnl_unlock(); + return 0; + } + + netif_device_detach(dev); + + bnx2x_nic_unload(bp, UNLOAD_CLOSE); + + bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); + + rtnl_unlock(); + + return 0; +} + +int bnx2x_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct 
bnx2x *bp; + int rc; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + pr_err("Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + rtnl_lock(); + + pci_restore_state(pdev); + + if (!netif_running(dev)) { + rtnl_unlock(); + return 0; + } + + bnx2x_set_power_state(bp, PCI_D0); + netif_device_attach(dev); + + /* Since the chip was reset, clear the FW sequence number */ + bp->fw_seq = 0; + rc = bnx2x_nic_load(bp, LOAD_OPEN); + + rtnl_unlock(); + + return rc; +} + + +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid) +{ + /* ustorm cxt validation */ + cxt->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), + CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); + /* xcontext validation */ + cxt->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), + CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); +} + +static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, + u8 fw_sb_id, u8 sb_index, + u8 ticks) +{ + + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); + REG_WR8(bp, addr, ticks); + DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", + port, fw_sb_id, sb_index, ticks); +} + +static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, + u16 fw_sb_id, u8 sb_index, + u8 disable) +{ + u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); + u16 flags = REG_RD16(bp, addr); + /* clear and set */ + flags &= ~HC_INDEX_DATA_HC_ENABLED; + flags |= enable_flag; + REG_WR16(bp, addr, flags); + DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", + port, fw_sb_id, sb_index, disable); +} + +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec) +{ + int port = BP_PORT(bp); + u8 ticks = usec / BNX2X_BTR; + + storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); + + disable = disable ? 1 : (usec ? 0 : 1); + storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); +} diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 9525b936cf62,000000000000..0b9bd551580b mode 100644,000000..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@@ -1,2510 -1,0 +1,2510 @@@ +/* bnx2x_dcb.c: Broadcom Everest network driver. + * + * Copyright 2009-2011 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Eilon Greenstein + * Written by: Dmitry Kravkov + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dcb.h" + +/* forward declarations of dcbx related functions */ +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); +static void bnx2x_pfc_set_pfc(struct bnx2x *bp); +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, + u32 *set_configuration_ets_pg, + u32 *pri_pg_tbl); +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, + u32 *pg_pri_orginal_spread, + struct pg_help_data *help_data); +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + u32 *pg_pri_orginal_spread); +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + struct dcbx_ets_feature *ets); +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, + struct bnx2x_func_tx_start_params*); + +/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */ +static void bnx2x_read_data(struct bnx2x *bp, u32 *buff, + u32 addr, u32 len) +{ + int i; + for (i = 0; i < len; i += 4, buff++) + *buff = REG_RD(bp, addr + i); +} + +static void bnx2x_write_data(struct bnx2x *bp, u32 *buff, + u32 addr, u32 len) +{ + int i; + for (i = 0; i < len; i += 4, buff++) + REG_WR(bp, addr + i, *buff); +} + +static void bnx2x_pfc_set(struct bnx2x *bp) +{ + struct bnx2x_nig_brb_pfc_port_params pfc_params = {0}; + u32 pri_bit, val = 0; + int i; + + pfc_params.num_of_rx_cos_priority_mask = + bp->dcbx_port_params.ets.num_of_cos; + + /* Tx COS configuration */ + for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++) + /* + * We configure only the pauseable bits (non pauseable aren't + * configured at all) it's done to avoid false pauses from + * network + */ + pfc_params.rx_cos_priority_mask[i] = + bp->dcbx_port_params.ets.cos_params[i].pri_bitmask + & DCBX_PFC_PRI_PAUSE_MASK(bp); + + /* + * Rx COS configuration + * Changing PFC RX configuration . 
+ * In RX COS0 will always be configured to lossy and COS1 to lossless + */ + for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { + pri_bit = 1 << i; + + if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) + val |= 1 << (i * 4); + } + + pfc_params.pkt_priority_to_cos = val; + + /* RX COS0 */ + pfc_params.llfc_low_priority_classes = 0; + /* RX COS1 */ + pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); + + /* BRB configuration */ + pfc_params.cos0_pauseable = false; + pfc_params.cos1_pauseable = true; + + bnx2x_acquire_phy_lock(bp); + bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; + bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params); + bnx2x_release_phy_lock(bp); +} + +static void bnx2x_pfc_clear(struct bnx2x *bp) +{ + struct bnx2x_nig_brb_pfc_port_params nig_params = {0}; + nig_params.pause_enable = 1; +#ifdef BNX2X_SAFC + if (bp->flags & SAFC_TX_FLAG) { + u32 high = 0, low = 0; + int i; + + for (i = 0; i < BNX2X_MAX_PRIORITY; i++) { + if (bp->pri_map[i] == 1) + high |= (1 << i); + if (bp->pri_map[i] == 0) + low |= (1 << i); + } + + nig_params.llfc_low_priority_classes = high; + nig_params.llfc_low_priority_classes = low; + + nig_params.pause_enable = 0; + nig_params.llfc_enable = 1; + nig_params.llfc_out_en = 1; + } +#endif /* BNX2X_SAFC */ + bnx2x_acquire_phy_lock(bp); + bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; + bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); + bnx2x_release_phy_lock(bp); +} + +static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, + struct dcbx_features *features, + u32 error) +{ + u8 i = 0; + DP(NETIF_MSG_LINK, "local_mib.error %x\n", error); + + /* PG */ + DP(NETIF_MSG_LINK, + "local_mib.features.ets.enabled %x\n", features->ets.enabled); + for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) + DP(NETIF_MSG_LINK, + "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i, + DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i)); + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) + DP(NETIF_MSG_LINK, + "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i, + DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i)); + + /* pfc */ + DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n", + features->pfc.pri_en_bitmap); + DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n", + features->pfc.pfc_caps); + DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n", + features->pfc.enabled); + + DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n", + features->app.default_pri); + DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n", + features->app.tc_supported); + DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n", + features->app.enabled); + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + DP(NETIF_MSG_LINK, + "dcbx_features.app.app_pri_tbl[%x].app_id %x\n", + i, features->app.app_pri_tbl[i].app_id); + DP(NETIF_MSG_LINK, + "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n", + i, features->app.app_pri_tbl[i].pri_bitmap); + DP(NETIF_MSG_LINK, + "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n", + i, features->app.app_pri_tbl[i].appBitfield); + } +} + +static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp, + u8 pri_bitmap, + u8 llfc_traf_type) +{ + u32 pri = MAX_PFC_PRIORITIES; + u32 index = MAX_PFC_PRIORITIES - 1; + u32 pri_mask; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + /* Choose the highest priority */ + while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) { + pri_mask = 1 << index; + if (GET_FLAGS(pri_bitmap, pri_mask)) + pri = index ; + index--; + } + + if (pri < MAX_PFC_PRIORITIES) + 
ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri); +} + +static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, + struct dcbx_app_priority_feature *app, + u32 error) { + u8 index; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n"); + + if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n"); + + if (app->enabled && + !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) { + + bp->dcbx_port_params.app.enabled = true; + + for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) + ttp[index] = 0; + + if (app->default_pri < MAX_PFC_PRIORITIES) + ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri; + + for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) { + struct dcbx_app_priority_entry *entry = + app->app_pri_tbl; + + if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE) && + ETH_TYPE_FCOE == entry[index].app_id) + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + LLFC_TRAFFIC_TYPE_FCOE); + + if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_PORT) && + TCP_PORT_ISCSI == entry[index].app_id) + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + LLFC_TRAFFIC_TYPE_ISCSI); + } + } else { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n"); + bp->dcbx_port_params.app.enabled = false; + for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) + ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY; + } +} + +static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, + struct dcbx_ets_feature *ets, + u32 error) { + int i = 0; + u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0}; + struct pg_help_data pg_help_data; + struct bnx2x_dcbx_cos_params *cos_params = + bp->dcbx_port_params.ets.cos_params; + + memset(&pg_help_data, 0, sizeof(struct pg_help_data)); + + + if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n"); + + + /* Clean up old settings of ets on COS */ + for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { + cos_params[i].pauseable = false; + cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID; + cos_params[i].bw_tbl = DCBX_INVALID_COS_BW; + cos_params[i].pri_bitmask = 0; + } + + if (bp->dcbx_port_params.app.enabled && + !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) && + ets->enabled) { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n"); + bp->dcbx_port_params.ets.enabled = true; + + bnx2x_dcbx_get_ets_pri_pg_tbl(bp, + pg_pri_orginal_spread, + ets->pri_pg_tbl); + + bnx2x_dcbx_get_num_pg_traf_type(bp, + pg_pri_orginal_spread, + &pg_help_data); + + bnx2x_dcbx_fill_cos_params(bp, &pg_help_data, + ets, pg_pri_orginal_spread); + + } else { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n"); + bp->dcbx_port_params.ets.enabled = false; + ets->pri_pg_tbl[0] = 0; + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++) + DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1); + } +} + +static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, + struct dcbx_pfc_feature *pfc, u32 error) +{ + + if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n"); + + if (bp->dcbx_port_params.app.enabled && + !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) && + pfc->enabled) { + bp->dcbx_port_params.pfc.enabled = true; + bp->dcbx_port_params.pfc.priority_non_pauseable_mask = + ~(pfc->pri_en_bitmap); + } else { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n"); + bp->dcbx_port_params.pfc.enabled = false; + 
bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; + } +} + +/* maps unmapped priorities to to the same COS as L2 */ +static void bnx2x_dcbx_map_nw(struct bnx2x *bp) +{ + int i; + u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */ + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW]; + struct bnx2x_dcbx_cos_params *cos_params = + bp->dcbx_port_params.ets.cos_params; + + /* get unmapped priorities by clearing mapped bits */ + for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) + unmapped &= ~(1 << ttp[i]); + + /* find cos for nw prio and extend it with unmapped */ + for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { + if (cos_params[i].pri_bitmask & nw_prio) { + /* extend the bitmask with unmapped */ + DP(NETIF_MSG_LINK, + "cos %d extended with 0x%08x\n", i, unmapped); + cos_params[i].pri_bitmask |= unmapped; + break; + } + } +} + +static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp, + struct dcbx_features *features, + u32 error) +{ + bnx2x_dcbx_get_ap_feature(bp, &features->app, error); + + bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error); + + bnx2x_dcbx_get_ets_feature(bp, &features->ets, error); + + bnx2x_dcbx_map_nw(bp); +} + +#define DCBX_LOCAL_MIB_MAX_TRY_READ (100) +static int bnx2x_dcbx_read_mib(struct bnx2x *bp, + u32 *base_mib_addr, + u32 offset, + int read_mib_type) +{ + int max_try_read = 0; + u32 mib_size, prefix_seq_num, suffix_seq_num; + struct lldp_remote_mib *remote_mib ; + struct lldp_local_mib *local_mib; + + + switch (read_mib_type) { + case DCBX_READ_LOCAL_MIB: + mib_size = sizeof(struct lldp_local_mib); + break; + case DCBX_READ_REMOTE_MIB: + mib_size = sizeof(struct lldp_remote_mib); + break; + default: + return 1; /*error*/ + } + + offset += BP_PORT(bp) * mib_size; + + do { + bnx2x_read_data(bp, base_mib_addr, offset, mib_size); + + max_try_read++; + + switch (read_mib_type) { + case DCBX_READ_LOCAL_MIB: + local_mib = (struct lldp_local_mib *) base_mib_addr; + prefix_seq_num = local_mib->prefix_seq_num; + suffix_seq_num = local_mib->suffix_seq_num; + break; + case DCBX_READ_REMOTE_MIB: + remote_mib = (struct lldp_remote_mib *) base_mib_addr; + prefix_seq_num = remote_mib->prefix_seq_num; + suffix_seq_num = remote_mib->suffix_seq_num; + break; + default: + return 1; /*error*/ + } + } while ((prefix_seq_num != suffix_seq_num) && + (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ)); + + if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) { + BNX2X_ERR("MIB could not be read\n"); + return 1; + } + + return 0; +} + +static void bnx2x_pfc_set_pfc(struct bnx2x *bp) +{ + if (bp->dcbx_port_params.pfc.enabled && + !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) + /* + * 1. Fills up common PFC structures if required + * 2. 
Configure NIG, MAC and BRB via the elink + */ + bnx2x_pfc_set(bp); + else + bnx2x_pfc_clear(bp); +} + +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_TX_STOP; + + DP(NETIF_MSG_LINK, "STOP TRAFFIC\n"); + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_tx_start_params *tx_params = + &func_params.params.tx_start; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_TX_START; + + bnx2x_dcbx_fw_struct(bp, tx_params); + + DP(NETIF_MSG_LINK, "START TRAFFIC\n"); + return bnx2x_func_state_change(bp, &func_params); +} + +static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) +{ + struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); + int rc = 0; + + if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) { + BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos); + return; + } + + /* valid COS entries */ + if (ets->num_of_cos == 1) /* no ETS */ + return; + + /* sanity */ + if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) && + (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) || + ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) && + (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) { + BNX2X_ERR("all COS should have at least bw_limit or strict" + "ets->cos_params[0].strict= %x" + "ets->cos_params[0].bw_tbl= %x" + "ets->cos_params[1].strict= %x" + "ets->cos_params[1].bw_tbl= %x", + ets->cos_params[0].strict, + ets->cos_params[0].bw_tbl, + ets->cos_params[1].strict, + ets->cos_params[1].bw_tbl); + return; + } + /* If we join a group and there is bw_tbl and strict then bw rules */ + if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) && + (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) { + u32 bw_tbl_0 = ets->cos_params[0].bw_tbl; + u32 bw_tbl_1 = ets->cos_params[1].bw_tbl; + /* Do not allow 0-100 configuration + * since PBF does not support it + * force 1-99 instead + */ + if (bw_tbl_0 == 0) { + bw_tbl_0 = 1; + bw_tbl_1 = 99; + } else if (bw_tbl_1 == 0) { + bw_tbl_1 = 1; + bw_tbl_0 = 99; + } + + bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); + } else { + if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST) + rc = bnx2x_ets_strict(&bp->link_params, 0); + else if (ets->cos_params[1].strict + == BNX2X_DCBX_STRICT_COS_HIGHEST) + rc = bnx2x_ets_strict(&bp->link_params, 1); + if (rc) + BNX2X_ERR("update_ets_params failed\n"); + } +} + +/* + * In E3B0 the configuration may have more than 2 COS. 
+ */ +void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) +{ + struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); + struct bnx2x_ets_params ets_params = { 0 }; + u8 i; + + ets_params.num_of_cos = ets->num_of_cos; + + for (i = 0; i < ets->num_of_cos; i++) { + /* COS is SP */ + if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) { + if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) { + BNX2X_ERR("COS can't be not BW and not SP\n"); + return; + } + + ets_params.cos[i].state = bnx2x_cos_state_strict; + ets_params.cos[i].params.sp_params.pri = + ets->cos_params[i].strict; + } else { /* COS is BW */ + if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) { + BNX2X_ERR("COS can't be not BW and not SP\n"); + return; + } + ets_params.cos[i].state = bnx2x_cos_state_bw; + ets_params.cos[i].params.bw_params.bw = + (u8)ets->cos_params[i].bw_tbl; + } + } + + /* Configure the ETS in HW */ + if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars, + &ets_params)) { + BNX2X_ERR("bnx2x_ets_e3b0_config failed\n"); + bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); + } +} + +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) +{ + bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); + + if (!bp->dcbx_port_params.ets.enabled || + (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) + return; + + if (CHIP_IS_E3B0(bp)) + bnx2x_dcbx_update_ets_config(bp); + else + bnx2x_dcbx_2cos_limit_update_ets_config(bp); +} + +#ifdef BCM_DCBNL +static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) +{ + struct lldp_remote_mib remote_mib = {0}; + u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset); + int rc; + + DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n", + dcbx_remote_mib_offset); + + if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) { + BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n"); + return -EINVAL; + } + + rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset, + DCBX_READ_REMOTE_MIB); + + if (rc) { + BNX2X_ERR("Faild to read remote mib from FW\n"); + return rc; + } + + /* save features and flags */ + bp->dcbx_remote_feat = remote_mib.features; + bp->dcbx_remote_flags = remote_mib.flags; + return 0; +} +#endif + +static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) +{ + struct lldp_local_mib local_mib = {0}; + u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset); + int rc; + + DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset); + + if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) { + BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); + return -EINVAL; + } + + rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, + DCBX_READ_LOCAL_MIB); + + if (rc) { + BNX2X_ERR("Faild to read local mib from FW\n"); + return rc; + } + + /* save features and error */ + bp->dcbx_local_feat = local_mib.features; + bp->dcbx_error = local_mib.error; + return 0; +} + + +#ifdef BCM_DCBNL +static inline +u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) +{ + u8 pri; + + /* Choose the highest priority */ + for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--) + if (ent->pri_bitmap & (1 << pri)) + break; + return pri; +} + +static inline +u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent) +{ + return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) == + DCBX_APP_SF_PORT) ? 
DCB_APP_IDTYPE_PORTNUM : + DCB_APP_IDTYPE_ETHTYPE; +} + +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) +{ + int i, err = 0; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) { + struct dcbx_app_priority_entry *ent = + &bp->dcbx_local_feat.app.app_pri_tbl[i]; + + if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { + u8 up = bnx2x_dcbx_dcbnl_app_up(ent); + + /* avoid invalid user-priority */ + if (up) { + struct dcb_app app; + app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent); + app.protocol = ent->app_id; + app.priority = delall ? 0 : up; + err = dcb_setapp(bp->dev, &app); + } + } + } + return err; +} +#endif + +static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) +{ + if (SHMEM2_HAS(bp, drv_flags)) { + u32 drv_flags; + bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); + drv_flags = SHMEM2_RD(bp, drv_flags); + + if (set) + SET_FLAGS(drv_flags, flags); + else + RESET_FLAGS(drv_flags, flags); + + SHMEM2_WR(bp, drv_flags, drv_flags); + DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); + bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); + } +} + +static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) +{ + u8 prio, cos; + for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) { + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask + & (1 << prio)) { + bp->prio_to_cos[prio] = cos; + DP(NETIF_MSG_LINK, + "tx_mapping %d --> %d\n", prio, cos); + } + } + } + + /* setup tc must be called under rtnl lock, but we can't take it here + * as we are handling an attetntion on a work queue which must be + * flushed at some rtnl-locked contexts (e.g. if down) + */ + if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} + +void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) +{ + switch (state) { + case BNX2X_DCBX_STATE_NEG_RECEIVED: + { + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); +#ifdef BCM_DCBNL + /** + * Delete app tlvs from dcbnl before reading new + * negotiation results + */ + bnx2x_dcbnl_update_applist(bp, true); + + /* Read rmeote mib if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_remote_mib(bp)) + return; +#endif + /* Read neg results if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_neg_results(bp)) + return; + + bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + + bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + + /* mark DCBX result for PMF migration */ + bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); +#ifdef BCM_DCBNL + /** + * Add new app tlvs to dcbnl + */ + bnx2x_dcbnl_update_applist(bp, false); +#endif + bnx2x_dcbx_stop_hw_tx(bp); + + /* reconfigure the netdevice with the results of the new + * dcbx negotiation. 
+ */ + bnx2x_dcbx_update_tc_mapping(bp); + + return; + } + case BNX2X_DCBX_STATE_TX_PAUSED: + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); + bnx2x_pfc_set_pfc(bp); + + bnx2x_dcbx_update_ets_params(bp); + bnx2x_dcbx_resume_hw_tx(bp); + return; + case BNX2X_DCBX_STATE_TX_RELEASED: + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); +#ifdef BCM_DCBNL + /* + * Send a notification for the new negotiated parameters + */ + dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +#endif + return; + default: + BNX2X_ERR("Unknown DCBX_STATE\n"); + } +} + +#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \ + BP_PORT(bp)*sizeof(struct lldp_admin_mib)) + +static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, + u32 dcbx_lldp_params_offset) +{ + struct lldp_admin_mib admin_mib; + u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0; + u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp); + + /*shortcuts*/ + struct dcbx_features *af = &admin_mib.features; + struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params; + + memset(&admin_mib, 0, sizeof(struct lldp_admin_mib)); + + /* Read the data first */ + bnx2x_read_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); + + if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); + + if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) { + + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK); + admin_mib.ver_cfg_flags |= + (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) & + DCBX_CEE_VERSION_MASK; + + af->ets.enabled = (u8)dp->admin_ets_enable; + + af->pfc.enabled = (u8)dp->admin_pfc_enable; + + /* FOR IEEE dp->admin_tc_supported_tx_enable */ + if (dp->admin_ets_configuration_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_ETS_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_ETS_CONFIG_TX_ENABLED); + /* For IEEE admin_ets_recommendation_tx_enable */ + if (dp->admin_pfc_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_PFC_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_PFC_CONFIG_TX_ENABLED); + + if (dp->admin_application_priority_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_APP_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_APP_CONFIG_TX_ENABLED); + + if (dp->admin_ets_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); + /* For IEEE admin_ets_reco_valid */ + if (dp->admin_pfc_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); + + if (dp->admin_app_priority_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); + + for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) { + DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i, + (u8)dp->admin_configuration_bw_precentage[i]); + + DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n", + i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i)); + } + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { + DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i, + (u8)dp->admin_configuration_ets_pg[i]); + + DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n", + i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); + } + + /*For IEEE admin_recommendation_bw_precentage + *For IEEE 
admin_recommendation_ets_pg */ + af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; + for (i = 0; i < 4; i++) { + if (dp->admin_priority_app_table[i].valid) { + struct bnx2x_admin_priority_app_table *table = + dp->admin_priority_app_table; + if ((ETH_TYPE_FCOE == table[i].app_id) && + (TRAFFIC_TYPE_ETH == table[i].traffic_type)) + traf_type = FCOE_APP_IDX; + else if ((TCP_PORT_ISCSI == table[i].app_id) && + (TRAFFIC_TYPE_PORT == table[i].traffic_type)) + traf_type = ISCSI_APP_IDX; + else + traf_type = other_traf_type++; + + af->app.app_pri_tbl[traf_type].app_id = + table[i].app_id; + + af->app.app_pri_tbl[traf_type].pri_bitmap = + (u8)(1 << table[i].priority); + + af->app.app_pri_tbl[traf_type].appBitfield = + (DCBX_APP_ENTRY_VALID); + + af->app.app_pri_tbl[traf_type].appBitfield |= + (TRAFFIC_TYPE_ETH == table[i].traffic_type) ? + DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT; + } + } + + af->app.default_pri = (u8)dp->admin_default_priority; + + } + + /* Write the data. */ + bnx2x_write_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); + +} + +void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) +{ - if (!CHIP_IS_E1x(bp)) { ++ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) { + bp->dcb_state = dcb_on; + bp->dcbx_enabled = dcbx_enabled; + } else { + bp->dcb_state = false; + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; + } + DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n", + dcb_on ? "ON" : "OFF", + dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" : + dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" : + dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ? + "on-chip with negotiation" : "invalid"); +} + +void bnx2x_dcbx_init_params(struct bnx2x *bp) +{ + bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */ + bp->dcbx_config_params.admin_ets_willing = 1; + bp->dcbx_config_params.admin_pfc_willing = 1; + bp->dcbx_config_params.overwrite_settings = 1; + bp->dcbx_config_params.admin_ets_enable = 1; + bp->dcbx_config_params.admin_pfc_enable = 1; + bp->dcbx_config_params.admin_tc_supported_tx_enable = 1; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; + bp->dcbx_config_params.admin_pfc_tx_enable = 1; + bp->dcbx_config_params.admin_application_priority_tx_enable = 1; + bp->dcbx_config_params.admin_ets_reco_valid = 1; + bp->dcbx_config_params.admin_app_priority_willing = 1; + bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00; + bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50; + bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50; + bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1; + bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2; + bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1; + 
bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2; + bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7; + bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5; + bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6; + bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7; + bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; + bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; + bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; + bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3; + bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4; + bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; + bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; + bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; + bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */ + bp->dcbx_config_params.admin_priority_app_table[0].valid = 1; + bp->dcbx_config_params.admin_priority_app_table[1].valid = 1; + bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[0].priority = 3; + bp->dcbx_config_params.admin_priority_app_table[1].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[2].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[3].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1; + bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906; + bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260; + bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0; + bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0; + bp->dcbx_config_params.admin_default_priority = + bp->dcbx_config_params.admin_priority_app_table[1].priority; +} + +void bnx2x_dcbx_init(struct bnx2x *bp) +{ + u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; + + if (bp->dcbx_enabled <= 0) + return; + + /* validate: + * chip of good for dcbx version, + * dcb is wanted + * the function is pmf + * shmem2 contains DCBX support fields + */ + DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", + bp->dcb_state, bp->port.pmf); + + if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && + SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { + dcbx_lldp_params_offset = + SHMEM2_RD(bp, dcbx_lldp_params_offset); + + DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", + dcbx_lldp_params_offset); + + bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); + + if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { + bnx2x_dcbx_admin_mib_updated_params(bp, + dcbx_lldp_params_offset); + + /* Let HW start negotiation */ + bnx2x_fw_command(bp, + DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); + } + } +} +static void +bnx2x_dcbx_print_cos_params(struct bnx2x *bp, + struct bnx2x_func_tx_start_params *pfc_fw_cfg) +{ + u8 pri = 0; + u8 cos = 0; + + DP(NETIF_MSG_LINK, + "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version); + DP(NETIF_MSG_LINK, + "pdev->params.dcbx_port_params.pfc." 
+ "priority_non_pauseable_mask %x\n", + bp->dcbx_port_params.pfc.priority_non_pauseable_mask); + + for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].pri_bitmask %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); + + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].bw_tbl %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); + + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].strict %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].strict); + + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].pauseable %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].pauseable); + } + + for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { + DP(NETIF_MSG_LINK, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d]." + "priority %x\n", pri, + pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); + + DP(NETIF_MSG_LINK, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n", + pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos); + } +} + +/* fills help_data according to pg_info */ +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, + u32 *pg_pri_orginal_spread, + struct pg_help_data *help_data) +{ + bool pg_found = false; + u32 i, traf_type, add_traf_type, add_pg; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + struct pg_entry_help_data *data = help_data->data; /*shotcut*/ + + /* Set to invalid */ + for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) + data[i].pg = DCBX_ILLEGAL_PG; + + for (add_traf_type = 0; + add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) { + pg_found = false; + if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) { + add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]]; + for (traf_type = 0; + traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; + traf_type++) { + if (data[traf_type].pg == add_pg) { + if (!(data[traf_type].pg_priority & + (1 << ttp[add_traf_type]))) + data[traf_type]. + num_of_dif_pri++; + data[traf_type].pg_priority |= + (1 << ttp[add_traf_type]); + pg_found = true; + break; + } + } + if (false == pg_found) { + data[help_data->num_of_pg].pg = add_pg; + data[help_data->num_of_pg].pg_priority = + (1 << ttp[add_traf_type]); + data[help_data->num_of_pg].num_of_dif_pri = 1; + help_data->num_of_pg++; + } + } + DP(NETIF_MSG_LINK, + "add_traf_type %d pg_found %s num_of_pg %d\n", + add_traf_type, (false == pg_found) ? 
"NO" : "YES", + help_data->num_of_pg); + } +} + +static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 pri_join_mask) +{ + /* Only one priority than only one COS */ + cos_data->data[0].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + cos_data->data[0].pri_join_mask = pri_join_mask; + cos_data->data[0].cos_bw = 100; + cos_data->num_of_cos = 1; +} + +static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp, + struct cos_entry_help_data *data, + u8 pg_bw) +{ + if (data->cos_bw == DCBX_INVALID_COS_BW) + data->cos_bw = pg_bw; + else + data->cos_bw += pg_bw; +} + +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + struct dcbx_ets_feature *ets) +{ + u32 pri_tested = 0; + u8 i = 0; + u8 entry = 0; + u8 pg_entry = 0; + u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; + + cos_data->data[0].pausable = true; + cos_data->data[1].pausable = false; + cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; + + for (i = 0 ; i < num_of_pri ; i++) { + pri_tested = 1 << bp->dcbx_port_params. + app.traffic_type_priority[i]; + + if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) { + cos_data->data[1].pri_join_mask |= pri_tested; + entry = 1; + } else { + cos_data->data[0].pri_join_mask |= pri_tested; + entry = 0; + } + pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params. + app.traffic_type_priority[i]]; + /* There can be only one strict pg */ + if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) + bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry], + DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); + else + /* If we join a group and one is strict + * than the bw rulls */ + cos_data->data[entry].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + if ((0 == cos_data->data[0].pri_join_mask) && + (0 == cos_data->data[1].pri_join_mask)) + BNX2X_ERR("dcbx error: Both groups must have priorities\n"); +} + + +#ifndef POWER_OF_2 +#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) +#endif + +static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct cos_help_data *cos_data, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u32 pri_tested = 0; + u32 pri_mask_without_pri = 0; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + /*debug*/ + if (num_of_dif_pri == 1) { + bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); + return; + } + /* single priority group */ + if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and + * the non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pauseable.*/ + cos_data->data[1].pausable = false; + + if (2 == num_of_dif_pri) { + cos_data->data[0].cos_bw = 50; + cos_data->data[1].cos_bw = 50; + } + + if (3 == num_of_dif_pri) { + if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp, + pri_join_mask))) { + cos_data->data[0].cos_bw = 33; + cos_data->data[1].cos_bw = 67; + } else { + cos_data->data[0].cos_bw = 67; + cos_data->data[1].cos_bw = 33; + } + } + + } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) { + /* If there are only pauseable priorities, + * then one/two priorities go to the first queue + * and one priority goes to the second queue. 
+ */ + if (2 == num_of_dif_pri) { + cos_data->data[0].cos_bw = 50; + cos_data->data[1].cos_bw = 50; + } else { + cos_data->data[0].cos_bw = 67; + cos_data->data[1].cos_bw = 33; + } + cos_data->data[1].pausable = true; + cos_data->data[0].pausable = true; + /* All priorities except FCOE */ + cos_data->data[0].pri_join_mask = (pri_join_mask & + ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]))); + /* Only FCOE priority.*/ + cos_data->data[1].pri_join_mask = + (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]); + } else + /* If there are only non-pauseable priorities, + * they will all go to the same queue. + */ + bnx2x_dcbx_ets_disabled_entry_data(bp, + cos_data, pri_join_mask); + } else { + /* priority group which is not BW limited (PG#15):*/ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + /* If there are both pauseable and non-pauseable + * priorities, the pauseable priorities go to the first + * queue and the non-pauseable priorities + * go to the second queue. + */ + if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > + DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + } else { + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pause-able.*/ + cos_data->data[1].pausable = false; + } else { + /* If there are only pauseable priorities or + * only non-pauseable,* the lower priorities go + * to the first queue and the higherpriorities go + * to the second queue. + */ + cos_data->data[0].pausable = + cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + + for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) { + pri_tested = 1 << bp->dcbx_port_params. + app.traffic_type_priority[i]; + /* Remove priority tested */ + pri_mask_without_pri = + (pri_join_mask & ((u8)(~pri_tested))); + if (pri_mask_without_pri < pri_tested) + break; + } + + if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX) + BNX2X_ERR("Invalid value for pri_join_mask -" + " could not find a priority\n"); + + cos_data->data[0].pri_join_mask = pri_mask_without_pri; + cos_data->data[1].pri_join_mask = pri_tested; + /* Both queues are strict priority, + * and that with the highest priority + * gets the highest strict priority in the arbiter. + */ + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + } +} + +static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( + struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 }; + + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and + * the non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, + pg_help_data->data[0].pg_priority) || + IS_DCBX_PFC_PRI_MIX_PAUSE(bp, + pg_help_data->data[1].pg_priority)) { + /* If one PG contains both pauseable and + * non-pauseable priorities then ETS is disabled. 
+ */ + bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, + pg_pri_orginal_spread, ets); + bp->dcbx_port_params.ets.enabled = false; + return; + } + + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pauseable. */ + cos_data->data[1].pausable = false; + if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, + pg_help_data->data[0].pg_priority)) { + /* 0 is pauseable */ + cos_data->data[0].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[0] = pg_help_data->data[0].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[1] = pg_help_data->data[1].pg; + } else {/* 1 is pauseable */ + cos_data->data[0].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[0] = pg_help_data->data[1].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[1] = pg_help_data->data[0].pg; + } + } else { + /* If there are only pauseable priorities or + * only non-pauseable, each PG goes to a queue. + */ + cos_data->data[0].pausable = cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + cos_data->data[0].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[0] = pg_help_data->data[0].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[1] = pg_help_data->data[1].pg; + } + + /* There can be only one strict pg */ + for (i = 0 ; i < ARRAY_SIZE(pg); i++) { + if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES) + cos_data->data[i].cos_bw = + DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]); + else + cos_data->data[i].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } +} + +static int bnx2x_dcbx_join_pgs( + struct bnx2x *bp, + struct dcbx_ets_feature *ets, + struct pg_help_data *pg_help_data, + u8 required_num_of_pg) +{ + u8 entry_joined = pg_help_data->num_of_pg - 1; + u8 entry_removed = entry_joined + 1; + u8 pg_joined = 0; + + if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data) + <= pg_help_data->num_of_pg) { + + BNX2X_ERR("required_num_of_pg can't be zero\n"); + return -EINVAL; + } + + while (required_num_of_pg < pg_help_data->num_of_pg) { + entry_joined = pg_help_data->num_of_pg - 2; + entry_removed = entry_joined + 1; + /* protect index */ + entry_removed %= ARRAY_SIZE(pg_help_data->data); + + pg_help_data->data[entry_joined].pg_priority |= + pg_help_data->data[entry_removed].pg_priority; + + pg_help_data->data[entry_joined].num_of_dif_pri += + pg_help_data->data[entry_removed].num_of_dif_pri; + + if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG || + pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG) + /* Entries joined strict priority rules */ + pg_help_data->data[entry_joined].pg = + DCBX_STRICT_PRI_PG; + else { + /* Entries can be joined join BW */ + pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_help_data->data[entry_joined].pg) + + DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_help_data->data[entry_removed].pg); + + DCBX_PG_BW_SET(ets->pg_bw_tbl, + pg_help_data->data[entry_joined].pg, pg_joined); + } + /* Joined the entries */ + pg_help_data->num_of_pg--; + } + + return 0; +} + +static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( + struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u32 pri_tested = 0; + u8 entry = 0; + u8 pg_entry = 0; + bool b_found_strict = false; + u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; + + cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; + /* If there are both pauseable and 
non-pauseable priorities, + * the pauseable priorities go to the first queue and the + * non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) + bnx2x_dcbx_separate_pauseable_from_non(bp, + cos_data, pg_pri_orginal_spread, ets); + else { + /* If two BW-limited PG-s were combined to one queue, + * the BW is their sum. + * + * If there are only pauseable priorities or only non-pauseable, + * and there are both BW-limited and non-BW-limited PG-s, + * the BW-limited PG/s go to one queue and the non-BW-limited + * PG/s go to the second queue. + * + * If there are only pauseable priorities or only non-pauseable + * and all are BW limited, then two priorities go to the first + * queue and one priority goes to the second queue. + * + * We will join this two cases: + * if one is BW limited it will go to the secoend queue + * otherwise the last priority will get it + */ + + cos_data->data[0].pausable = cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + + for (i = 0 ; i < num_of_pri; i++) { + pri_tested = 1 << bp->dcbx_port_params. + app.traffic_type_priority[i]; + pg_entry = (u8)pg_pri_orginal_spread[bp-> + dcbx_port_params.app.traffic_type_priority[i]]; + + if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) { + entry = 0; + + if (i == (num_of_pri-1) && + false == b_found_strict) + /* last entry will be handled separately + * If no priority is strict than last + * enty goes to last queue.*/ + entry = 1; + cos_data->data[entry].pri_join_mask |= + pri_tested; + bnx2x_dcbx_add_to_cos_bw(bp, + &cos_data->data[entry], + DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_entry)); + } else { + b_found_strict = true; + cos_data->data[1].pri_join_mask |= pri_tested; + /* If we join a group and one is strict + * than the bw rulls */ + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + } + } +} + + +static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + + /* default E2 settings */ + cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; + + switch (help_data->num_of_pg) { + case 1: + bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params( + bp, + help_data, + cos_data, + pri_join_mask, + num_of_dif_pri); + break; + case 2: + bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( + bp, + help_data, + ets, + cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + break; + + case 3: + bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( + bp, + help_data, + ets, + cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + break; + default: + BNX2X_ERR("Wrong pg_help_data.num_of_pg\n"); + bnx2x_dcbx_ets_disabled_entry_data(bp, + cos_data, pri_join_mask); + } +} + +static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, + struct cos_help_data *cos_data, + u8 entry, + u8 num_spread_of_entries, + u8 strict_app_pris) +{ + u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST; + u8 num_of_app_pri = MAX_PFC_PRIORITIES; + u8 app_pri_bit = 0; + + while (num_spread_of_entries && num_of_app_pri > 0) { + app_pri_bit = 1 << (num_of_app_pri - 1); + if (app_pri_bit & strict_app_pris) { + struct cos_entry_help_data *data = &cos_data-> + data[entry]; + num_spread_of_entries--; + if (num_spread_of_entries == 0) { + /* last entry needed put all the entries left */ + data->cos_bw = DCBX_INVALID_COS_BW; + data->strict = strict_pri; + data->pri_join_mask = strict_app_pris; 
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + } else { + strict_app_pris &= ~app_pri_bit; + + data->cos_bw = DCBX_INVALID_COS_BW; + data->strict = strict_pri; + data->pri_join_mask = app_pri_bit; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + } + + strict_pri = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri); + entry++; + } + + num_of_app_pri--; + } + + if (num_spread_of_entries) + return -EINVAL; + + return 0; +} + +static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, + struct cos_help_data *cos_data, + u8 entry, + u8 num_spread_of_entries, + u8 strict_app_pris) +{ + + if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, + num_spread_of_entries, + strict_app_pris)) { + struct cos_entry_help_data *data = &cos_data-> + data[entry]; + /* Fill BW entry */ + data->cos_bw = DCBX_INVALID_COS_BW; + data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST; + data->pri_join_mask = strict_app_pris; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + return 1; + } + + return num_spread_of_entries; +} + +static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 pri_join_mask) + +{ + u8 need_num_of_entries = 0; + u8 i = 0; + u8 entry = 0; + + /* + * if the number of requested PG-s in CEE is greater than 3 + * then the results are not determined since this is a violation + * of the standard. + */ + if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { + if (bnx2x_dcbx_join_pgs(bp, ets, help_data, + DCBX_COS_MAX_NUM_E3B0)) { + BNX2X_ERR("Unable to reduce the number of PGs -" + "we will disables ETS\n"); + bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, + pri_join_mask); + return; + } + } + + for (i = 0 ; i < help_data->num_of_pg; i++) { + struct pg_entry_help_data *pg = &help_data->data[i]; + if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { + struct cos_entry_help_data *data = &cos_data-> + data[entry]; + /* Fill BW entry */ + data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg); + data->strict = BNX2X_DCBX_STRICT_INVALID; + data->pri_join_mask = pg->pg_priority; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + + entry++; + } else { + need_num_of_entries = min_t(u8, + (u8)pg->num_of_dif_pri, + (u8)DCBX_COS_MAX_NUM_E3B0 - + help_data->num_of_pg + 1); + /* + * If there are still VOQ-s which have no associated PG, + * then associate these VOQ-s to PG15. These PG-s will + * be used for SP between priorities on PG15. 
+ */ + entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, + entry, need_num_of_entries, pg->pg_priority); + } + } + + /* the entry will represent the number of COSes used */ + cos_data->num_of_cos = entry; +} +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + u32 *pg_pri_orginal_spread) +{ + struct cos_help_data cos_data; + u8 i = 0; + u32 pri_join_mask = 0; + u8 num_of_dif_pri = 0; + + memset(&cos_data, 0, sizeof(cos_data)); + + /* Validate the pg value */ + for (i = 0; i < help_data->num_of_pg ; i++) { + if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && + DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) + BNX2X_ERR("Invalid pg[%d] data %x\n", i, + help_data->data[i].pg); + pri_join_mask |= help_data->data[i].pg_priority; + num_of_dif_pri += help_data->data[i].num_of_dif_pri; + } + + /* defaults */ + cos_data.num_of_cos = 1; + for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) { + cos_data.data[i].pri_join_mask = 0; + cos_data.data[i].pausable = false; + cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID; + cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; + } + + if (CHIP_IS_E3B0(bp)) + bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, + &cos_data, pri_join_mask); + else /* E2 + E3A0 */ + bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, + help_data, ets, + &cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + + for (i = 0; i < cos_data.num_of_cos ; i++) { + struct bnx2x_dcbx_cos_params *p = + &bp->dcbx_port_params.ets.cos_params[i]; + + p->strict = cos_data.data[i].strict; + p->bw_tbl = cos_data.data[i].cos_bw; + p->pri_bitmask = cos_data.data[i].pri_join_mask; + p->pauseable = cos_data.data[i].pausable; + + /* sanity */ + if (p->bw_tbl != DCBX_INVALID_COS_BW || + p->strict != BNX2X_DCBX_STRICT_INVALID) { + if (p->pri_bitmask == 0) + BNX2X_ERR("Invalid pri_bitmask for %d\n", i); + + if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { + + if (p->pauseable && + DCBX_PFC_PRI_GET_NON_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for " + "pausable COS %d\n", i); + + if (!p->pauseable && + DCBX_PFC_PRI_GET_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for " + "nonpausable COS %d\n", i); + } + } + + if (p->pauseable) + DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n", + i, cos_data.data[i].pri_join_mask); + else + DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask " + "0x%x\n", + i, cos_data.data[i].pri_join_mask); + } + + bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; +} + +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, + u32 *set_configuration_ets_pg, + u32 *pri_pg_tbl) +{ + int i; + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { + set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i); + + DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n", + i, set_configuration_ets_pg[i]); + } +} + +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, + struct bnx2x_func_tx_start_params *pfc_fw_cfg) +{ + u16 pri_bit = 0; + u8 cos = 0, pri = 0; + struct priority_cos *tt2cos; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg)); + + /* to disable DCB - the structure must be zeroed */ + if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) + return; + + /*shortcut*/ + tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos; + + /* Fw version should be incremented each update */ + pfc_fw_cfg->dcb_version = ++bp->dcb_version; + pfc_fw_cfg->dcb_enabled = 1; + + /* Fill priority parameters */ + 
for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { + tt2cos[pri].priority = ttp[pri]; + pri_bit = 1 << tt2cos[pri].priority; + + /* Fill COS parameters based on COS calculated to + * make it more general for future use */ + for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) + if (bp->dcbx_port_params.ets.cos_params[cos]. + pri_bitmask & pri_bit) + tt2cos[pri].cos = cos; + } + + /* we never want the FW to add a 0 vlan tag */ + pfc_fw_cfg->dont_add_pri_0_en = 1; + + bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); +} + +void bnx2x_dcbx_pmf_update(struct bnx2x *bp) +{ + /* if we need to syncronize DCBX result from prev PMF + * read it from shmem and update bp accordingly + */ + if (SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) { + /* Read neg results if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_neg_results(bp)) + return; + + bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + } +} + +/* DCB netlink */ +#ifdef BCM_DCBNL + +#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ + DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) + +static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) +{ + /* validate dcbnl call that may change HW state: + * DCB is on and DCBX mode was SUCCESSFULLY set by the user. + */ + return bp->dcb_state && bp->dcbx_mode_uset; +} + +static u8 bnx2x_dcbnl_get_state(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state); + return bp->dcb_state; +} + +static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off"); + + bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); + return 0; +} + +static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n"); + + /* first the HW mac address */ + memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); + +#ifdef BCM_CNIC + /* second SAN address */ + memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); +#endif +} + +static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, + u8 prio_type, u8 pgid, u8 bw_pct, + u8 up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + + DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid); + if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) + return; + + /** + * bw_pct ingnored - band-width percentage devision between user + * priorities within the same group is not + * standard and hence not supported + * + * prio_type igonred - priority levels within the same group are not + * standard and hence are not supported. According + * to the standard pgid 15 is dedicated to strict + * prioirty traffic (on the port level). 
+ * + * up_map ignored + */ + + bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; +} + +static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct); + + if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) + return; + + bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; +} + +static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio, + u8 prio_type, u8 pgid, u8 bw_pct, + u8 up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); +} + +static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); +} + +static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "prio = %d\n", prio); + + /** + * bw_pct ingnored - band-width percentage devision between user + * priorities within the same group is not + * standard and hence not supported + * + * prio_type igonred - priority levels within the same group are not + * standard and hence are not supported. According + * to the standard pgid 15 is dedicated to strict + * prioirty traffic (on the port level). + * + * up_map ignored + */ + *up_map = *bw_pct = *prio_type = *pgid = 0; + + if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) + return; + + *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio); +} + +static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "pgid = %d\n", pgid); + + *bw_pct = 0; + + if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) + return; + + *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid); +} + +static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n"); + + *prio_type = *pgid = *bw_pct = *up_map = 0; +} + +static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n"); + + *bw_pct = 0; +} + +static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, + u8 setting) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting); + + if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) + return; + + bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 
1 : 0) << prio); + + if (setting) + bp->dcbx_config_params.admin_pfc_tx_enable = 1; +} + +static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "prio = %d\n", prio); + + *setting = 0; + + if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES) + return; + + *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1; +} + +static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + int rc = 0; + + DP(NETIF_MSG_LINK, "SET-ALL\n"); + + if (!bnx2x_dcbnl_set_valid(bp)) + return 1; + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + netdev_err(bp->dev, "Handling parity error recovery. " + "Try again later\n"); + return 1; + } + if (netif_running(bp->dev)) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + } + DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc); + if (rc) + return 1; + + return 0; +} + +static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + if (bp->dcb_state) { + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; /* 8 priorities for PGs */ + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; /* 8 priorities for PFC */ + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = BNX2X_DCBX_CAPS; + default: + rval = -EINVAL; + break; + } + } else + rval = -EINVAL; + + DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap); + return rval; +} + +static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(NETIF_MSG_LINK, "tcid %d\n", tcid); + + if (bp->dcb_state) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : + DCBX_COS_MAX_NUM_E2; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : + DCBX_COS_MAX_NUM_E2; + break; + default: + rval = -EINVAL; + break; + } + } else + rval = -EINVAL; + + return rval; +} + +static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num); + return -EINVAL; +} + +static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); + + if (!bp->dcb_state) + return 0; + + return bp->dcbx_local_feat.pfc.enabled; +} + +static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off"); + + if (!bnx2x_dcbnl_set_valid(bp)) + return; + + bp->dcbx_config_params.admin_pfc_tx_enable = + bp->dcbx_config_params.admin_pfc_enable = (state ? 
1 : 0); +} + +static void bnx2x_admin_app_set_ent( + struct bnx2x_admin_priority_app_table *app_ent, + u8 idtype, u16 idval, u8 up) +{ + app_ent->valid = 1; + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + app_ent->traffic_type = TRAFFIC_TYPE_ETH; + break; + case DCB_APP_IDTYPE_PORTNUM: + app_ent->traffic_type = TRAFFIC_TYPE_PORT; + break; + default: + break; /* never gets here */ + } + app_ent->app_id = idval; + app_ent->priority = up; +} + +static bool bnx2x_admin_app_is_equal( + struct bnx2x_admin_priority_app_table *app_ent, + u8 idtype, u16 idval) +{ + if (!app_ent->valid) + return false; + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + if (app_ent->traffic_type != TRAFFIC_TYPE_ETH) + return false; + break; + case DCB_APP_IDTYPE_PORTNUM: + if (app_ent->traffic_type != TRAFFIC_TYPE_PORT) + return false; + break; + default: + return false; + } + if (app_ent->app_id != idval) + return false; + + return true; +} + +static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) +{ + int i, ff; + + /* iterate over the app entries looking for idtype and idval */ + for (i = 0, ff = -1; i < 4; i++) { + struct bnx2x_admin_priority_app_table *app_ent = + &bp->dcbx_config_params.admin_priority_app_table[i]; + if (bnx2x_admin_app_is_equal(app_ent, idtype, idval)) + break; + + if (ff < 0 && !app_ent->valid) + ff = i; + } + if (i < 4) + /* if found overwrite up */ + bp->dcbx_config_params. + admin_priority_app_table[i].priority = up; + else if (ff >= 0) + /* not found use first-free */ + bnx2x_admin_app_set_ent( + &bp->dcbx_config_params.admin_priority_app_table[ff], + idtype, idval, up); + else + /* app table is full */ + return -EBUSY; + + /* up configured, if not 0 make sure feature is enabled */ + if (up) + bp->dcbx_config_params.admin_application_priority_tx_enable = 1; + + return 0; +} + +static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, + u16 idval, u8 up) +{ + struct bnx2x *bp = netdev_priv(netdev); + + DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n", + idtype, idval, up); + + if (!bnx2x_dcbnl_set_valid(bp)) + return -EINVAL; + + /* verify idtype */ + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + return -EINVAL; + } + return bnx2x_set_admin_app_up(bp, idtype, idval, up); +} + +static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 state; + + state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE; + + if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF) + state |= DCB_CAP_DCBX_STATIC; + + return state; +} + +static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %02x\n", state); + + /* set dcbx mode */ + + if ((state & BNX2X_DCBX_CAPS) != state) { + BNX2X_ERR("Requested DCBX mode %x is beyond advertised " + "capabilities\n", state); + return 1; + } + + if (bp->dcb_state != BNX2X_DCB_STATE_ON) { + BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n"); + return 1; + } + + if (state & DCB_CAP_DCBX_STATIC) + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF; + else + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON; + + bp->dcbx_mode_uset = true; + return 0; +} + +static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, + u8 *flags) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(NETIF_MSG_LINK, "featid %d\n", featid); + + if (bp->dcb_state) { + *flags = 0; + switch (featid) { + case 
DCB_FEATCFG_ATTR_PG: + if (bp->dcbx_local_feat.ets.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) + *flags |= DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (bp->dcbx_local_feat.pfc.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | + DCBX_LOCAL_PFC_MISMATCH)) + *flags |= DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_APP: + if (bp->dcbx_local_feat.app.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | + DCBX_LOCAL_APP_MISMATCH)) + *flags |= DCB_FEATCFG_ERROR; + break; + default: + rval = -EINVAL; + break; + } + } else + rval = -EINVAL; + + return rval; +} + +static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, + u8 flags) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags); + + /* ignore the 'advertise' flag */ + if (bnx2x_dcbnl_set_valid(bp)) { + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + bp->dcbx_config_params.admin_ets_enable = + flags & DCB_FEATCFG_ENABLE ? 1 : 0; + bp->dcbx_config_params.admin_ets_willing = + flags & DCB_FEATCFG_WILLING ? 1 : 0; + break; + case DCB_FEATCFG_ATTR_PFC: + bp->dcbx_config_params.admin_pfc_enable = + flags & DCB_FEATCFG_ENABLE ? 1 : 0; + bp->dcbx_config_params.admin_pfc_willing = + flags & DCB_FEATCFG_WILLING ? 1 : 0; + break; + case DCB_FEATCFG_ATTR_APP: + /* ignore enable, always enabled */ + bp->dcbx_config_params.admin_app_priority_willing = + flags & DCB_FEATCFG_WILLING ? 1 : 0; + break; + default: + rval = -EINVAL; + break; + } + } else + rval = -EINVAL; + + return rval; +} + +static int bnx2x_peer_appinfo(struct net_device *netdev, + struct dcb_peer_app_info *info, u16* app_count) +{ + int i; + struct bnx2x *bp = netdev_priv(netdev); + + DP(NETIF_MSG_LINK, "APP-INFO\n"); + + info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; + info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; + *app_count = 0; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) + if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield & + DCBX_APP_ENTRY_VALID) + (*app_count)++; + return 0; +} + +static int bnx2x_peer_apptable(struct net_device *netdev, + struct dcb_app *table) +{ + int i, j; + struct bnx2x *bp = netdev_priv(netdev); + + DP(NETIF_MSG_LINK, "APP-TABLE\n"); + + for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + struct dcbx_app_priority_entry *ent = + &bp->dcbx_remote_feat.app.app_pri_tbl[i]; + + if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { + table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent); + table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent); + table[j++].protocol = ent->app_id; + } + } + return 0; +} + +static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg) +{ + int i; + struct bnx2x *bp = netdev_priv(netdev); + + pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0; + + for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { + pg->pg_bw[i] = + DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i); + pg->prio_pg[i] = + DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i); + } + return 0; +} + +static int bnx2x_cee_peer_getpfc(struct net_device *netdev, + struct cee_pfc *pfc) +{ + struct bnx2x *bp = netdev_priv(netdev); + pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps; + pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap; + return 0; +} + +const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = { + .getstate = bnx2x_dcbnl_get_state, + .setstate = bnx2x_dcbnl_set_state, + 
.getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx, + .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx, + .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx, + .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx, + .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx, + .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx, + .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx, + .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx, + .setpfccfg = bnx2x_dcbnl_set_pfc_cfg, + .getpfccfg = bnx2x_dcbnl_get_pfc_cfg, + .setall = bnx2x_dcbnl_set_all, + .getcap = bnx2x_dcbnl_get_cap, + .getnumtcs = bnx2x_dcbnl_get_numtcs, + .setnumtcs = bnx2x_dcbnl_set_numtcs, + .getpfcstate = bnx2x_dcbnl_get_pfc_state, + .setpfcstate = bnx2x_dcbnl_set_pfc_state, + .setapp = bnx2x_dcbnl_set_app_up, + .getdcbx = bnx2x_dcbnl_get_dcbx, + .setdcbx = bnx2x_dcbnl_set_dcbx, + .getfeatcfg = bnx2x_dcbnl_get_featcfg, + .setfeatcfg = bnx2x_dcbnl_set_featcfg, + .peer_getappinfo = bnx2x_peer_appinfo, + .peer_getapptable = bnx2x_peer_apptable, + .cee_peer_getpg = bnx2x_cee_peer_getpg, + .cee_peer_getpfc = bnx2x_cee_peer_getpfc, +}; + +#endif /* BCM_DCBNL */ diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f4ab90c20891,000000000000..720478993950 mode 100644,000000..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -1,11526 -1,0 +1,11541 @@@ +/* bnx2x_main.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include /* for dev_info() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_init.h" +#include "bnx2x_init_ops.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dcb.h" +#include "bnx2x_sp.h" + +#include +#include "bnx2x_fw_file_hdr.h" +/* FW files */ +#define FW_FILE_VERSION \ + __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ + __stringify(BCM_5710_FW_MINOR_VERSION) "." \ + __stringify(BCM_5710_FW_REVISION_VERSION) "." 
\ + __stringify(BCM_5710_FW_ENGINEERING_VERSION) +#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" +#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" +#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" + +/* Time in jiffies before concluding the transmitter is hung */ +#define TX_TIMEOUT (5*HZ) + +static char version[] __devinitdata = + "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " + DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Eliezer Tamir"); +MODULE_DESCRIPTION("Broadcom NetXtreme II " + "BCM57710/57711/57711E/" + "57712/57712_MF/57800/57800_MF/57810/57810_MF/" + "57840/57840_MF Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_FIRMWARE(FW_FILE_NAME_E1); +MODULE_FIRMWARE(FW_FILE_NAME_E1H); +MODULE_FIRMWARE(FW_FILE_NAME_E2); + +static int multi_mode = 1; +module_param(multi_mode, int, 0); +MODULE_PARM_DESC(multi_mode, " Multi queue mode " + "(0 Disable; 1 Enable (default))"); + +int num_queues; +module_param(num_queues, int, 0); +MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" + " (default is as a number of CPUs)"); + +static int disable_tpa; +module_param(disable_tpa, int, 0); +MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); + +#define INT_MODE_INTx 1 +#define INT_MODE_MSI 2 +static int int_mode; +module_param(int_mode, int, 0); +MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " + "(1 INT#x; 2 MSI)"); + +static int dropless_fc; +module_param(dropless_fc, int, 0); +MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); + +static int poll; +module_param(poll, int, 0); +MODULE_PARM_DESC(poll, " Use polling (for debug)"); + +static int mrrs = -1; +module_param(mrrs, int, 0); +MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); + +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, " Default debug msglevel"); + + + +struct workqueue_struct *bnx2x_wq; + +enum bnx2x_board_type { + BCM57710 = 0, + BCM57711, + BCM57711E, + BCM57712, + BCM57712_MF, + BCM57800, + BCM57800_MF, + BCM57810, + BCM57810_MF, + BCM57840, + BCM57840_MF +}; + +/* indexed by board_type, above */ +static struct { + char *name; +} board_info[] __devinitdata = { + { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, + { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, + { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, + { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " + "Ethernet Multi Function"} +}; + +#ifndef PCI_DEVICE_ID_NX2_57710 +#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711 +#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711E +#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E +#endif +#ifndef PCI_DEVICE_ID_NX2_57712 +#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 +#endif +#ifndef PCI_DEVICE_ID_NX2_57712_MF +#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57800 +#define 
PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 +#endif +#ifndef PCI_DEVICE_ID_NX2_57800_MF +#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57810 +#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 +#endif +#ifndef PCI_DEVICE_ID_NX2_57810_MF +#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57840 +#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_MF +#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF +#endif +static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); + +/**************************************************************************** +* General service functions +****************************************************************************/ + +static inline void __storm_memset_dma_mapping(struct bnx2x *bp, + u32 addr, dma_addr_t mapping) +{ + REG_WR(bp, addr, U64_LO(mapping)); + REG_WR(bp, addr + 4, U64_HI(mapping)); +} + +static inline void storm_memset_spq_addr(struct bnx2x *bp, + dma_addr_t mapping, u16 abs_fid) +{ + u32 addr = XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); + + __storm_memset_dma_mapping(bp, addr, mapping); +} + +static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); +} + +static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), + enable); +} + +static inline void storm_memset_eq_data(struct bnx2x *bp, + struct event_ring_data *eq_data, + u16 pfid) +{ + size_t size = sizeof(struct event_ring_data); + + u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid); + + __storm_memset_struct(bp, addr, size, (u32 *)eq_data); +} + +static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, + u16 pfid) +{ + u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); + REG_WR16(bp, addr, eq_prod); +} + +/* used only at init + * locking is done by mcp + */ +static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) +{ + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); + pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); + pci_write_config_dword(bp->pdev, 
PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); +} + +static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) +{ + u32 val; + + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); + pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); + + return val; +} + +#define DMAE_DP_SRC_GRC "grc src_addr [%08x]" +#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]" +#define DMAE_DP_DST_GRC "grc dst_addr [%08x]" +#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" +#define DMAE_DP_DST_NONE "dst_addr [none]" + +static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, + int msglvl) +{ + u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + + switch (dmae->opcode & DMAE_COMMAND_DST) { + case DMAE_CMD_DST_PCI: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + case DMAE_CMD_DST_GRC: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + default: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" + "comp_addr [%x:%08x] comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src_addr [%08x] len [%d * 4] dst_addr [none]\n" + "comp_addr [%x:%08x] comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + } + +} + +/* copy command into DMAE command memory and set DMAE command go */ +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) +{ + u32 cmd_offset; + int i; + + cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); + for (i = 0; i < (sizeof(struct dmae_command)/4); i++) { + REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); + + DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n", + idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); + } + REG_WR(bp, dmae_reg_go_c[idx], 1); +} + +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type) +{ + return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | + DMAE_CMD_C_ENABLE); +} + +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode) +{ + return opcode & ~DMAE_CMD_SRC_RESET; +} + +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type) +{ + u32 opcode = 0; + + opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) 
| + (dst_type << DMAE_COMMAND_DST_SHIFT)); + + opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); + + opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); + opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | + (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); + opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); + +#ifdef __BIG_ENDIAN + opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; +#else + opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; +#endif + if (with_comp) + opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); + return opcode; +} + +static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, + struct dmae_command *dmae, + u8 src_type, u8 dst_type) +{ + memset(dmae, 0, sizeof(struct dmae_command)); + + /* set the opcode */ + dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, + true, DMAE_COMP_PCI); + + /* fill in the completion parameters */ + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); + dmae->comp_val = DMAE_COMP_VAL; +} + +/* issue a dmae command over the init-channel and wailt for completion */ +static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, + struct dmae_command *dmae) +{ + u32 *wb_comp = bnx2x_sp(bp, wb_comp); + int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; + int rc = 0; + + DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n", + bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], + bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); + + /* + * Lock the dmae channel. Disable BHs to prevent a dead-lock + * as long as this code is called both from syscall context and + * from ndo_set_rx_mode() flow that may be called from BH. + */ + spin_lock_bh(&bp->dmae_lock); + + /* reset completion */ + *wb_comp = 0; + + /* post the command on the channel used for initializations */ + bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + + /* wait for completion */ + udelay(5); + while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); + + if (!cnt) { + BNX2X_ERR("DMAE timeout!\n"); + rc = DMAE_TIMEOUT; + goto unlock; + } + cnt--; + udelay(50); + } + if (*wb_comp & DMAE_PCI_ERR_FLAG) { + BNX2X_ERR("DMAE PCI error!\n"); + rc = DMAE_PCI_ERROR; + } + + DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n", + bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], + bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); + +unlock: + spin_unlock_bh(&bp->dmae_lock); + return rc; +} + +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32) +{ + struct dmae_command dmae; + + if (!bp->dmae_ready) { + u32 *data = bnx2x_sp(bp, wb_data[0]); + + DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" + " using indirect\n", dst_addr, len32); + bnx2x_init_ind_wr(bp, dst_addr, data, len32); + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); + + /* fill in addresses and len */ + dmae.src_addr_lo = U64_LO(dma_addr); + dmae.src_addr_hi = U64_HI(dma_addr); + dmae.dst_addr_lo = dst_addr >> 2; + dmae.dst_addr_hi = 0; + dmae.len = len32; + + bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); + + /* issue the command and wait for completion */ + bnx2x_issue_dmae_with_comp(bp, &dmae); +} + +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) +{ + struct dmae_command dmae; + + if (!bp->dmae_ready) { + u32 *data = bnx2x_sp(bp, wb_data[0]); + int i; + + DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)" + " using indirect\n", src_addr, 
len32); + for (i = 0; i < len32; i++) + data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); + + /* fill in addresses and len */ + dmae.src_addr_lo = src_addr >> 2; + dmae.src_addr_hi = 0; + dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); + dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); + dmae.len = len32; + + bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); + + /* issue the command and wait for completion */ + bnx2x_issue_dmae_with_comp(bp, &dmae); +} + +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, + u32 addr, u32 len) +{ + int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); + int offset = 0; + + while (len > dmae_wr_max) { + bnx2x_write_dmae(bp, phys_addr + offset, + addr + offset, dmae_wr_max); + offset += dmae_wr_max * 4; + len -= dmae_wr_max; + } + + bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); +} + +/* used only for slowpath so not inlined */ +static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo) +{ + u32 wb_write[2]; + + wb_write[0] = val_hi; + wb_write[1] = val_lo; + REG_WR_DMAE(bp, reg, wb_write, 2); +} + +#ifdef USE_WB_RD +static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg) +{ + u32 wb_data[2]; + + REG_RD_DMAE(bp, reg, wb_data, 2); + + return HILO_U64(wb_data[0], wb_data[1]); +} +#endif + +static int bnx2x_mc_assert(struct bnx2x *bp) +{ + char last_idx; + int i, rc = 0; + u32 row0, row1, row2, row3; + + /* XSTORM */ + last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* TSTORM */ + last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* CSTORM */ + last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, 
BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* USTORM */ + last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + return rc; +} + +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) +{ + u32 addr, val; + u32 mark, offset; + __be32 data[9]; + int word; + u32 trace_shmem_base; + if (BP_NOMCP(bp)) { + BNX2X_ERR("NO MCP - can not dump\n"); + return; + } + netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff)); + + val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); + if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) + printk("%s" "MCP PC at 0x%x\n", lvl, val); + + if (BP_PATH(bp) == 0) + trace_shmem_base = bp->common.shmem_base; + else + trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); + addr = trace_shmem_base - 0x0800 + 4; + mark = REG_RD(bp, addr); + mark = (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + + ((mark + 0x3) & ~0x3) - 0x08000000; + printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); + + printk("%s", lvl); + for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { + for (word = 0; word < 8; word++) + data[word] = htonl(REG_RD(bp, offset + 4*word)); + data[8] = 0x0; + pr_cont("%s", (char *)data); + } + for (offset = addr + 4; offset <= mark; offset += 0x8*4) { + for (word = 0; word < 8; word++) + data[word] = htonl(REG_RD(bp, offset + 4*word)); + data[8] = 0x0; + pr_cont("%s", (char *)data); + } + printk("%s" "end of fw dump\n", lvl); +} + +static inline void bnx2x_fw_dump(struct bnx2x *bp) +{ + bnx2x_fw_dump_lvl(bp, KERN_ERR); +} + +void bnx2x_panic_dump(struct bnx2x *bp) +{ + int i; + u16 j; + struct hc_sp_status_block_data sp_sb_data; + int func = BP_FUNC(bp); +#ifdef BNX2X_STOP_ON_ERROR + u16 start = 0, end = 0; + u8 cos; +#endif + + bp->stats_state = STATS_STATE_DISABLED; + DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); + + BNX2X_ERR("begin crash dump -----------------\n"); + + /* Indices */ + /* Common */ + BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" + " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", + bp->def_idx, bp->def_att_idx, bp->attn_state, + bp->spq_prod_idx, bp->stats_counter); + BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", + bp->def_status_blk->atten_status_block.attn_bits, + bp->def_status_blk->atten_status_block.attn_bits_ack, + bp->def_status_blk->atten_status_block.status_block_id, + bp->def_status_blk->atten_status_block.attn_bits_index); + BNX2X_ERR(" def ("); + for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) + pr_cont("0x%x%s", + bp->def_status_blk->sp_sb.index_values[i], + (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); + + for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) + *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + + i*sizeof(u32)); + + pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", + sp_sb_data.igu_sb_id, + sp_sb_data.igu_seg_id, + sp_sb_data.p_func.pf_id, + sp_sb_data.p_func.vnic_id, + sp_sb_data.p_func.vf_id, + sp_sb_data.p_func.vf_valid, + sp_sb_data.state); + + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + int loop; + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p = + CHIP_IS_E1x(bp) ? + sb_data_e1x.common.state_machine : + sb_data_e2.common.state_machine; + struct hc_index_data *hc_index_p = + CHIP_IS_E1x(bp) ? + sb_data_e1x.index_data : + sb_data_e2.index_data; + u8 data_size, cos; + u32 *sb_data_p; + struct bnx2x_fp_txdata txdata; + + /* Rx */ + BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" + " rx_comp_prod(0x%x)" + " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", + i, fp->rx_bd_prod, fp->rx_bd_cons, + fp->rx_comp_prod, + fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); + BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)" + " fp_hc_idx(0x%x)\n", + fp->rx_sge_prod, fp->last_max_sge, + le16_to_cpu(fp->fp_hc_idx)); + + /* Tx */ + for_each_cos_in_tx_queue(fp, cos) + { + txdata = fp->txdata[cos]; + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" + " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" + " *tx_cons_sb(0x%x)\n", + i, txdata.tx_pkt_prod, + txdata.tx_pkt_cons, txdata.tx_bd_prod, + txdata.tx_bd_cons, + le16_to_cpu(*txdata.tx_cons_sb)); + } + + loop = CHIP_IS_E1x(bp) ? 
+ HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; + + /* host sb data */ + +#ifdef BCM_CNIC + if (IS_FCOE_FP(fp)) + continue; +#endif + BNX2X_ERR(" run indexes ("); + for (j = 0; j < HC_SB_MAX_SM; j++) + pr_cont("0x%x%s", + fp->sb_running_index[j], + (j == HC_SB_MAX_SM - 1) ? ")" : " "); + + BNX2X_ERR(" indexes ("); + for (j = 0; j < loop; j++) + pr_cont("0x%x%s", + fp->sb_index_values[j], + (j == loop - 1) ? ")" : " "); + /* fw sb data */ + data_size = CHIP_IS_E1x(bp) ? + sizeof(struct hc_status_block_data_e1x) : + sizeof(struct hc_status_block_data_e2); + data_size /= sizeof(u32); + sb_data_p = CHIP_IS_E1x(bp) ? + (u32 *)&sb_data_e1x : + (u32 *)&sb_data_e2; + /* copy sb data in here */ + for (j = 0; j < data_size; j++) + *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + + j * sizeof(u32)); + + if (!CHIP_IS_E1x(bp)) { + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", + sb_data_e2.common.p_func.pf_id, + sb_data_e2.common.p_func.vf_id, + sb_data_e2.common.p_func.vf_valid, + sb_data_e2.common.p_func.vnic_id, + sb_data_e2.common.same_igu_sb_1b, + sb_data_e2.common.state); + } else { + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", + sb_data_e1x.common.p_func.pf_id, + sb_data_e1x.common.p_func.vf_id, + sb_data_e1x.common.p_func.vf_valid, + sb_data_e1x.common.p_func.vnic_id, + sb_data_e1x.common.same_igu_sb_1b, + sb_data_e1x.common.state); + } + + /* SB_SMs data */ + for (j = 0; j < HC_SB_MAX_SM; j++) { + pr_cont("SM[%d] __flags (0x%x) " + "igu_sb_id (0x%x) igu_seg_id(0x%x) " + "time_to_expire (0x%x) " + "timer_value(0x%x)\n", j, + hc_sm_p[j].__flags, + hc_sm_p[j].igu_sb_id, + hc_sm_p[j].igu_seg_id, + hc_sm_p[j].time_to_expire, + hc_sm_p[j].timer_value); + } + + /* Indecies data */ + for (j = 0; j < loop; j++) { + pr_cont("INDEX[%d] flags (0x%x) " + "timeout (0x%x)\n", j, + hc_index_p[j].flags, + hc_index_p[j].timeout); + } + } + +#ifdef BNX2X_STOP_ON_ERROR + /* Rings */ + /* Rx */ + for_each_rx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); + end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); + for (j = start; j != end; j = RX_BD(j + 1)) { + u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; + struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; + + BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", + i, j, rx_bd[1], rx_bd[0], sw_bd->skb); + } + + start = RX_SGE(fp->rx_sge_prod); + end = RX_SGE(fp->last_max_sge); + for (j = start; j != end; j = RX_SGE(j + 1)) { + u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; + struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; + + BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", + i, j, rx_sge[1], rx_sge[0], sw_page->page); + } + + start = RCQ_BD(fp->rx_comp_cons - 10); + end = RCQ_BD(fp->rx_comp_cons + 503); + for (j = start; j != end; j = RCQ_BD(j + 1)) { + u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; + + BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", + i, j, cqe[0], cqe[1], cqe[2], cqe[3]); + } + } + + /* Tx */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); + end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); + for (j = start; j != end; j = TX_BD(j + 1)) { + struct sw_tx_bd *sw_bd = + &txdata->tx_buf_ring[j]; + + BNX2X_ERR("fp%d: txdata %d, " + "packet[%x]=[%p,%x]\n", + i, cos, j, 
sw_bd->skb, + sw_bd->first_bd); + } + + start = TX_BD(txdata->tx_bd_cons - 10); + end = TX_BD(txdata->tx_bd_cons + 254); + for (j = start; j != end; j = TX_BD(j + 1)) { + u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; + + BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=" + "[%x:%x:%x:%x]\n", + i, cos, j, tx_bd[0], tx_bd[1], + tx_bd[2], tx_bd[3]); + } + } + } +#endif + bnx2x_fw_dump(bp); + bnx2x_mc_assert(bp); + BNX2X_ERR("end crash dump -----------------\n"); +} + +/* + * FLR Support for E2 + * + * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW + * initialization. + */ +#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ +#define FLR_WAIT_INTERAVAL 50 /* usec */ +#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ + +struct pbf_pN_buf_regs { + int pN; + u32 init_crd; + u32 crd; + u32 crd_freed; +}; + +struct pbf_pN_cmd_regs { + int pN; + u32 lines_occup; + u32 lines_freed; +}; + +static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, + struct pbf_pN_buf_regs *regs, + u32 poll_count) +{ + u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; + u32 cur_cnt = poll_count; + + crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); + crd = crd_start = REG_RD(bp, regs->crd); + init_crd = REG_RD(bp, regs->init_crd); + + DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); + DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); + + while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < + (init_crd - crd_start))) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERAVAL); + crd = REG_RD(bp, regs->crd); + crd_freed = REG_RD(bp, regs->crd_freed); + } else { + DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", + regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", + regs->pN, crd_freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); +} + +static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, + struct pbf_pN_cmd_regs *regs, + u32 poll_count) +{ + u32 occup, to_free, freed, freed_start; + u32 cur_cnt = poll_count; + + occup = to_free = REG_RD(bp, regs->lines_occup); + freed = freed_start = REG_RD(bp, regs->lines_freed); + + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); + + while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERAVAL); + occup = REG_RD(bp, regs->lines_occup); + freed = REG_RD(bp, regs->lines_freed); + } else { + DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", + regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", + regs->pN, freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); +} + +static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, + u32 expected, u32 poll_count) +{ + u32 cur_cnt = poll_count; + u32 val; + + while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) + udelay(FLR_WAIT_INTERAVAL); + + return val; +} + +static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt) +{ + u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); + if (val != 0) { + BNX2X_ERR("%s usage count=%d\n", msg, val); + return 1; + } + return 0; +} + +static u32 
bnx2x_flr_clnup_poll_count(struct bnx2x *bp) +{ + /* adjust polling timeout */ + if (CHIP_REV_IS_EMUL(bp)) + return FLR_POLL_CNT * 2000; + + if (CHIP_REV_IS_FPGA(bp)) + return FLR_POLL_CNT * 120; + + return FLR_POLL_CNT; +} + +static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) +{ + struct pbf_pN_cmd_regs cmd_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q0 : + PBF_REG_P0_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q0 : + PBF_REG_P0_TQ_LINES_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q1 : + PBF_REG_P1_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q1 : + PBF_REG_P1_TQ_LINES_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_LB_Q : + PBF_REG_P4_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_LB_Q : + PBF_REG_P4_TQ_LINES_FREED_CNT} + }; + + struct pbf_pN_buf_regs buf_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q0 : + PBF_REG_P0_INIT_CRD , + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q0 : + PBF_REG_P0_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : + PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q1 : + PBF_REG_P1_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q1 : + PBF_REG_P1_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : + PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_LB_Q : + PBF_REG_P4_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_LB_Q : + PBF_REG_P4_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : + PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, + }; + + int i; + + /* Verify the command queues are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) + bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); + + + /* Verify the transmission buffers are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(buf_regs); i++) + bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); +} + +#define OP_GEN_PARAM(param) \ + (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) + +#define OP_GEN_TYPE(type) \ + (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) + +#define OP_GEN_AGG_VECT(index) \ + (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) + + +static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, + u32 poll_cnt) +{ + struct sdm_op_gen op_gen = {0}; + + u32 comp_addr = BAR_CSTRORM_INTMEM + + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); + int ret = 0; + + if (REG_RD(bp, comp_addr)) { + BNX2X_ERR("Cleanup complete is not 0\n"); + return 1; + } + + op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen.command |= OP_GEN_AGG_VECT(clnup_func); + op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + + DP(BNX2X_MSG_SP, "FW Final cleanup\n"); + REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); + + if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { + BNX2X_ERR("FW final cleanup did not succeed\n"); + ret = 1; + } + /* Zero completion for nxt FLR */ + REG_WR(bp, comp_addr, 0); + + return ret; +} + +static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) +{ + int pos; + u16 status; + + pos = pci_pcie_cap(dev); + if (!pos) + return false; + + pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + return status & PCI_EXP_DEVSTA_TRPND; +} + +/* PF FLR specific routines +*/ +static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) 
+{ + + /* wait for CFC PF usage-counter to zero (includes all the VFs) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + CFC_REG_NUM_LCIDS_INSIDE_PF, + "CFC PF usage counter timed out", + poll_cnt)) + return 1; + + + /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + DORQ_REG_PF_USAGE_CNT, + "DQ PF usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), + "QM PF usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), + "Timers VNIC usage counter timed out", + poll_cnt)) + return 1; + if (bnx2x_flr_clnup_poll_hw_counter(bp, + TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), + "Timers NUM_SCANS usage counter timed out", + poll_cnt)) + return 1; + + /* Wait DMAE PF usage counter to zero */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + dmae_reg_go_c[INIT_DMAE_C(bp)], + "DMAE dommand register timed out", + poll_cnt)) + return 1; + + return 0; +} + +static void bnx2x_hw_enable_status(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); + DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); + + val = REG_RD(bp, PBF_REG_DISABLE_PF); + DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", + val); +} + +static int bnx2x_pf_flr_clnup(struct bnx2x *bp) +{ + u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); + + DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); + + /* Re-enable PF target read access */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + /* Poll HW usage counters */ + if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) + return -EBUSY; + + /* Zero the igu 'trailing edge' and 'leading edge' */ + + /* Send the FW cleanup command */ + if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) + return -EBUSY; + + /* ATC cleanup */ + + /* Verify TX hw is flushed */ + bnx2x_tx_hw_flushed(bp, poll_cnt); + + /* Wait 100ms (not adjusted according to platform) */ + msleep(100); + + /* Verify no pending pci transactions */ + if (bnx2x_is_pcie_pending(bp->pdev)) + BNX2X_ERR("PCIE Transactions still pending\n"); + + /* Debug */ + bnx2x_hw_enable_status(bp); + + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function init + */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + return 0; +} + +static void bnx2x_hc_int_enable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + u32 val = REG_RD(bp, addr); + int msix = (bp->flags & USING_MSIX_FLAG) ? 
1 : 0; + int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + + if (msix) { + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0); + val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else if (msi) { + val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else { + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + if (!CHIP_IS_E1(bp)) { + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", + val, port, addr); + + REG_WR(bp, addr, val); + + val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; + } + } + + if (CHIP_IS_E1(bp)) + REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); + + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", + val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); + + REG_WR(bp, addr, val); + /* + * Ensure that HC_CONFIG is written before leading/trailing edge config + */ + mmiowb(); + barrier(); + + if (!CHIP_IS_E1(bp)) { + /* init leading/trailing edge */ + if (IS_MF(bp)) { + val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); + if (bp->port.pmf) + /* enable nig and gpio3 attention */ + val |= 0x1100; + } else + val = 0xffff; + + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); + } + + /* Make sure that interrupts are indeed enabled from here on */ + mmiowb(); +} + +static void bnx2x_igu_int_enable(struct bnx2x *bp) +{ + u32 val; + int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; + int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + + val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + + if (msix) { + val &= ~(IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + val |= (IGU_PF_CONF_FUNC_EN | + IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN); + } else if (msi) { + val &= ~IGU_PF_CONF_INT_LINE_EN; + val |= (IGU_PF_CONF_FUNC_EN | + IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + } else { + val &= ~IGU_PF_CONF_MSI_MSIX_EN; + val |= (IGU_PF_CONF_FUNC_EN | + IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + } + + DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n", + val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + + barrier(); + + /* init leading/trailing edge */ + if (IS_MF(bp)) { + val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); + if (bp->port.pmf) + /* enable nig and gpio3 attention */ + val |= 0x1100; + } else + val = 0xffff; + + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); + + /* Make sure that interrupts are indeed enabled from here on */ + mmiowb(); +} + +void bnx2x_int_enable(struct bnx2x *bp) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_int_enable(bp); + else + bnx2x_igu_int_enable(bp); +} + +static void bnx2x_hc_int_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /*
+ * in E1 we must use only PCI configuration space to disable
+ * MSI/MSIX capability
+ * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
+ * use the mask register to prevent HC from sending interrupts
+ * after we exit the function
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int i, offset;
+
+ if (disable_hw)
+ /* prevent the HW from sending interrupts */
+ bnx2x_int_disable(bp);
+
+ /* make sure all ISRs are done */
+ if (msix) {
+ synchronize_irq(bp->msix_table[0].vector);
+ offset = 1;
+#ifdef BCM_CNIC
+ offset++;
+#endif
+ for_each_eth_queue(bp, i)
+ synchronize_irq(bp->msix_table[offset++].vector);
+ } else
+ synchronize_irq(bp->pdev->irq);
+
+ /* make sure sp_task is not running */
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
+ flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return false;
+ }
+
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp: driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this function
+ * belongs to. Currently only 2 engines are supported.
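+ * For example, path (engine) 0 maps to HW_LOCK_RESOURCE_RECOVERY_LEADER_0
+ * and path 1 maps to HW_LOCK_RESOURCE_RECOVERY_LEADER_1, matching the
+ * BP_PATH() check in the function body.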
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+	if (BP_PATH(bp))
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+	else
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+#endif
+
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+
+	DP(BNX2X_MSG_SP,
+	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
+	   fp->index, cid, command, bp->state,
+	   rr_cqe->ramrod_cqe.ramrod_type);
+
+	switch (command) {
+	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+		DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_HALT):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_HALT;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TERMINATE):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_TERMINATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_EMPTY):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_EMPTY;
+		break;
+
+	default:
+		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+			  command, fp->index);
+		return;
+	}
+
+	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
+		/* q_obj->complete_cmd() failure means that this was
+		 * an unexpected completion.
+		 *
+		 * In this case we don't want to increase the bp->spq_left
+		 * because apparently we haven't sent this command in the
+		 * first place.
+ */ +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#else + return; +#endif + + smp_mb__before_atomic_inc(); + atomic_inc(&bp->cq_spq_left); + /* push the change in bp->spq_left and towards the memory */ + smp_mb__after_atomic_inc(); + + DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); + + return; +} + +void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod) +{ + u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset; + + bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod, + start); +} + +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) +{ + struct bnx2x *bp = netdev_priv(dev_instance); + u16 status = bnx2x_ack_int(bp); + u16 mask; + int i; + u8 cos; + + /* Return here if interrupt is shared and it's not for us */ + if (unlikely(status == 0)) { + DP(NETIF_MSG_INTR, "not our interrupt!\n"); + return IRQ_NONE; + } + DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + mask = 0x2 << (fp->index + CNIC_PRESENT); + if (status & mask) { + /* Handle Rx or Tx according to SB id */ + prefetch(fp->rx_cons_sb); + for_each_cos_in_tx_queue(fp, cos) + prefetch(fp->txdata[cos].tx_cons_sb); + prefetch(&fp->sb_running_index[SM_RX_ID]); + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + status &= ~mask; + } + } + +#ifdef BCM_CNIC + mask = 0x2; + if (status & (mask | 0x1)) { + struct cnic_ops *c_ops = NULL; + + if (likely(bp->state == BNX2X_STATE_OPEN)) { + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + } + + status &= ~mask; + } +#endif + + if (unlikely(status & 0x1)) { + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + + status &= ~0x1; + if (!status) + return IRQ_HANDLED; + } + + if (unlikely(status)) + DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", + status); + + return IRQ_HANDLED; +} + +/* Link */ + +/* + * General service functions + */ + +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + int cnt; + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + } + + /* Validating that the resource is not already taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) { + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", + lock_status, resource_bit); + return -EEXIST; + } + + /* Try for 5 second every 5ms */ + for (cnt = 0; cnt < 1000; cnt++) { + /* Try to acquire the lock */ + REG_WR(bp, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) + return 0; + + msleep(5); + } + DP(NETIF_MSG_HW, "Timeout\n"); + return -EAGAIN; +} + +int bnx2x_release_leader_lock(struct bnx2x *bp) +{ + return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); +} + +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + + DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + } + + /* Validating that the resource is currently taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (!(lock_status & resource_bit)) { + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", + lock_status, resource_bit); + return -EFAULT; + } + + REG_WR(bp, hw_lock_control_reg, resource_bit); + return 0; +} + + +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + int value; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + /* read GPIO value */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); + + /* get the requested pin value */ + if ((gpio_reg & gpio_mask) == gpio_mask) + value = 1; + else + value = 0; + + DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); + + return value; +} + +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", + gpio_num, gpio_shift); + /* clear FLOAT and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", + gpio_num, gpio_shift); + /* clear FLOAT and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", + gpio_num, gpio_shift); + /* set FLOAT */ + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) +{ + u32 gpio_reg = 0; + int rc = 0; + + /* Any port swapping should be handled by caller. */ + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); + /* set CLR */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); + /* set SET */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); + /* set FLOAT */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); + rc = -EINVAL; + break; + } + + if (rc == 0) + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return rc; +} + +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO int */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); + + switch (mode) { + case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: + DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> " + "output low\n", gpio_num, gpio_shift); + /* clear SET and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: + DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> " + "output high\n", gpio_num, gpio_shift); + /* clear CLR and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) +{ + u32 spio_mask = (1 << spio_num); + u32 spio_reg; + + if ((spio_num < MISC_REGISTERS_SPIO_4) || + (spio_num > MISC_REGISTERS_SPIO_7)) { + BNX2X_ERR("Invalid SPIO %d\n", spio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + /* read SPIO and mask except the float bits */ + spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); + + switch (mode) { + case MISC_REGISTERS_SPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); + /* clear FLOAT and set CLR */ + spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); + break; + + case MISC_REGISTERS_SPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); + /* clear FLOAT and set SET */ + spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); + break; + + case MISC_REGISTERS_SPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); + /* set FLOAT */ + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_SPIO, spio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + + return 0; +} + +void bnx2x_calc_fc_adv(struct bnx2x *bp) +{ + u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); + switch (bp->link_vars.ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: + bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: + bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; + break; + + default: + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + } +} + +u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) +{ + if (!BP_NOMCP(bp)) { + u8 rc; + int cfx_idx = bnx2x_get_link_cfg_idx(bp); + u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; + /* + * Initialize link parameters structure variables + * It is recommended to turn off RX FC for jumbo frames + * for better performance + */ + if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; + 
else + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; + + bnx2x_acquire_phy_lock(bp); + + if (load_mode == LOAD_DIAG) { + struct link_params *lp = &bp->link_params; + lp->loopback_mode = LOOPBACK_XGXS; + /* do PHY loopback at 10G speed, if possible */ + if (lp->req_line_speed[cfx_idx] < SPEED_10000) { + if (lp->speed_cap_mask[cfx_idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + lp->req_line_speed[cfx_idx] = + SPEED_10000; + else + lp->req_line_speed[cfx_idx] = + SPEED_1000; + } + } + + rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); + + bnx2x_release_phy_lock(bp); + + bnx2x_calc_fc_adv(bp); + + if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + bnx2x_link_report(bp); + } else + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + bp->link_params.req_line_speed[cfx_idx] = req_line_speed; + return rc; + } + BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + return -EINVAL; +} + +void bnx2x_link_set(struct bnx2x *bp) +{ + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_phy_init(&bp->link_params, &bp->link_vars); + bnx2x_release_phy_lock(bp); + + bnx2x_calc_fc_adv(bp); + } else + BNX2X_ERR("Bootcode is missing - can not set link\n"); +} + +static void bnx2x__link_reset(struct bnx2x *bp) +{ + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); + } else + BNX2X_ERR("Bootcode is missing - can not reset link\n"); +} + +u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) +{ + u8 rc = 0; + + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, + is_serdes); + bnx2x_release_phy_lock(bp); + } else + BNX2X_ERR("Bootcode is missing - can not test link\n"); + + return rc; +} + +static void bnx2x_init_port_minmax(struct bnx2x *bp) +{ + u32 r_param = bp->link_vars.line_speed / 8; + u32 fair_periodic_timeout_usec; + u32 t_fair; + + memset(&(bp->cmng.rs_vars), 0, + sizeof(struct rate_shaping_vars_per_port)); + memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port)); + + /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ + bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4; + + /* this is the threshold below which no timer arming will occur + 1.25 coefficient is for the threshold to be a little bigger + than the real time, to compensate for timer in-accuracy */ + bp->cmng.rs_vars.rs_threshold = + (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; + + /* resolution of fairness timer */ + fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; + /* for 10G it is 1000usec. for 1G it is 10000usec. */ + t_fair = T_FAIR_COEF / bp->link_vars.line_speed; + + /* this is the threshold below which we won't arm the timer anymore */ + bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES; + + /* we multiply by 1e3/8 to get bytes/msec. + We don't want the credits to pass a credit + of the t_fair*FAIR_MEM (algorithm resolution) */ + bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM; + /* since each tick is 4 usec */ + bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4; +} + +/* Calculates the sum of vn_min_rates. + It's needed for further normalizing of the min_rates. + Returns: + sum of vn_min_rates. + or + 0 - if all the min_rates are 0. + In the later case fainess algorithm should be deactivated. + If not all min_rates are zero then those that are zeroes will be set to 1. 
+ */ +static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) +{ + int all_zero = 1; + int vn; + + bp->vn_weight_sum = 0; + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + u32 vn_cfg = bp->mf_config[vn]; + u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100; + + /* Skip hidden vns */ + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) + continue; + + /* If min rate is zero - set it to 1 */ + if (!vn_min_rate) + vn_min_rate = DEF_MIN_RATE; + else + all_zero = 0; + + bp->vn_weight_sum += vn_min_rate; + } + + /* if ETS or all min rates are zeros - disable fairness */ + if (BNX2X_IS_ETS_ENABLED(bp)) { + bp->cmng.flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); + } else if (all_zero) { + bp->cmng.flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, "All MIN values are zeroes" + " fairness will be disabled\n"); + } else + bp->cmng.flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_FAIRNESS_VN; +} + +static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) +{ + struct rate_shaping_vars_per_vn m_rs_vn; + struct fairness_vars_per_vn m_fair_vn; + u32 vn_cfg = bp->mf_config[vn]; + int func = 2*vn + BP_PORT(bp); + u16 vn_min_rate, vn_max_rate; + int i; + + /* If function is hidden - set min and max to zeroes */ + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { + vn_min_rate = 0; + vn_max_rate = 0; + + } else { + u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); + + vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100; + /* If fairness is enabled (not all min rates are zeroes) and + if current min rate is zero - set it to 1. + This is a requirement of the algorithm. */ + if (bp->vn_weight_sum && (vn_min_rate == 0)) + vn_min_rate = DEF_MIN_RATE; + + if (IS_MF_SI(bp)) + /* maxCfg in percents of linkspeed */ + vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; + else + /* maxCfg is absolute in 100Mb units */ + vn_max_rate = maxCfg * 100; + } + + DP(NETIF_MSG_IFUP, + "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", + func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); + + memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); + memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); + + /* global vn counter - maximal Mbps for this vn */ + m_rs_vn.vn_counter.rate = vn_max_rate; + + /* quota - number of bytes transmitted in this period */ + m_rs_vn.vn_counter.quota = + (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; + + if (bp->vn_weight_sum) { + /* credit for each period of the fairness algorithm: + number of bytes in T_FAIR (the vn share the port rate). 
+ vn_weight_sum should not be larger than 10000, thus + T_FAIR_COEF / (8 * vn_weight_sum) will always be greater + than zero */ + m_fair_vn.vn_credit_delta = + max_t(u32, (vn_min_rate * (T_FAIR_COEF / + (8 * bp->vn_weight_sum))), + (bp->cmng.fair_vars.fair_threshold + + MIN_ABOVE_THRESH)); + DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", + m_fair_vn.vn_credit_delta); + } + + /* Store it to internal memory */ + for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, + ((u32 *)(&m_rs_vn))[i]); + + for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, + ((u32 *)(&m_fair_vn))[i]); +} + +static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) +{ + if (CHIP_REV_IS_SLOW(bp)) + return CMNG_FNS_NONE; + if (IS_MF(bp)) + return CMNG_FNS_MINMAX; + + return CMNG_FNS_NONE; +} + +void bnx2x_read_mf_cfg(struct bnx2x *bp) +{ + int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); + + if (BP_NOMCP(bp)) + return; /* what should be the default bvalue in this case */ + + /* For 2 port configuration the absolute function number formula + * is: + * abs_func = 2 * vn + BP_PORT + BP_PATH + * + * and there are 4 functions per port + * + * For 4 port configuration it is + * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH + * + * and there are 2 functions per port + */ + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); + + if (func >= E1H_FUNC_MAX) + break; + + bp->mf_config[vn] = + MF_CFG_RD(bp, func_mf_config[func].config); + } +} + +static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) +{ + + if (cmng_type == CMNG_FNS_MINMAX) { + int vn; + + /* clear cmng_enables */ + bp->cmng.flags.cmng_enables = 0; + + /* read mf conf from shmem */ + if (read_cfg) + bnx2x_read_mf_cfg(bp); + + /* Init rate shaping and fairness contexts */ + bnx2x_init_port_minmax(bp); + + /* vn_weight_sum and enable fairness if not 0 */ + bnx2x_calc_vn_weight_sum(bp); + + /* calculate and set min-max rate for each vn */ + if (bp->port.pmf) + for (vn = VN_0; vn < E1HVN_MAX; vn++) + bnx2x_init_vn_minmax(bp, vn); + + /* always enable rate shaping and fairness */ + bp->cmng.flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; + if (!bp->vn_weight_sum) + DP(NETIF_MSG_IFUP, "All MIN values are zeroes" + " fairness will be disabled\n"); + return; + } + + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "rate shaping and fairness are disabled\n"); +} + +static inline void bnx2x_link_sync_notify(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func; + int vn; + + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + if (vn == BP_E1HVN(bp)) + continue; + + func = ((vn << 1) | port); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); + } +} + +/* This function is called upon link interrupt */ +static void bnx2x_link_attn(struct bnx2x *bp) +{ + /* Make sure that we are synced with the current statistics */ + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + bnx2x_link_update(&bp->link_params, &bp->link_vars); + + if (bp->link_vars.link_up) { + + /* dropless flow control */ + if (!CHIP_IS_E1(bp) && bp->dropless_fc) { + int port = BP_PORT(bp); + u32 pause_enabled = 0; + + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) + pause_enabled = 1; + + REG_WR(bp, BAR_USTRORM_INTMEM + + 
USTORM_ETH_PAUSE_ENABLED_OFFSET(port), + pause_enabled); + } + + if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { + struct host_port_stats *pstats; + + pstats = bnx2x_sp(bp, port_stats); + /* reset old mac stats */ + memset(&(pstats->mac_stx[0]), 0, + sizeof(struct mac_stx)); + } + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + } + + if (bp->link_vars.link_up && bp->link_vars.line_speed) { + int cmng_fns = bnx2x_get_cmng_fns_mode(bp); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(bp, false, cmng_fns); + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + } else + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "single function mode without fairness\n"); + } + + __bnx2x_link_report(bp); + + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); +} + +void bnx2x__link_status_update(struct bnx2x *bp) +{ + if (bp->state != BNX2X_STATE_OPEN) + return; + + bnx2x_link_status_update(&bp->link_params, &bp->link_vars); + + if (bp->link_vars.link_up) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + else + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + /* indicate link status */ + bnx2x_link_report(bp); +} + +static void bnx2x_pmf_update(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 val; + + bp->port.pmf = 1; + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + + /* + * We need the mb() to ensure the ordering between the writing to + * bp->port.pmf here and reading it from the bnx2x_periodic_task(). + */ + smp_mb(); + + /* queue a periodic task */ + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + + bnx2x_dcbx_pmf_update(bp); + + /* enable nig attention */ + val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); + if (bp->common.int_block == INT_BLOCK_HC) { + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); + } else if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); + } + + bnx2x_stats_handle(bp, STATS_EVENT_PMF); +} + +/* end of Link */ + +/* slow path */ + +/* + * General service functions + */ + +/* send the MCP a request, block until there is a reply */ +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) +{ + int mb_idx = BP_FW_MB_IDX(bp); + u32 seq; + u32 rc = 0; + u32 cnt = 1; + u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; + + mutex_lock(&bp->fw_mb_mutex); + seq = ++bp->fw_seq; + SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); + SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); + + DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", + (command | seq), param); + + do { + /* let the FW do it's magic ... */ + msleep(delay); + + rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); + + /* Give the FW up to 5 second (500*10ms) */ + } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); + + DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", + cnt*delay, rc, seq); + + /* is this a reply to our command? */ + if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) + rc &= FW_MSG_CODE_MASK; + else { + /* FW BUG! 
*/ + BNX2X_ERR("FW failed to respond!\n"); + bnx2x_fw_dump(bp); + rc = 0; + } + mutex_unlock(&bp->fw_mb_mutex); + + return rc; +} + +static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) +{ +#ifdef BCM_CNIC + /* Statistics are not supported for CNIC Clients at the moment */ + if (IS_FCOE_FP(fp)) + return false; +#endif + return true; +} + +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) +{ + if (CHIP_IS_E1x(bp)) { + struct tstorm_eth_function_common_config tcfg = {0}; + + storm_memset_func_cfg(bp, &tcfg, p->func_id); + } + + /* Enable the function in the FW */ + storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); + storm_memset_func_en(bp, p->func_id, 1); + + /* spq */ + if (p->func_flgs & FUNC_FLG_SPQ) { + storm_memset_spq_addr(bp, p->spq_map, p->func_id); + REG_WR(bp, XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); + } +} + +/** + * bnx2x_get_tx_only_flags - Return common flags + * + * @bp device handle + * @fp queue handle + * @zero_stats TRUE if statistics zeroing is needed + * + * Return the flags that are common for the Tx-only and not normal connections. + */ +static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool zero_stats) +{ + unsigned long flags = 0; + + /* PF driver will always initialize the Queue to an ACTIVE state */ + __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); + + /* tx only connections collect statistics (on the same index as the + * parent connection). The statistics are zeroed when the parent + * connection is initialized. + */ + if (stat_counter_valid(bp, fp)) { + __set_bit(BNX2X_Q_FLG_STATS, &flags); + if (zero_stats) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + } + + return flags; +} + +static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool leading) +{ + unsigned long flags = 0; + + /* calculate other queue flags */ + if (IS_MF_SD(bp)) + __set_bit(BNX2X_Q_FLG_OV, &flags); + + if (IS_FCOE_FP(fp)) + __set_bit(BNX2X_Q_FLG_FCOE, &flags); + + if (!fp->disable_tpa) { + __set_bit(BNX2X_Q_FLG_TPA, &flags); + __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); + } + + if (leading) { + __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); + __set_bit(BNX2X_Q_FLG_MCAST, &flags); + } + + /* Always set HW VLAN stripping */ + __set_bit(BNX2X_Q_FLG_VLAN, &flags); + + + return flags | bnx2x_get_common_flags(bp, fp, true); +} + +static void bnx2x_pf_q_prep_general(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, + u8 cos) +{ + gen_init->stat_id = bnx2x_stats_id(fp); + gen_init->spcl_id = fp->cl_id; + + /* Always use mini-jumbo MTU for FCoE L2 ring */ + if (IS_FCOE_FP(fp)) + gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; + else + gen_init->mtu = bp->dev->mtu; + + gen_init->cos = cos; +} + +static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, + struct bnx2x_rxq_setup_params *rxq_init) +{ + u8 max_sge = 0; + u16 sge_sz = 0; + u16 tpa_agg_size = 0; + + if (!fp->disable_tpa) { + pause->sge_th_hi = 250; + pause->sge_th_lo = 150; + tpa_agg_size = min_t(u32, + (min_t(u32, 8, MAX_SKB_FRAGS) * + SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); + max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> + SGE_PAGE_SHIFT; + max_sge = ((max_sge + PAGES_PER_SGE - 1) & + (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; + sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE, + 0xffff); + } + + /* pause - not for e1 */ + if (!CHIP_IS_E1(bp)) { + pause->bd_th_hi = 350; + pause->bd_th_lo = 250; 
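+ /* RX completion queue (RCQ) pause thresholds */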
+ pause->rcq_th_hi = 350; + pause->rcq_th_lo = 250; + + pause->pri_map = 1; + } + + /* rxq setup */ + rxq_init->dscr_map = fp->rx_desc_mapping; + rxq_init->sge_map = fp->rx_sge_mapping; + rxq_init->rcq_map = fp->rx_comp_mapping; + rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; + + /* This should be a maximum number of data bytes that may be + * placed on the BD (not including paddings). + */ + rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN - + IP_HEADER_ALIGNMENT_PADDING; + + rxq_init->cl_qzone_id = fp->cl_qzone_id; + rxq_init->tpa_agg_sz = tpa_agg_size; + rxq_init->sge_buf_sz = sge_sz; + rxq_init->max_sges_pkt = max_sge; + rxq_init->rss_engine_id = BP_FUNC(bp); + + /* Maximum number or simultaneous TPA aggregation for this Queue. + * + * For PF Clients it should be the maximum avaliable number. + * VF driver(s) may want to define it to a smaller value. + */ + rxq_init->max_tpa_queues = + (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H_E2); + + rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; + rxq_init->fw_sb_id = fp->fw_sb_id; + + if (IS_FCOE_FP(fp)) + rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; + else + rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; +} + +static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, + u8 cos) +{ + txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; + txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; + txq_init->fw_sb_id = fp->fw_sb_id; + + /* + * set the tss leading client id for TX classfication == + * leading RSS client id + */ + txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); + + if (IS_FCOE_FP(fp)) { + txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; + } +} + +static void bnx2x_pf_init(struct bnx2x *bp) +{ + struct bnx2x_func_init_params func_init = {0}; + struct event_ring_data eq_data = { {0} }; + u16 flags; + + if (!CHIP_IS_E1x(bp)) { + /* reset IGU PF statistics: MSIX + ATTN */ + /* PF */ + REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + BNX2X_IGU_STAS_MSG_VF_CNT*4 + + (CHIP_MODE_IS_4_PORT(bp) ? + BP_FUNC(bp) : BP_VN(bp))*4, 0); + /* ATTN */ + REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + BNX2X_IGU_STAS_MSG_VF_CNT*4 + + BNX2X_IGU_STAS_MSG_PF_CNT*4 + + (CHIP_MODE_IS_4_PORT(bp) ? + BP_FUNC(bp) : BP_VN(bp))*4, 0); + } + + /* function setup flags */ + flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); + + /* This flag is relevant for E1x only. + * E2 doesn't have a TPA configuration in a function level. + */ + flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; + + func_init.func_flgs = flags; + func_init.pf_id = BP_FUNC(bp); + func_init.func_id = BP_FUNC(bp); + func_init.spq_map = bp->spq_mapping; + func_init.spq_prod = bp->spq_prod_idx; + + bnx2x_func_init(bp, &func_init); + + memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); + + /* + * Congestion management values depend on the link rate + * There is no active link so initial link rate is set to 10 Gbps. + * When the link comes up The congestion management values are + * re-calculated according to the actual link rate. 
+ */ + bp->link_vars.line_speed = SPEED_10000; + bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); + + /* Only the PMF sets the HW */ + if (bp->port.pmf) + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + + /* init Event Queue */ + eq_data.base_addr.hi = U64_HI(bp->eq_mapping); + eq_data.base_addr.lo = U64_LO(bp->eq_mapping); + eq_data.producer = bp->eq_prod; + eq_data.index_id = HC_SP_INDEX_EQ_CONS; + eq_data.sb_id = DEF_SB_ID; + storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); +} + + +static void bnx2x_e1h_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + bnx2x_tx_disable(bp); + + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); +} + +static void bnx2x_e1h_enable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + + /* Tx queue should be only reenabled */ + netif_tx_wake_all_queues(bp->dev); + + /* + * Should not call netif_carrier_on since it will be called if the link + * is up when checking for link state + */ +} + +/* called due to MCP event (on pmf): + * reread new bandwidth configuration + * configure FW + * notify others function about the change + */ +static inline void bnx2x_config_mf_bw(struct bnx2x *bp) +{ + if (bp->link_vars.link_up) { + bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); + bnx2x_link_sync_notify(bp); + } + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); +} + +static inline void bnx2x_set_mf_bw(struct bnx2x *bp) +{ + bnx2x_config_mf_bw(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); +} + +static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) +{ + DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); + + if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { + + /* + * This is the only place besides the function initialization + * where the bp->flags can change so it is done without any + * locks + */ + if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { + DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; + + bnx2x_e1h_disable(bp); + } else { + DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); + bp->flags &= ~MF_FUNC_DIS; + + bnx2x_e1h_enable(bp); + } + dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; + } + if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { + bnx2x_config_mf_bw(bp); + dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; + } + + /* Report results to MCP */ + if (dcc_event) + bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); + else + bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); +} + +/* must be called under the spq lock */ +static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) +{ + struct eth_spe *next_spe = bp->spq_prod_bd; + + if (bp->spq_prod_bd == bp->spq_last_bd) { + bp->spq_prod_bd = bp->spq; + bp->spq_prod_idx = 0; + DP(NETIF_MSG_TIMER, "end of spq\n"); + } else { + bp->spq_prod_bd++; + bp->spq_prod_idx++; + } + return next_spe; +} + +/* must be called under the spq lock */ +static inline void bnx2x_sp_prod_update(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + + /* + * Make sure that BD data is updated before writing the producer: + * BD data is written to the memory, the producer is read from the + * memory, thus we need a full memory barrier to ensure the ordering. 
+ */ + mb(); + + REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), + bp->spq_prod_idx); + mmiowb(); +} + +/** + * bnx2x_is_contextless_ramrod - check if the current command ends on EQ + * + * @cmd: command to check + * @cmd_type: command type + */ +static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) +{ + if ((cmd_type == NONE_CONNECTION_TYPE) || + (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || + (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || + (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || + (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || + (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || + (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) + return true; + else + return false; + +} + + +/** + * bnx2x_sp_post - place a single command on an SP ring + * + * @bp: driver handle + * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) + * @cid: SW CID the command is related to + * @data_hi: command private data address (high 32 bits) + * @data_lo: command private data address (low 32 bits) + * @cmd_type: command type (e.g. NONE, ETH) + * + * SP data is handled as if it's always an address pair, thus data fields are + * not swapped to little endian in upper functions. Instead this function swaps + * data as if it's two u32 fields. + */ +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int cmd_type) +{ + struct eth_spe *spe; + u16 type; + bool common = bnx2x_is_contextless_ramrod(command, cmd_type); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -EIO; +#endif + + spin_lock_bh(&bp->spq_lock); + + if (common) { + if (!atomic_read(&bp->eq_spq_left)) { + BNX2X_ERR("BUG! EQ ring full!\n"); + spin_unlock_bh(&bp->spq_lock); + bnx2x_panic(); + return -EBUSY; + } + } else if (!atomic_read(&bp->cq_spq_left)) { + BNX2X_ERR("BUG! SPQ ring full!\n"); + spin_unlock_bh(&bp->spq_lock); + bnx2x_panic(); + return -EBUSY; + } + + spe = bnx2x_sp_get_next(bp); + + /* CID needs port number to be encoded int it */ + spe->hdr.conn_and_cmd_data = + cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | + HW_CID(bp, cid)); + + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; + + type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID); + + spe->hdr.type = cpu_to_le16(type); + + spe->data.update_data_addr.hi = cpu_to_le32(data_hi); + spe->data.update_data_addr.lo = cpu_to_le32(data_lo); + + /* + * It's ok if the actual decrement is issued towards the memory + * somewhere between the spin_lock and spin_unlock. Thus no + * more explict memory barrier is needed. + */ + if (common) + atomic_dec(&bp->eq_spq_left); + else + atomic_dec(&bp->cq_spq_left); + + + DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, + "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) " + "type(0x%x) left (CQ, EQ) (%x,%x)\n", + bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), + (u32)(U64_LO(bp->spq_mapping) + + (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, + HW_CID(bp, cid), data_hi, data_lo, type, + atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); + + bnx2x_sp_prod_update(bp); + spin_unlock_bh(&bp->spq_lock); + return 0; +} + +/* acquire split MCP access lock register */ +static int bnx2x_acquire_alr(struct bnx2x *bp) +{ + u32 j, val; + int rc = 0; + + might_sleep(); + for (j = 0; j < 1000; j++) { + val = (1UL << 31); + REG_WR(bp, GRCBASE_MCP + 0x9c, val); + val = REG_RD(bp, GRCBASE_MCP + 0x9c); + if (val & (1L << 31)) + break; + + msleep(5); + } + if (!(val & (1L << 31))) { + BNX2X_ERR("Cannot acquire MCP access lock register\n"); + rc = -EBUSY; + } + + return rc; +} + +/* release split MCP access lock register */ +static void bnx2x_release_alr(struct bnx2x *bp) +{ + REG_WR(bp, GRCBASE_MCP + 0x9c, 0); +} + +#define BNX2X_DEF_SB_ATT_IDX 0x0001 +#define BNX2X_DEF_SB_IDX 0x0002 + +static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) +{ + struct host_sp_status_block *def_sb = bp->def_status_blk; + u16 rc = 0; + + barrier(); /* status block is written to by the chip */ + if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { + bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; + rc |= BNX2X_DEF_SB_ATT_IDX; + } + + if (bp->def_idx != def_sb->sp_sb.running_index) { + bp->def_idx = def_sb->sp_sb.running_index; + rc |= BNX2X_DEF_SB_IDX; + } + + /* Do not reorder: indecies reading should complete before handling */ + barrier(); + return rc; +} + +/* + * slow path service functions + */ + +static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) +{ + int port = BP_PORT(bp); + u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : + NIG_REG_MASK_INTERRUPT_PORT0; + u32 aeu_mask; + u32 nig_mask = 0; + u32 reg_addr; + + if (bp->attn_state & asserted) + BNX2X_ERR("IGU ERROR\n"); + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, aeu_addr); + + DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", + aeu_mask, asserted); + aeu_mask &= ~(asserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + + REG_WR(bp, aeu_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state |= asserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); + + if (asserted & ATTN_HARD_WIRED_MASK) { + if (asserted & ATTN_NIG_FOR_FUNC) { + + bnx2x_acquire_phy_lock(bp); + + /* save nig interrupt mask */ + nig_mask = REG_RD(bp, nig_int_mask_addr); + + /* If nig_mask is not set, no need to call the update + * function. + */ + if (nig_mask) { + REG_WR(bp, nig_int_mask_addr, 0); + + bnx2x_link_attn(bp); + } + + /* handle unicore attn? 
*/ + } + if (asserted & ATTN_SW_TIMER_4_FUNC) + DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); + + if (asserted & GPIO_2_FUNC) + DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); + + if (asserted & GPIO_3_FUNC) + DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); + + if (asserted & GPIO_4_FUNC) + DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); + + if (port == 0) { + if (asserted & ATTN_GENERAL_ATTN_1) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_2) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_3) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); + } + } else { + if (asserted & ATTN_GENERAL_ATTN_4) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_5) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_6) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); + } + } + + } /* if hardwired */ + + if (bp->common.int_block == INT_BLOCK_HC) + reg_addr = (HC_REG_COMMAND_REG + port*32 + + COMMAND_REG_ATTN_BITS_SET); + else + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); + + DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, + (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); + REG_WR(bp, reg_addr, asserted); + + /* now set back the mask */ + if (asserted & ATTN_NIG_FOR_FUNC) { + REG_WR(bp, nig_int_mask_addr, nig_mask); + bnx2x_release_phy_lock(bp); + } +} + +static inline void bnx2x_fan_failure(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 ext_phy_config; + /* mark the failure */ + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + + ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; + SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, + ext_phy_config); + + /* log the failure */ + netdev_err(bp->dev, "Fan Failure on Network Controller has caused" + " the driver to shutdown the card to prevent permanent" + " damage. Please contact OEM Support for assistance\n"); +} + +static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) +{ + int port = BP_PORT(bp); + int reg_offset; + u32 val; + + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + + if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { + + val = REG_RD(bp, reg_offset); + val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("SPIO5 hw attention\n"); + + /* Fan failure attention */ + bnx2x_hw_reset_phy(&bp->link_params); + bnx2x_fan_failure(bp); + } + + if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { + bnx2x_acquire_phy_lock(bp); + bnx2x_handle_module_detect_int(&bp->link_params); + bnx2x_release_phy_lock(bp); + } + + if (attn & HW_INTERRUT_ASSERT_SET_0) { + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set0 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); + bnx2x_panic(); + } +} + +static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { + + val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); + BNX2X_ERR("DB hw attention 0x%x\n", val); + /* DORQ discard attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from DORQ\n"); + } + + if (attn & HW_INTERRUT_ASSERT_SET_1) { + + int port = BP_PORT(bp); + int reg_offset; + + reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set1 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); + bnx2x_panic(); + } +} + +static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { + + val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); + BNX2X_ERR("CFC hw attention 0x%x\n", val); + /* CFC error attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from CFC\n"); + } + + if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { + val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); + BNX2X_ERR("PXP hw attention-0 0x%x\n", val); + /* RQ_USDMDP_FIFO_OVERFLOW */ + if (val & 0x18000) + BNX2X_ERR("FATAL error from PXP\n"); + + if (!CHIP_IS_E1x(bp)) { + val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); + BNX2X_ERR("PXP hw attention-1 0x%x\n", val); + } + } + + if (attn & HW_INTERRUT_ASSERT_SET_2) { + + int port = BP_PORT(bp); + int reg_offset; + + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set2 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); + bnx2x_panic(); + } +} + +static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { + + if (attn & BNX2X_PMF_LINK_ASSERT) { + int func = BP_FUNC(bp); + + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, + func_mf_config[BP_ABS_FUNC(bp)].config); + val = SHMEM_RD(bp, + func_mb[BP_FW_MB_IDX(bp)].drv_status); + if (val & DRV_STATUS_DCC_EVENT_MASK) + bnx2x_dcc_event(bp, + (val & DRV_STATUS_DCC_EVENT_MASK)); + + if (val & DRV_STATUS_SET_MF_BW) + bnx2x_set_mf_bw(bp); + + if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) + bnx2x_pmf_update(bp); + + if (bp->port.pmf && + (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && + bp->dcbx_enabled > 0) + /* start dcbx state machine */ + bnx2x_dcbx_set_params(bp, + BNX2X_DCBX_STATE_NEG_RECEIVED); + if (bp->link_vars.periodic_flags & + PERIODIC_FLAGS_LINK_EVENT) { + /* sync with link */ + bnx2x_acquire_phy_lock(bp); + bp->link_vars.periodic_flags &= + ~PERIODIC_FLAGS_LINK_EVENT; + bnx2x_release_phy_lock(bp); + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + bnx2x_link_report(bp); + } + /* Always call it here: bnx2x_link_report() will + * prevent the link indication duplication. + */ + bnx2x__link_status_update(bp); + } else if (attn & BNX2X_MC_ASSERT_BITS) { + + BNX2X_ERR("MC assert!\n"); + bnx2x_mc_assert(bp); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); + bnx2x_panic(); + + } else if (attn & BNX2X_MCP_ASSERT) { + + BNX2X_ERR("MCP assert!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); + bnx2x_fw_dump(bp); + + } else + BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); + } + + if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { + BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); + if (attn & BNX2X_GRC_TIMEOUT) { + val = CHIP_IS_E1(bp) ? 0 : + REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); + BNX2X_ERR("GRC time-out 0x%08x\n", val); + } + if (attn & BNX2X_GRC_RSV) { + val = CHIP_IS_E1(bp) ? 0 : + REG_RD(bp, MISC_REG_GRC_RSV_ATTN); + BNX2X_ERR("GRC reserved 0x%08x\n", val); + } + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); + } +} + +/* + * Bits map: + * 0-7 - Engine0 load counter. + * 8-15 - Engine1 load counter. + * 16 - Engine0 RESET_IN_PROGRESS bit. + * 17 - Engine1 RESET_IN_PROGRESS bit. + * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function + * on the engine + * 19 - Engine1 ONE_IS_LOADED. + * 20 - Chip reset flow bit. When set none-leader must wait for both engines + * leader to complete (check for both RESET_IN_PROGRESS bits and not for + * just the one belonging to its engine). + * + */ +#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 + +#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff +#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 +#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 +#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 +#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 +#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 +#define BNX2X_GLOBAL_RESET_BIT 0x00040000 + +/* + * Set the GLOBAL_RESET bit. 
+ * + * Should be run under rtnl lock + */ +void bnx2x_set_reset_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); + barrier(); + mmiowb(); +} + +/* + * Clear the GLOBAL_RESET bit. + * + * Should be run under rtnl lock + */ +static inline void bnx2x_clear_reset_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); + barrier(); + mmiowb(); +} + +/* + * Checks the GLOBAL_RESET bit. + * + * should be run under rtnl lock + */ +static inline bool bnx2x_reset_is_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); + return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; +} + +/* + * Clear RESET_IN_PROGRESS bit for the current engine. + * + * Should be run under rtnl lock + */ +static inline void bnx2x_set_reset_done(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* Clear the bit */ + val &= ~bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); +} + +/* + * Set RESET_IN_PROGRESS for the current engine. + * + * should be run under rtnl lock + */ +void bnx2x_set_reset_in_progress(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* Set the bit */ + val |= bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); +} + +/* + * Checks the RESET_IN_PROGRESS bit for the given engine. + * should be run under rtnl lock + */ +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = engine ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* return false if bit is set */ + return (val & bit) ? false : true; +} + +/* + * Increment the load counter for the current engine. + * + * should be run under rtnl lock + */ +void bnx2x_inc_load_cnt(struct bnx2x *bp) +{ + u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* increment... */ + val1++; + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); +} + +/** + * bnx2x_dec_load_cnt - decrement the load counter + * + * @bp: driver handle + * + * Should be run under rtnl lock. + * Decrements the load counter for the current engine. Returns + * the new counter value. + */ +u32 bnx2x_dec_load_cnt(struct bnx2x *bp) +{ + u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* decrement... 
*/ + val1--; + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); + + return val1; +} + +/* + * Read the load counter for the current engine. + * + * should be run under rtnl lock + */ +static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) +{ + u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK); + u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val); + + val = (val & mask) >> shift; + + DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val); + + return val; +} + +/* + * Reset the load counter for the current engine. + * + * should be run under rtnl lock + */ +static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); +} + +static inline void _print_next_block(int idx, const char *blk) +{ + pr_cont("%s%s", idx ? ", " : "", blk); +} + +static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, + bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "BRB"); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PARSER"); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TSDM"); + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "SEARCHER"); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TCM"); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XPB"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, + bool *global, bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PBF"); + break; + case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "QM"); + break; + case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TM"); + break; + case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XSDM"); + break; + case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XCM"); + break; + case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "DOORBELLQ"); + break; + case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "NIG"); + break; + case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "VAUX PCI CORE"); + *global 
= true; + break; + case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "DEBUG"); + break; + case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "USDM"); + break; + case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "UCM"); + break; + case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "USEMI"); + break; + case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "UPB"); + break; + case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CSDM"); + break; + case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CCM"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, + bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PXP"); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CFC"); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "DMAE"); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "IGU"); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "MISC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, + bool *global, bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: + if (print) + _print_next_block(par_num++, "MCP ROM"); + *global = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: + if (print) + _print_next_block(par_num++, + "MCP UMP RX"); + *global = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: + if (print) + _print_next_block(par_num++, + "MCP UMP TX"); + *global = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: + if (print) + _print_next_block(par_num++, + "MCP SCPAD"); + *global = true; + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, + bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PGLUE_B"); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "ATC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, + u32 *sig) +{ + if 
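/*
 * Illustrative sketch (not part of the patch): all of the
 * bnx2x_check_blocks_with_parityN() helpers above share one idiom -
 * walk a 32-bit attention word bit by bit, report each set bit, and
 * clear it so the loop terminates as soon as no bits remain.  A
 * stripped-down, standalone version of that idiom:
 */
#include <stdint.h>
#include <stdio.h>

static int example_walk_parity_bits(uint32_t sig, int par_num)
{
	int i;

	for (i = 0; sig; i++) {
		uint32_t cur_bit = (uint32_t)1 << i;

		if (sig & cur_bit) {
			/* the driver maps cur_bit to a block name here */
			printf("%sbit %d", par_num ? ", " : "", i);
			par_num++;
			sig &= ~cur_bit;	/* clear so the loop can stop */
		}
	}
	return par_num;
}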
((sig[0] & HW_PRTY_ASSERT_SET_0) || + (sig[1] & HW_PRTY_ASSERT_SET_1) || + (sig[2] & HW_PRTY_ASSERT_SET_2) || + (sig[3] & HW_PRTY_ASSERT_SET_3) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) { + int par_num = 0; + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x " + "[4]:0x%08x\n", + sig[0] & HW_PRTY_ASSERT_SET_0, + sig[1] & HW_PRTY_ASSERT_SET_1, + sig[2] & HW_PRTY_ASSERT_SET_2, + sig[3] & HW_PRTY_ASSERT_SET_3, + sig[4] & HW_PRTY_ASSERT_SET_4); + if (print) + netdev_err(bp->dev, + "Parity errors detected in blocks: "); + par_num = bnx2x_check_blocks_with_parity0( + sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); + par_num = bnx2x_check_blocks_with_parity1( + sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); + par_num = bnx2x_check_blocks_with_parity2( + sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); + par_num = bnx2x_check_blocks_with_parity3( + sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); + par_num = bnx2x_check_blocks_with_parity4( + sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); + + if (print) + pr_cont("\n"); + + return true; + } else + return false; +} + +/** + * bnx2x_chk_parity_attn - checks for parity attentions. + * + * @bp: driver handle + * @global: true if there was a global attention + * @print: show parity attention in syslog + */ +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) +{ + struct attn_route attn = { {0} }; + int port = BP_PORT(bp); + + attn.sig[0] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + + port*4); + attn.sig[1] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + + port*4); + attn.sig[2] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + + port*4); + attn.sig[3] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + + port*4); + + if (!CHIP_IS_E1x(bp)) + attn.sig[4] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + + port*4); + + return bnx2x_parity_attn(bp, global, print, attn.sig); +} + + +static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) +{ + u32 val; + if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { + + val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); + BNX2X_ERR("PGLUE hw attention 0x%x\n", val); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "ADDRESS_ERROR\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "INCORRECT_RCV_BEHAVIOR\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "WAS_ERROR_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_LENGTH_VIOLATION_ATTN\n"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_GRC_SPACE_VIOLATION_ATTN\n"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_MSIX_BAR_VIOLATION_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "TCPL_ERROR_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "TCPL_IN_TWO_RCBS_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "CSSNOOP_FIFO_OVERFLOW\n"); + } + if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { + val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); + BNX2X_ERR("ATC hw attention 
0x%x\n", val); + if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) + BNX2X_ERR("ATC_ATC_INT_STS_REG" + "_ATC_TCPL_TO_NOT_PEND\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_GPA_MULTIPLE_HITS\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_RCPL_TO_EMPTY_CNT\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_IREQ_LESS_THAN_STU\n"); + } + + if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { + BNX2X_ERR("FATAL parity attention set4 0x%x\n", + (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); + } + +} + +static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) +{ + struct attn_route attn, *group_mask; + int port = BP_PORT(bp); + int index; + u32 reg_addr; + u32 val; + u32 aeu_mask; + bool global = false; + + /* need to take HW lock because MCP or other port might also + try to handle this event */ + bnx2x_acquire_alr(bp); + + if (bnx2x_chk_parity_attn(bp, &global, true)) { +#ifndef BNX2X_STOP_ON_ERROR + bp->recovery_state = BNX2X_RECOVERY_INIT; + schedule_delayed_work(&bp->sp_rtnl_task, 0); + /* Disable HW interrupts */ + bnx2x_int_disable(bp); + /* In case of parity errors don't handle attentions so that + * other function would "see" parity errors. + */ +#else + bnx2x_panic(); +#endif + bnx2x_release_alr(bp); + return; + } + + attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); + attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); + attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); + attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); + if (!CHIP_IS_E1x(bp)) + attn.sig[4] = + REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); + else + attn.sig[4] = 0; + + DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", + attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + if (deasserted & (1 << index)) { + group_mask = &bp->attn_group[index]; + + DP(NETIF_MSG_HW, "group[%d]: %08x %08x " + "%08x %08x %08x\n", + index, + group_mask->sig[0], group_mask->sig[1], + group_mask->sig[2], group_mask->sig[3], + group_mask->sig[4]); + + bnx2x_attn_int_deasserted4(bp, + attn.sig[4] & group_mask->sig[4]); + bnx2x_attn_int_deasserted3(bp, + attn.sig[3] & group_mask->sig[3]); + bnx2x_attn_int_deasserted1(bp, + attn.sig[1] & group_mask->sig[1]); + bnx2x_attn_int_deasserted2(bp, + attn.sig[2] & group_mask->sig[2]); + bnx2x_attn_int_deasserted0(bp, + attn.sig[0] & group_mask->sig[0]); + } + } + + bnx2x_release_alr(bp); + + if (bp->common.int_block == INT_BLOCK_HC) + reg_addr = (HC_REG_COMMAND_REG + port*32 + + COMMAND_REG_ATTN_BITS_CLR); + else + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); + + val = ~deasserted; + DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, + (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); + REG_WR(bp, reg_addr, val); + + if (~bp->attn_state & deasserted) + BNX2X_ERR("IGU ERROR\n"); + + reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, reg_addr); + + DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", + aeu_mask, deasserted); + aeu_mask |= (deasserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + + REG_WR(bp, reg_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state &= ~deasserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); +} + +static void bnx2x_attn_int(struct bnx2x *bp) +{ + /* read local copy of bits */ + u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. + attn_bits); + u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. + attn_bits_ack); + u32 attn_state = bp->attn_state; + + /* look for changed bits */ + u32 asserted = attn_bits & ~attn_ack & ~attn_state; + u32 deasserted = ~attn_bits & attn_ack & attn_state; + + DP(NETIF_MSG_HW, + "attn_bits %x attn_ack %x asserted %x deasserted %x\n", + attn_bits, attn_ack, asserted, deasserted); + + if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) + BNX2X_ERR("BAD attention state\n"); + + /* handle bits that were raised */ + if (asserted) + bnx2x_attn_int_asserted(bp, asserted); + + if (deasserted) + bnx2x_attn_int_deasserted(bp, deasserted); +} + +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update) +{ + u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; + + bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, + igu_addr); +} + +static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) +{ + /* No memory barriers */ + storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); + mmiowb(); /* keep prod updates ordered */ +} + +#ifdef BCM_CNIC +static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, + union event_ring_elem *elem) +{ + u8 err = elem->message.error; + + if (!bp->cnic_eth_dev.starting_cid || + (cid < bp->cnic_eth_dev.starting_cid && + cid != bp->cnic_eth_dev.iscsi_l2_cid)) + return 1; + + DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); + + if (unlikely(err)) { + + BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", + cid); + bnx2x_panic_dump(bp); + } + bnx2x_cnic_cfc_comp(bp, cid, err); + return 0; +} +#endif + +static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) +{ + struct bnx2x_mcast_ramrod_params rparam; + int rc; + + memset(&rparam, 0, sizeof(rparam)); + + rparam.mcast_obj = &bp->mcast_obj; + + netif_addr_lock_bh(bp->dev); + + /* Clear pending state for the last command */ + bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); + + /* If there are pending mcast commands - send them */ + if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + if (rc < 0) + BNX2X_ERR("Failed to send pending mcast commands: %d\n", + rc); + } + + netif_addr_unlock_bh(bp->dev); +} + +static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, + union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + struct bnx2x_vlan_mac_obj *vlan_mac_obj; + + /* Always push next commands out, don't wait here */ + __set_bit(RAMROD_CONT, &ramrod_flags); + + switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + case BNX2X_FILTER_MAC_PENDING: +#ifdef BCM_CNIC + if (cid == BNX2X_ISCSI_ETH_CID) + 
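/*
 * Illustrative sketch (not part of the patch): bnx2x_attn_int() above
 * derives the "newly asserted" and "newly de-asserted" attention lines
 * from three bitmaps - the bits reported in the status block, the bits
 * already acknowledged, and the driver's cached attn_state.  Standalone
 * version of that derivation:
 */
#include <stdint.h>

struct example_attn_delta {
	uint32_t asserted;	/* raised now, not yet acked or tracked */
	uint32_t deasserted;	/* acked and tracked, no longer raised */
};

static struct example_attn_delta example_attn_delta(uint32_t attn_bits,
						    uint32_t attn_ack,
						    uint32_t attn_state)
{
	struct example_attn_delta d;

	d.asserted = attn_bits & ~attn_ack & ~attn_state;
	d.deasserted = ~attn_bits & attn_ack & attn_state;
	return d;
}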
			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+		else
+#endif
+			vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+		break;
+	case BNX2X_FILTER_MCAST_PENDING:
+		/* This is only relevant for 57710 where multicast MACs are
+		 * configured as unicast MACs using the same ramrod.
+		 */
+		bnx2x_handle_mcast_eqe(bp);
+		return;
+	default:
+		BNX2X_ERR("Unsupported classification command: %d\n",
+			  elem->message.data.eth_event.echo);
+		return;
+	}
+
+	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+	else if (rc > 0)
+		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+#endif
+
+static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+	netif_addr_lock_bh(bp->dev);
+
+	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+	/* Send rx_mode command again if was requested */
+	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+		bnx2x_set_storm_rx_mode(bp);
+#ifdef BCM_CNIC
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, true);
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, false);
+#endif
+
+	netif_addr_unlock_bh(bp->dev);
+}
+
+static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+	struct bnx2x *bp, u32 cid)
+{
+	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+#ifdef BCM_CNIC
+	if (cid == BNX2X_FCOE_ETH_CID)
+		return &bnx2x_fcoe(bp, q_obj);
+	else
+#endif
+		return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+}
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+	u16 hw_cons, sw_cons, sw_prod;
+	union event_ring_elem *elem;
+	u32 cid;
+	u8 opcode;
+	int spqe_cnt = 0;
+	struct bnx2x_queue_sp_obj *q_obj;
+	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
+
+	hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+	 * When we get the next-page we need to adjust so the loop
+	 * condition below will be met. The next element is the size of a
+	 * regular element and hence incrementing by 1
+	 */
+	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+		hw_cons++;
+
+	/* This function may never run in parallel with itself for a
+	 * specific bp, thus there is no need for a "paired" read memory
+	 * barrier here.
+ */ + sw_cons = bp->eq_cons; + sw_prod = bp->eq_prod; + + DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", + hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); + + for (; sw_cons != hw_cons; + sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { + + + elem = &bp->eq_ring[EQ_DESC(sw_cons)]; + + cid = SW_CID(elem->message.data.cfc_del_event.cid); + opcode = elem->message.opcode; + + + /* handle eq element */ + switch (opcode) { + case EVENT_RING_OPCODE_STAT_QUERY: + DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", + bp->stats_comp++); + /* nothing to do with stats comp */ + goto next_spqe; + + case EVENT_RING_OPCODE_CFC_DEL: + /* handle according to cid range */ + /* + * we may want to verify here that the bp state is + * HALTING + */ + DP(BNX2X_MSG_SP, + "got delete ramrod for MULTI[%d]\n", cid); +#ifdef BCM_CNIC + if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) + goto next_spqe; +#endif + q_obj = bnx2x_cid_to_q_obj(bp, cid); + + if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) + break; + + + + goto next_spqe; + + case EVENT_RING_OPCODE_STOP_TRAFFIC: + DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_STOP)) + break; + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); + goto next_spqe; + + case EVENT_RING_OPCODE_START_TRAFFIC: + DP(BNX2X_MSG_SP, "got START TRAFFIC\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_START)) + break; + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); + goto next_spqe; + case EVENT_RING_OPCODE_FUNCTION_START: + DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) + break; + + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_STOP: + DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) + break; + + goto next_spqe; + } + + switch (opcode | bp->state) { + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_OPENING_WAIT4_PORT): + cid = elem->message.data.eth_event.echo & + BNX2X_SWCID_MASK; + DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", + cid); + rss_raw->clear_pending(rss_raw); + break; + + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_SET_MAC | + BNX2X_STATE_CLOSING_WAIT4_HALT): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); + bnx2x_handle_classification_eqe(bp, elem); + break; + + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got mcast ramrod\n"); + bnx2x_handle_mcast_eqe(bp); + break; + + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); + bnx2x_handle_rx_mode_eqe(bp); + break; + default: + /* unknown event log error and continue */ + BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", + elem->message.opcode, bp->state); + } +next_spqe: + spqe_cnt++; + } /* for */ + + smp_mb__before_atomic_inc(); + atomic_add(spqe_cnt, &bp->eq_spq_left); + + bp->eq_cons = sw_cons; + bp->eq_prod = sw_prod; + /* Make sure that above mem writes were issued towards the memory */ + smp_wmb(); + + /* update producer */ + bnx2x_update_eq_prod(bp, bp->eq_prod); +} + +static void bnx2x_sp_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); + u16 status; + + status = bnx2x_update_dsb_idx(bp); +/* if (status == 0) */ +/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ + + DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); + + /* HW attentions */ + if (status & BNX2X_DEF_SB_ATT_IDX) { + bnx2x_attn_int(bp); + status &= ~BNX2X_DEF_SB_ATT_IDX; + } + + /* SP events: STAT_QUERY and others */ + if (status & BNX2X_DEF_SB_IDX) { +#ifdef BCM_CNIC + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + + if ((!NO_FCOE(bp)) && + (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + /* + * Prevent local bottom-halves from running as + * we are going to change the local NAPI list. + */ + local_bh_disable(); + napi_schedule(&bnx2x_fcoe(bp, napi)); + local_bh_enable(); + } +#endif + /* Handle EQ completions */ + bnx2x_eq_int(bp); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, + le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); + + status &= ~BNX2X_DEF_SB_IDX; + } + + if (unlikely(status)) + DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", + status); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, + le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); +} + +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct bnx2x *bp = netdev_priv(dev); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, + IGU_INT_DISABLE, 0); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + +#ifdef BCM_CNIC + { + struct cnic_ops *c_ops; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + } +#endif + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + + return IRQ_HANDLED; +} + +/* end of slow path */ + + +void bnx2x_drv_pulse(struct bnx2x *bp) +{ + SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, + bp->fw_drv_pulse_wr_seq); +} + + +static void bnx2x_timer(unsigned long data) +{ + u8 cos; + struct bnx2x *bp = (struct bnx2x *) data; + + if (!netif_running(bp->dev)) + return; + + if (poll) { + struct bnx2x_fastpath *fp = &bp->fp[0]; + + for_each_cos_in_tx_queue(fp, cos) + bnx2x_tx_int(bp, &fp->txdata[cos]); + bnx2x_rx_int(fp, 1000); + } + + if (!BP_NOMCP(bp)) { + int mb_idx = BP_FW_MB_IDX(bp); + u32 drv_pulse; + u32 mcp_pulse; + + ++bp->fw_drv_pulse_wr_seq; + bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; + /* TBD - add SYSTEM_TIME */ + drv_pulse = bp->fw_drv_pulse_wr_seq; + bnx2x_drv_pulse(bp); + + mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & + MCP_PULSE_SEQ_MASK); + /* The delta between driver pulse and mcp response + * should be 1 (before mcp response) or 0 (after mcp response) + */ + if ((drv_pulse != mcp_pulse) && + (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { + /* someone lost a heartbeat... 
*/ + BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", + drv_pulse, mcp_pulse); + } + } + + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); + + mod_timer(&bp->timer, jiffies + bp->current_interval); +} + +/* end of Statistics */ + +/* nic init */ + +/* + * nic init service functions + */ + +static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) +{ + u32 i; + if (!(len%4) && !(addr%4)) + for (i = 0; i < len; i += 4) + REG_WR(bp, addr + i, fill); + else + for (i = 0; i < len; i++) + REG_WR8(bp, addr + i, fill); + +} + +/* helper: writes FP SP data to FW - data_size in dwords */ +static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, + int fw_sb_id, + u32 *sb_data_p, + u32 data_size) +{ + int index; + for (index = 0; index < data_size; index++) + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + + sizeof(u32)*index, + *(sb_data_p + index)); +} + +static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) +{ + u32 *sb_data_p; + u32 data_size = 0; + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + + /* disable the function first */ + if (!CHIP_IS_E1x(bp)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_DISABLED; + sb_data_e2.common.p_func.vf_valid = false; + sb_data_p = (u32 *)&sb_data_e2; + data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_DISABLED; + sb_data_e1x.common.p_func.vf_valid = false; + sb_data_p = (u32 *)&sb_data_e1x; + data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); + } + bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); + + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, + CSTORM_STATUS_BLOCK_SIZE); + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, + CSTORM_SYNC_BLOCK_SIZE); +} + +/* helper: writes SP SB data to FW */ +static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, + struct hc_sp_status_block_data *sp_sb_data) +{ + int func = BP_FUNC(bp); + int i; + for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + + i*sizeof(u32), + *((u32 *)sp_sb_data + i)); +} + +static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + struct hc_sp_status_block_data sp_sb_data; + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + sp_sb_data.state = SB_DISABLED; + sp_sb_data.p_func.vf_valid = false; + + bnx2x_wr_sp_sb_data(bp, &sp_sb_data); + + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, + CSTORM_SP_STATUS_BLOCK_SIZE); + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, + CSTORM_SP_SYNC_BLOCK_SIZE); + +} + + +static inline +void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, + int igu_sb_id, int igu_seg_id) +{ + hc_sm->igu_sb_id = igu_sb_id; + hc_sm->igu_seg_id = igu_seg_id; + hc_sm->timer_value = 0xFF; + hc_sm->time_to_expire = 0xFFFFFFFF; +} + +static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, + u8 vf_valid, int fw_sb_id, int igu_sb_id) +{ + int igu_seg_id; + + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p; + int data_size; + u32 *sb_data_p; + + if (CHIP_INT_MODE_IS_BC(bp)) + 
igu_seg_id = HC_SEG_ACCESS_NORM; + else + igu_seg_id = IGU_SEG_ACCESS_NORM; + + bnx2x_zero_fp_sb(bp, fw_sb_id); + + if (!CHIP_IS_E1x(bp)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_ENABLED; + sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); + sb_data_e2.common.p_func.vf_id = vfid; + sb_data_e2.common.p_func.vf_valid = vf_valid; + sb_data_e2.common.p_func.vnic_id = BP_VN(bp); + sb_data_e2.common.same_igu_sb_1b = true; + sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); + sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); + hc_sm_p = sb_data_e2.common.state_machine; + sb_data_p = (u32 *)&sb_data_e2; + data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_ENABLED; + sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); + sb_data_e1x.common.p_func.vf_id = 0xff; + sb_data_e1x.common.p_func.vf_valid = false; + sb_data_e1x.common.p_func.vnic_id = BP_VN(bp); + sb_data_e1x.common.same_igu_sb_1b = true; + sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); + sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); + hc_sm_p = sb_data_e1x.common.state_machine; + sb_data_p = (u32 *)&sb_data_e1x; + data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); + } + + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], + igu_sb_id, igu_seg_id); + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], + igu_sb_id, igu_seg_id); + + DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id); + + /* write indecies to HW */ + bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); +} + +static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, + u16 tx_usec, u16 rx_usec) +{ + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, + false, rx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS0, false, + tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS1, false, + tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS2, false, + tx_usec); +} + +static void bnx2x_init_def_sb(struct bnx2x *bp) +{ + struct host_sp_status_block *def_sb = bp->def_status_blk; + dma_addr_t mapping = bp->def_status_blk_mapping; + int igu_sp_sb_index; + int igu_seg_id; + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int reg_offset; + u64 section; + int index; + struct hc_sp_status_block_data sp_sb_data; + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + if (CHIP_INT_MODE_IS_BC(bp)) { + igu_sp_sb_index = DEF_SB_IGU_ID; + igu_seg_id = HC_SEG_ACCESS_DEF; + } else { + igu_sp_sb_index = bp->igu_dsb_id; + igu_seg_id = IGU_SEG_ACCESS_DEF; + } + + /* ATTN */ + section = ((u64)mapping) + offsetof(struct host_sp_status_block, + atten_status_block); + def_sb->atten_status_block.status_block_id = igu_sp_sb_index; + + bp->attn_state = 0; + + reg_offset = (port ? 
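/*
 * Illustrative sketch (not part of the patch): bnx2x_init_sb() above
 * fills a host status-block descriptor (the e2 or e1x layout, depending
 * on the chip) and hands it to bnx2x_wr_fp_sb_data(), which copies it
 * out one 32-bit word at a time.  A standalone version of that
 * "struct as an array of u32" copy, with a memory destination standing
 * in for the REG_WR() loop; the struct layout is a stand-in, not the
 * driver's hc_status_block_data_* definition:
 */
#include <stdint.h>
#include <stddef.h>

struct example_sb_data {
	uint32_t host_addr_hi;
	uint32_t host_addr_lo;
	uint32_t state;
};

static void example_wr_sb_data(uint32_t *dst, const struct example_sb_data *sb)
{
	const uint32_t *src = (const uint32_t *)sb;
	size_t i;

	for (i = 0; i < sizeof(*sb) / sizeof(uint32_t); i++)
		dst[i] = src[i];	/* driver: REG_WR(bp, base + 4 * i, src[i]) */
}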
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + int sindex; + /* take care of sig[0]..sig[4] */ + for (sindex = 0; sindex < 4; sindex++) + bp->attn_group[index].sig[sindex] = + REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); + + if (!CHIP_IS_E1x(bp)) + /* + * enable5 is separate from the rest of the registers, + * and therefore the address skip is 4 + * and not 16 between the different groups + */ + bp->attn_group[index].sig[4] = REG_RD(bp, + reg_offset + 0x10 + 0x4*index); + else + bp->attn_group[index].sig[4] = 0; + } + + if (bp->common.int_block == INT_BLOCK_HC) { + reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : + HC_REG_ATTN_MSG0_ADDR_L); + + REG_WR(bp, reg_offset, U64_LO(section)); + REG_WR(bp, reg_offset + 4, U64_HI(section)); + } else if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); + REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); + } + + section = ((u64)mapping) + offsetof(struct host_sp_status_block, + sp_sb); + + bnx2x_zero_sp_sb(bp); + + sp_sb_data.state = SB_ENABLED; + sp_sb_data.host_sb_addr.lo = U64_LO(section); + sp_sb_data.host_sb_addr.hi = U64_HI(section); + sp_sb_data.igu_sb_id = igu_sp_sb_index; + sp_sb_data.igu_seg_id = igu_seg_id; + sp_sb_data.p_func.pf_id = func; + sp_sb_data.p_func.vnic_id = BP_VN(bp); + sp_sb_data.p_func.vf_id = 0xff; + + bnx2x_wr_sp_sb_data(bp, &sp_sb_data); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); +} + +void bnx2x_update_coalesce(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) + bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, + bp->tx_ticks, bp->rx_ticks); +} + +static void bnx2x_init_sp_ring(struct bnx2x *bp) +{ + spin_lock_init(&bp->spq_lock); + atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); + + bp->spq_prod_idx = 0; + bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; + bp->spq_prod_bd = bp->spq; + bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; +} + +static void bnx2x_init_eq_ring(struct bnx2x *bp) +{ + int i; + for (i = 1; i <= NUM_EQ_PAGES; i++) { + union event_ring_elem *elem = + &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; + + elem->next_page.addr.hi = + cpu_to_le32(U64_HI(bp->eq_mapping + + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); + elem->next_page.addr.lo = + cpu_to_le32(U64_LO(bp->eq_mapping + + BCM_PAGE_SIZE*(i % NUM_EQ_PAGES))); + } + bp->eq_cons = 0; + bp->eq_prod = NUM_EQ_DESC; + bp->eq_cons_sb = BNX2X_EQ_INDEX; + /* we want a warning message before it gets rought... 
*/ + atomic_set(&bp->eq_spq_left, + min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); +} + + +/* called with netif_addr_lock_bh() */ +void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags) +{ + struct bnx2x_rx_mode_ramrod_params ramrod_param; + int rc; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Prepare ramrod parameters */ + ramrod_param.cid = 0; + ramrod_param.cl_id = cl_id; + ramrod_param.rx_mode_obj = &bp->rx_mode_obj; + ramrod_param.func_id = BP_FUNC(bp); + + ramrod_param.pstate = &bp->sp_state; + ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; + + ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); + ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); + + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.rx_mode_flags = rx_mode_flags; + + ramrod_param.rx_accept_flags = rx_accept_flags; + ramrod_param.tx_accept_flags = tx_accept_flags; + + rc = bnx2x_config_rx_mode(bp, &ramrod_param); + if (rc < 0) { + BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); + return; + } +} + +/* called with netif_addr_lock_bh() */ +void bnx2x_set_storm_rx_mode(struct bnx2x *bp) +{ + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) + + /* Configure rx_mode of FCoE Queue */ + __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); +#endif + + switch (bp->rx_mode) { + case BNX2X_RX_MODE_NONE: + /* + * 'drop all' supersedes any accept flags that may have been + * passed to the function. + */ + break; + case BNX2X_RX_MODE_NORMAL: + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + break; + case BNX2X_RX_MODE_ALLMULTI: + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + break; + case BNX2X_RX_MODE_PROMISC: + /* According to deffinition of SI mode, iface in promisc mode + * should receive matched and unmatched (in resolution of port) + * unicast packets. 
+ */ + __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + if (IS_MF_SI(bp)) + __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); + else + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + + break; + default: + BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); + return; + } + + if (bp->rx_mode != BNX2X_RX_MODE_NONE) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); + } + + __set_bit(RAMROD_RX, &ramrod_flags); + __set_bit(RAMROD_TX, &ramrod_flags); + + bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, + tx_accept_flags, ramrod_flags); +} + +static void bnx2x_init_internal_common(struct bnx2x *bp) +{ + int i; + + if (IS_MF_SI(bp)) + /* + * In switch independent mode, the TSTORM needs to accept + * packets that failed classification, since approximate match + * mac addresses aren't written to NIG LLH + */ + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); + else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); + + /* Zero this manually as its initialization is + currently missing in the initTool */ + for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_AGG_DATA_OFFSET + i * 4, 0); + if (!CHIP_IS_E1x(bp)) { + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, + CHIP_INT_MODE_IS_BC(bp) ? + HC_IGU_BC_MODE : HC_IGU_NBC_MODE); + } +} + +static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) +{ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON: + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + bnx2x_init_internal_common(bp); + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_PORT: + /* nothing to do */ + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + /* internal memory per function is + initialized inside bnx2x_pf_init */ + break; + + default: + BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); + break; + } +} + +static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) +{ + return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT; +} + +static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) +{ + return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; +} + +static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->bp)) + return BP_L_ID(fp->bp) + fp->index; + else /* We want Client ID to be the same as IGU SB ID for 57712 */ + return bnx2x_fp_igu_sb_id(fp); +} + +static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) +{ + struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u8 cos; + unsigned long q_type = 0; + u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; + + fp->cid = fp_idx; + fp->cl_id = bnx2x_fp_cl_id(fp); + fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); + fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); + /* qZone id equals to FW (per path) client id */ + fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); + + /* init shortcut */ + fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); + /* Setup SB indicies */ + fp->rx_cons_sb = BNX2X_RX_SB_INDEX; + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + BUG_ON(fp->max_cos > 
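/*
 * Illustrative sketch (not part of the patch): bnx2x_set_storm_rx_mode()
 * above translates the netdev rx_mode into two bitmaps of "accept"
 * flags, one for the receive side and one for the internal (tx)
 * switching side.  A reduced, standalone version of the rx-side
 * translation; the flag bits and enum here are invented for the example
 * and do not match the driver's BNX2X_ACCEPT_* definitions:
 */
#include <stdint.h>

#define EX_ACCEPT_UNICAST	(1u << 0)
#define EX_ACCEPT_MULTICAST	(1u << 1)
#define EX_ACCEPT_ALL_MULTICAST	(1u << 2)
#define EX_ACCEPT_BROADCAST	(1u << 3)

enum example_rx_mode { EX_RX_MODE_NONE, EX_RX_MODE_NORMAL, EX_RX_MODE_ALLMULTI };

static uint32_t example_rx_accept_flags(enum example_rx_mode mode)
{
	uint32_t flags = 0;

	switch (mode) {
	case EX_RX_MODE_NONE:		/* 'drop all' - no accept flags */
		break;
	case EX_RX_MODE_NORMAL:
		flags = EX_ACCEPT_UNICAST | EX_ACCEPT_MULTICAST |
			EX_ACCEPT_BROADCAST;
		break;
	case EX_RX_MODE_ALLMULTI:
		flags = EX_ACCEPT_UNICAST | EX_ACCEPT_ALL_MULTICAST |
			EX_ACCEPT_BROADCAST;
		break;
	}
	return flags;
}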
BNX2X_MULTI_TX_COS); + + /* init tx data */ + for_each_cos_in_tx_queue(fp, cos) { + bnx2x_init_txdata(bp, &fp->txdata[cos], + CID_COS_TO_TX_ONLY_CID(fp->cid, cos), + FP_COS_TO_TXQ(fp, cos), + BNX2X_TX_SB_INDEX_BASE + cos); + cids[cos] = fp->txdata[cos].cid; + } + + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + /** + * Configure classification DBs: Always enable Tx switching + */ + bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); + + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " + "cl_id %d fw_sb %d igu_sb %d\n", + fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); + bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, + fp->fw_sb_id, fp->igu_sb_id); + + bnx2x_update_fpsb_idx(fp); +} + +void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) +{ + int i; + + for_each_eth_queue(bp, i) + bnx2x_init_eth_fp(bp, i); +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) + bnx2x_init_fcoe_fp(bp); + + bnx2x_init_sb(bp, bp->cnic_sb_mapping, + BNX2X_VF_ID_INVALID, false, + bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); + +#endif + + /* Initialize MOD_ABS interrupts */ + bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, + bp->common.shmem_base, bp->common.shmem2_base, + BP_PORT(bp)); + /* ensure status block indices were read */ + rmb(); + + bnx2x_init_def_sb(bp); + bnx2x_update_dsb_idx(bp); + bnx2x_init_rx_rings(bp); + bnx2x_init_tx_rings(bp); + bnx2x_init_sp_ring(bp); + bnx2x_init_eq_ring(bp); + bnx2x_init_internal(bp, load_code); + bnx2x_pf_init(bp); + bnx2x_stats_init(bp); + + /* flush all before enabling interrupts */ + mb(); + mmiowb(); + + bnx2x_int_enable(bp); + + /* Check for SPIO5 */ + bnx2x_attn_int_deasserted0(bp, + REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & + AEU_INPUTS_ATTN_BITS_SPIO5); +} + +/* end of nic init */ + +/* + * gzip service functions + */ + +static int bnx2x_gunzip_init(struct bnx2x *bp) +{ + bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, + &bp->gunzip_mapping, GFP_KERNEL); + if (bp->gunzip_buf == NULL) + goto gunzip_nomem1; + + bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); + if (bp->strm == NULL) + goto gunzip_nomem2; + + bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); + if (bp->strm->workspace == NULL) + goto gunzip_nomem3; + + return 0; + +gunzip_nomem3: + kfree(bp->strm); + bp->strm = NULL; + +gunzip_nomem2: + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; + +gunzip_nomem1: + netdev_err(bp->dev, "Cannot allocate firmware buffer for" + " un-compression\n"); + return -ENOMEM; +} + +static void bnx2x_gunzip_end(struct bnx2x *bp) +{ + if (bp->strm) { + vfree(bp->strm->workspace); + kfree(bp->strm); + bp->strm = NULL; + } + + if (bp->gunzip_buf) { + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; + } +} + +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) +{ + int n, rc; + + /* check gzip header */ + if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { + BNX2X_ERR("Bad gzip header\n"); + return -EINVAL; + } + + n = 10; + +#define FNAME 0x8 + + if (zbuf[3] & FNAME) + while ((zbuf[n++] != 0) && (n < len)); + + bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; + bp->strm->avail_in = len - n; + bp->strm->next_out = bp->gunzip_buf; + bp->strm->avail_out = FW_BUF_SIZE; + + rc = 
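/*
 * Illustrative sketch (not part of the patch): bnx2x_gunzip() above
 * checks the fixed gzip magic (0x1f 0x8b), the deflate method byte and,
 * if the FNAME flag (bit 3 of the FLG byte) is set, skips the
 * NUL-terminated file name that follows the 10-byte header before
 * pointing zlib at the compressed payload.  A standalone version of
 * that header walk:
 */
#include <stdint.h>
#include <stddef.h>

#define EX_GZIP_FNAME 0x08	/* FLG bit 3: original file name present */

/* Returns the payload offset, or -1 if this is not a gzip/deflate stream. */
static int example_gzip_payload_offset(const uint8_t *buf, size_t len)
{
	size_t n = 10;		/* fixed gzip header size */

	if (len < n || buf[0] != 0x1f || buf[1] != 0x8b || buf[2] != 8)
		return -1;

	if (buf[3] & EX_GZIP_FNAME)	/* skip the NUL-terminated name */
		while (n < len && buf[n++] != 0)
			;

	return (int)n;
}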
zlib_inflateInit2(bp->strm, -MAX_WBITS); + if (rc != Z_OK) + return rc; + + rc = zlib_inflate(bp->strm, Z_FINISH); + if ((rc != Z_OK) && (rc != Z_STREAM_END)) + netdev_err(bp->dev, "Firmware decompression error: %s\n", + bp->strm->msg); + + bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); + if (bp->gunzip_outlen & 0x3) + netdev_err(bp->dev, "Firmware decompression error:" + " gunzip_outlen (%d) not aligned\n", + bp->gunzip_outlen); + bp->gunzip_outlen >>= 2; + + zlib_inflateEnd(bp->strm); + + if (rc == Z_STREAM_END) + return 0; + + return rc; +} + +/* nic load/unload */ + +/* + * General service functions + */ + +/* send a NIG loopback debug packet */ +static void bnx2x_lb_pckt(struct bnx2x *bp) +{ + u32 wb_write[3]; + + /* Ethernet source and destination addresses */ + wb_write[0] = 0x55555555; + wb_write[1] = 0x55555555; + wb_write[2] = 0x20; /* SOP */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); + + /* NON-IP protocol */ + wb_write[0] = 0x09000000; + wb_write[1] = 0x55555555; + wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); +} + +/* some of the internal memories + * are not directly readable from the driver + * to test them we send debug packets + */ +static int bnx2x_int_mem_test(struct bnx2x *bp) +{ + int factor; + int count, i; + u32 val = 0; + + if (CHIP_REV_IS_FPGA(bp)) + factor = 120; + else if (CHIP_REV_IS_EMUL(bp)) + factor = 200; + else + factor = 1; + + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); + + /* send Ethernet packet */ + bnx2x_lb_pckt(bp); + + /* TODO do i reset NIG statistic? 
*/ + /* Wait until NIG register shows 1 packet of size 0x10 */ + count = 1000 * factor; + while (count) { + + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + if (val == 0x10) + break; + + msleep(10); + count--; + } + if (val != 0x10) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -1; + } + + /* Wait until PRS register shows 1 packet */ + count = 1000 * factor; + while (count) { + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val == 1) + break; + + msleep(10); + count--; + } + if (val != 0x1) { + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + return -2; + } + + /* Reset and init BRB, PRS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); + + DP(NETIF_MSG_HW, "part2\n"); + + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); + + /* send 10 Ethernet packets */ + for (i = 0; i < 10; i++) + bnx2x_lb_pckt(bp); + + /* Wait until NIG register shows 10 + 1 + packets of size 11*0x10 = 0xb0 */ + count = 1000 * factor; + while (count) { + + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + if (val == 0xb0) + break; + + msleep(10); + count--; + } + if (val != 0xb0) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -3; + } + + /* Wait until PRS register shows 2 packets */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 2) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + + /* Write 1 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); + + /* Wait until PRS register shows 3 packets */ + msleep(10 * factor); + /* Wait until NIG register shows 1 packet of size 0x10 */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 3) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + + /* clear NIG EOP FIFO */ + for (i = 0; i < 11; i++) + REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); + val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); + if (val != 1) { + BNX2X_ERR("clear of NIG failed\n"); + return -4; + } + + /* Reset and init BRB, PRS, NIG */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); +#ifndef BCM_CNIC + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); +#endif + + /* Enable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); + REG_WR(bp, CFC_REG_DEBUG0, 0x0); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); + + DP(NETIF_MSG_HW, "done\n"); + + return 0; /* OK */ +} + +static void bnx2x_enable_blocks_attention(struct bnx2x *bp) +{ + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); + else + REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); + /* + * mask read length error interrupts in brb for parser + * (parsing unit and 'checksum and crc' unit) + * these errors are legal (PU reads fixed length and CAC can 
cause + * read length error on truncated packets) + */ + REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); + REG_WR(bp, QM_REG_QM_INT_MASK, 0); + REG_WR(bp, TM_REG_TM_INT_MASK, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); + REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ + REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); + REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); + REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ + REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); + REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ + + if (CHIP_REV_IS_FPGA(bp)) + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); + else if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, + (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF + | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT + | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN + | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED + | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED)); + else + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); + REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); +/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ + + if (!CHIP_IS_E1x(bp)) + /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ + REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); + + REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); + REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); +/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ + REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ +} + +static void bnx2x_reset_common(struct bnx2x *bp) +{ + u32 val = 0x1400; + + /* reset_common */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0xd3ffff7f); + + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); +} + +static void bnx2x_setup_dmae(struct bnx2x *bp) +{ + bp->dmae_ready = 0; + spin_lock_init(&bp->dmae_lock); +} + +static void bnx2x_init_pxp(struct bnx2x *bp) +{ + u16 devctl; + int r_order, w_order; + + pci_read_config_word(bp->pdev, + pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); + DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); + w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); + if (bp->mrrs == -1) + r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); + else { + DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); + r_order = bp->mrrs; + } + + bnx2x_init_pxp_arb(bp, r_order, w_order); +} + +static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) +{ + int is_required; + u32 val; + int port; + + if (BP_NOMCP(bp)) + return; + + is_required = 0; + val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_FAN_FAILURE_MASK; + + if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) + is_required = 1; + + /* + * The fan failure mechanism is usually related to the PHY type since + * the power consumption of the board is affected by the PHY. Currently, + * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 
+ */ + else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) + for (port = PORT_0; port < PORT_MAX; port++) { + is_required |= + bnx2x_fan_failure_det_req( + bp, + bp->common.shmem_base, + bp->common.shmem2_base, + port); + } + + DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); + + if (is_required == 0) + return; + + /* Fan failure is indicated by SPIO 5 */ + bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, + MISC_REGISTERS_SPIO_INPUT_HI_Z); + + /* set to active low mode */ + val = REG_RD(bp, MISC_REG_SPIO_INT); + val |= ((1 << MISC_REGISTERS_SPIO_5) << + MISC_REGISTERS_SPIO_INT_OLD_SET_POS); + REG_WR(bp, MISC_REG_SPIO_INT, val); + + /* enable interrupt to signal the IGU */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + val |= (1 << MISC_REGISTERS_SPIO_5); + REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); +} + +static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) +{ + u32 offset = 0; + + if (CHIP_IS_E1(bp)) + return; + if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX)) + return; + + switch (BP_ABS_FUNC(bp)) { + case 0: + offset = PXP2_REG_PGL_PRETEND_FUNC_F0; + break; + case 1: + offset = PXP2_REG_PGL_PRETEND_FUNC_F1; + break; + case 2: + offset = PXP2_REG_PGL_PRETEND_FUNC_F2; + break; + case 3: + offset = PXP2_REG_PGL_PRETEND_FUNC_F3; + break; + case 4: + offset = PXP2_REG_PGL_PRETEND_FUNC_F4; + break; + case 5: + offset = PXP2_REG_PGL_PRETEND_FUNC_F5; + break; + case 6: + offset = PXP2_REG_PGL_PRETEND_FUNC_F6; + break; + case 7: + offset = PXP2_REG_PGL_PRETEND_FUNC_F7; + break; + default: + return; + } + + REG_WR(bp, offset, pretend_func_num); + REG_RD(bp, offset); + DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); +} + +void bnx2x_pf_disable(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + val &= ~IGU_PF_CONF_FUNC_EN; + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); +} + +static inline void bnx2x__common_init_phy(struct bnx2x *bp) +{ + u32 shmem_base[2], shmem2_base[2]; + shmem_base[0] = bp->common.shmem_base; + shmem2_base[0] = bp->common.shmem2_base; + if (!CHIP_IS_E1x(bp)) { + shmem_base[1] = + SHMEM2_RD(bp, other_shmem_base_addr); + shmem2_base[1] = + SHMEM2_RD(bp, other_shmem2_base_addr); + } + bnx2x_acquire_phy_lock(bp); + bnx2x_common_init_phy(bp, shmem_base, shmem2_base, + bp->common.chip_id); + bnx2x_release_phy_lock(bp); +} + +/** + * bnx2x_init_hw_common - initialize the HW at the COMMON phase. + * + * @bp: driver handle + */ +static int bnx2x_init_hw_common(struct bnx2x *bp) +{ + u32 val; + + DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); + ++ /* ++ * take the UNDI lock to protect undi_unload flow from accessing ++ * registers while we're resetting the chip ++ */ ++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); ++ + bnx2x_reset_common(bp); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); + + val = 0xfffc; + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); + ++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); ++ + bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) { + u8 abs_func_id; + + /** + * 4-port mode or 2-port mode we need to turn of master-enable + * for everyone, after that, turn it back on for self. 
+ * so, we disregard multi-function or not, and always disable + * for all functions on the given path, this means 0,2,4,6 for + * path 0 and 1,3,5,7 for path 1 + */ + for (abs_func_id = BP_PATH(bp); + abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { + if (abs_func_id == BP_ABS_FUNC(bp)) { + REG_WR(bp, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, + 1); + continue; + } + + bnx2x_pretend_func(bp, abs_func_id); + /* clear pf enable */ + bnx2x_pf_disable(bp); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + } + } + + bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); + if (CHIP_IS_E1(bp)) { + /* enable HW interrupt from PXP on USDM overflow + bit 16 on INT_MASK_0 */ + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + } + + bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); + bnx2x_init_pxp(bp); + +#ifdef __BIG_ENDIAN + REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); + /* make sure this value is 0 */ + REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); + +/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ + REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); +#endif + + bnx2x_ilt_init_page_size(bp, INITOP_SET); + + if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) + REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); + + /* let the HW do it's magic ... */ + msleep(100); + /* finish PXP init */ + val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 CFG failed\n"); + return -EBUSY; + } + val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 RD_INIT failed\n"); + return -EBUSY; + } + + /* Timers bug workaround E2 only. We need to set the entire ILT to + * have entries with value "0" and valid bit on. + * This needs to be done by the first PF that is loaded in a path + * (i.e. common phase) + */ + if (!CHIP_IS_E1x(bp)) { +/* In E2 there is a bug in the timers block that can cause function 6 / 7 + * (i.e. vnic3) to start even if it is marked as "scan-off". + * This occurs when a different function (func2,3) is being marked + * as "scan-off". Real-life scenario for example: if a driver is being + * load-unloaded while func6,7 are down. This will cause the timer to access + * the ilt, translate to a logical address and send a request to read/write. + * Since the ilt for the function that is down is not valid, this will cause + * a translation error which is unrecoverable. + * The Workaround is intended to make sure that when this happens nothing fatal + * will occur. The workaround: + * 1. First PF driver which loads on a path will: + * a. After taking the chip out of reset, by using pretend, + * it will write "0" to the following registers of + * the other vnics. + * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); + * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); + * And for itself it will write '1' to + * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable + * dmae-operations (writing to pram for example.) + * note: can be done for only function 6,7 but cleaner this + * way. + * b. Write zero+valid to the entire ILT. + * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of + * VNIC3 (of that port). The range allocated will be the + * entire ILT. This is needed to prevent ILT range error. + * 2. Any PF driver load flow: + * a. 
ILT update with the physical addresses of the allocated
 + * logical pages.
 + * b. Wait 20msec - note that this timeout is needed to make
 + * sure there are no requests in one of the PXP internal
 + * queues with "old" ILT addresses.
 + * c. PF enable in the PGLC.
 + * d. Clear the was_error of the PF in the PGLC. (could have
 + * occurred while the driver was down)
 + * e. PF enable in the CFC (WEAK + STRONG)
 + * f. Timers scan enable
 + * 3. PF driver unload flow:
 + * a. Clear the Timers scan_en.
 + * b. Polling for scan_on=0 for that PF.
 + * c. Clear the PF enable bit in the PXP.
 + * d. Clear the PF enable in the CFC (WEAK + STRONG)
 + * e. Write zero+valid to all ILT entries (The valid bit must
 + * stay set)
 + * f. If this is VNIC 3 of a port then also init
 + * first_timers_ilt_entry to zero and last_timers_ilt_entry
 + * to the last entry in the ILT.
 + *
 + * Notes:
 + * Currently the PF error in the PGLC is non-recoverable.
 + * In the future there will be a recovery routine for this error.
 + * Currently attention is masked.
 + * Having an MCP lock on the load/unload process does not guarantee that
 + * there is no Timer disable during Func6/7 enable. This is because the
 + * Timers scan is currently being cleared by the MCP on FLR.
 + * Step 2.d can be done only for PF6/7 and the driver can also check if
 + * there is an error before clearing it. But the flow above is simpler and
 + * more general.
 + * All ILT entries are written with zero+valid and not just the PF6/7
 + * ILT entries, since in the future the ILT entries allocation for
 + * PFs might be dynamic.
 + */
 + struct ilt_client_info ilt_cli;
 + struct bnx2x_ilt ilt;
 + memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 + memset(&ilt, 0, sizeof(struct bnx2x_ilt));
 +
 + /* initialize dummy TM client */
 + ilt_cli.start = 0;
 + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 + ilt_cli.client_num = ILT_CLIENT_TM;
 +
 + /* Step 1: set zeroes to all ilt page entries with valid bit on
 + * Step 2: set the timers first/last ilt entry to point
 + * to the entire range to prevent ILT range error for 3rd/4th
 + * vnic (this code assumes existence of the vnic)
 + *
 + * both steps are performed by the call to bnx2x_ilt_client_init_op()
 + * with a dummy TM client
 + *
 + * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
 + * and its counterpart are split registers
 + */
 + bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
 + bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
 + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 +
 + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
 + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
 + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
 + }
 +
 +
 + REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
 + REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
 +
 + if (!CHIP_IS_E1x(bp)) {
 + int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
 + (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
 + bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
 +
 + bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
 +
 + /* let the HW do its magic ... 
*/ + do { + msleep(200); + val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); + } while (factor-- && (val != 1)); + + if (val != 1) { + BNX2X_ERR("ATC_INIT failed\n"); + return -EBUSY; + } + } + + bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); + + /* clean the DMAE memory */ + bp->dmae_ready = 1; + bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); + + bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); + + bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); + + bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); + + + /* QM queues pointers table */ + bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); + + /* soft reset pulse */ + REG_WR(bp, QM_REG_SOFT_RESET, 1); + REG_WR(bp, QM_REG_SOFT_RESET, 0); + +#ifdef BCM_CNIC + bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); +#endif + + bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); + REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); + if (!CHIP_REV_IS_SLOW(bp)) + /* enable hw interrupt from doorbell Q */ + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); + + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); + REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); + + if (!CHIP_IS_E1(bp)) + REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); + + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) + /* Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 7 : 6); + + bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) { + /* reset VFC memories */ + REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + + msleep(20); + } + + bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); + + /* sync semi rtc */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0x80000000); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, + 0x80000000); + + bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 
7 : 6); + + REG_WR(bp, SRC_REG_SOFT_RST, 1); + + bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); + +#ifdef BCM_CNIC + REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); + REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); + REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); + REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); + REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); + REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); + REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); + REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); + REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); + REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); +#endif + REG_WR(bp, SRC_REG_SOFT_RST, 0); + + if (sizeof(union cdu_context) != 1024) + /* we currently assume that a context is 1024 bytes */ + dev_alert(&bp->pdev->dev, "please adjust the size " + "of cdu_context(%ld)\n", + (long)sizeof(union cdu_context)); + + bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); + val = (4 << 24) + (0 << 12) + 1024; + REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); + + bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); + REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); + /* enable context validation interrupt from CFC */ + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); + + /* set the thresholds to prevent CFC/CDU race */ + REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); + + bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) + REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); + + bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); + + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2814, 0xffffffff); + REG_WR(bp, 0x3820, 0xffffffff); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, + (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | + PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, + (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, + (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); + } + + bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); + if (!CHIP_IS_E1(bp)) { + /* in E3 this done in per-port section */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); + } + if (CHIP_IS_E1H(bp)) + /* not applicable for E2 (and above ...) 
*/ + REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); + + if (CHIP_REV_IS_SLOW(bp)) + msleep(200); + + /* finish CFC init */ + val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC LL_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC AC_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC CAM_INIT failed\n"); + return -EBUSY; + } + REG_WR(bp, CFC_REG_DEBUG0, 0); + + if (CHIP_IS_E1(bp)) { + /* read NIG statistic + to see if this is our first up since powerup */ + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + + /* do internal memory self test */ + if ((val == 0) && bnx2x_int_mem_test(bp)) { + BNX2X_ERR("internal mem self test failed\n"); + return -EBUSY; + } + } + + bnx2x_setup_fan_failure_detection(bp); + + /* clear PXP2 attentions */ + REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); + + bnx2x_enable_blocks_attention(bp); + bnx2x_enable_blocks_parity(bp); + + if (!BP_NOMCP(bp)) { + if (CHIP_IS_E1x(bp)) + bnx2x__common_init_phy(bp); + } else + BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + + return 0; +} + +/** + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. + * + * @bp: driver handle + */ +static int bnx2x_init_hw_common_chip(struct bnx2x *bp) +{ + int rc = bnx2x_init_hw_common(bp); + + if (rc) + return rc; + + /* In E2 2-PORT mode, same ext phy is used for the two paths */ + if (!BP_NOMCP(bp)) + bnx2x__common_init_phy(bp); + + return 0; +} + +static int bnx2x_init_hw_port(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; + u32 low, high; + u32 val; + + bnx2x__link_reset(bp); + + DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); + + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); + + bnx2x_init_block(bp, BLOCK_MISC, init_phase); + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); + + /* Timers bug workaround: disables the pf_master bit in pglue at + * common phase, we need to enable it here before any dmae access are + * attempted. Therefore we manually added the enable-master to the + * port phase (it also happens in the function phase) + */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); + + /* QM cid (connection) count */ + bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); + +#ifdef BCM_CNIC + bnx2x_init_block(bp, BLOCK_TM, init_phase); + REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); + REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); +#endif + + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + + if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + + if (IS_MF(bp)) + low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); + else if (bp->dev->mtu > 4096) { + if (bp->flags & ONE_PORT_FLAG) + low = 160; + else { + val = bp->dev->mtu; + /* (24*1024 + val*4)/256 */ + low = 96 + (val/64) + + ((val % 64) ? 1 : 0); + } + } else + low = ((bp->flags & ONE_PORT_FLAG) ? 
80 : 160); + high = low + 56; /* 14*1024/256 */ + REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); + REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); + } + + if (CHIP_MODE_IS_4_PORT(bp)) + REG_WR(bp, (BP_PORT(bp) ? + BRB1_REG_MAC_GUARANTIED_1 : + BRB1_REG_MAC_GUARANTIED_0), 40); + + + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + if (CHIP_IS_E3B0(bp)) + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (bp->path_has_ovlan ? 7 : 6)); + + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); + + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); + + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + bnx2x_init_block(bp, BLOCK_XPB, init_phase); + + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + + if (CHIP_IS_E1x(bp)) { + /* configure PBF to work without PAUSE mtu 9000 */ + REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); + + /* update threshold */ + REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); + /* update init credit */ + REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); + + /* probe changes */ + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); + udelay(50); + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); + } + +#ifdef BCM_CNIC + bnx2x_init_block(bp, BLOCK_SRC, init_phase); +#endif + bnx2x_init_block(bp, BLOCK_CDU, init_phase); + bnx2x_init_block(bp, BLOCK_CFC, init_phase); + + if (CHIP_IS_E1(bp)) { + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, BLOCK_HC, init_phase); + + bnx2x_init_block(bp, BLOCK_IGU, init_phase); + + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); + /* init aeu_mask_attn_func_0/1: + * - SF mode: bits 3-7 are masked. only bits 0-2 are in use + * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * bits 4-7 are used for "per vn group attention" */ + val = IS_MF(bp) ? 0xF7 : 0x7; + /* Enable DCBX attention for all but E1 */ + val |= CHIP_IS_E1(bp) ? 0 : 0x10; + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); + + bnx2x_init_block(bp, BLOCK_NIG, init_phase); + + if (!CHIP_IS_E1x(bp)) { + /* Bit-map indicating which L2 hdrs may appear after the + * basic Ethernet header + */ + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(bp) ? 7 : 6); + + if (CHIP_IS_E3(bp)) + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_LLH1_MF_MODE : + NIG_REG_LLH_MF_MODE, IS_MF(bp)); + } + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); + + if (!CHIP_IS_E1(bp)) { + /* 0x2 disable mf_ov, 0x1 enable */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, + (IS_MF_SD(bp) ? 0x1 : 0x2)); + + if (!CHIP_IS_E1x(bp)) { + val = 0; + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + val = 1; + break; + case MULTI_FUNCTION_SI: + val = 2; + break; + } + + REG_WR(bp, (BP_PORT(bp) ? 
NIG_REG_LLH1_CLS_TYPE : + NIG_REG_LLH0_CLS_TYPE), val); + } + { + REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); + REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); + REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); + } + } + + + /* If SPIO5 is set to generate interrupts, enable it for this port */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + if (val & (1 << MISC_REGISTERS_SPIO_5)) { + u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + val = REG_RD(bp, reg_addr); + val |= AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_addr, val); + } + + return 0; +} + +static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) +{ + int reg; + + if (CHIP_IS_E1(bp)) + reg = PXP2_REG_RQ_ONCHIP_AT + index*8; + else + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; + + bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); +} + +static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) +{ + bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); +} + +static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) +{ + u32 i, base = FUNC_ILT_BASE(func); + for (i = base; i < base + ILT_PER_FUNC; i++) + bnx2x_ilt_wr(bp, i, 0); +} + +static int bnx2x_init_hw_func(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int init_phase = PHASE_PF0 + func; + struct bnx2x_ilt *ilt = BP_ILT(bp); + u16 cdu_ilt_start; + u32 addr, val; + u32 main_mem_base, main_mem_size, main_mem_prty_clr; + int i, main_mem_width; + + DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); + + /* FLR cleanup - hmmm */ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_flr_clnup(bp); + + /* set MSI reconfigure capability */ + if (bp->common.int_block == INT_BLOCK_HC) { + addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); + val = REG_RD(bp, addr); + val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; + REG_WR(bp, addr, val); + } + + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); + + ilt = BP_ILT(bp); + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + + for (i = 0; i < L2_ILT_LINES(bp); i++) { + ilt->lines[cdu_ilt_start + i].page = + bp->context.vcxt + (ILT_PAGE_CIDS * i); + ilt->lines[cdu_ilt_start + i].page_mapping = + bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); + /* cdu ilt pages are allocated manually so there's no need to + set the size */ + } + bnx2x_ilt_init_op(bp, INITOP_SET); + +#ifdef BCM_CNIC + bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); + + /* T1 hash bits value determines the T1 number of entries */ + REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); +#endif + +#ifndef BCM_CNIC + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); +#endif /* BCM_CNIC */ + + if (!CHIP_IS_E1x(bp)) { + u32 pf_conf = IGU_PF_CONF_FUNC_EN; + + /* Turn on a single ISR mode in IGU if driver is going to use + * INT#x or MSI + */ + if (!(bp->flags & USING_MSIX_FLAG)) + pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + /* + * Timers workaround bug: function init part. 
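 + * This is step 2.b of the load flow documented in bnx2x_init_hw_common: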
+ * Need to wait 20msec after initializing ILT, + * needed to make sure there are no requests in + * one of the PXP internal queues with "old" ILT addresses + */ + msleep(20); + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function + * init + */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + /* Enable the function in IGU */ + REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); + } + + bp->dmae_ready = 1; + + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); + + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); + bnx2x_init_block(bp, BLOCK_SRC, init_phase); + bnx2x_init_block(bp, BLOCK_MISC, init_phase); + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, QM_REG_PF_EN, 1); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + } + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TM, init_phase); + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + bnx2x_init_block(bp, BLOCK_XPB, init_phase); + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PBF_REG_DISABLE_PF, 0); + + bnx2x_init_block(bp, BLOCK_CDU, init_phase); + + bnx2x_init_block(bp, BLOCK_CFC, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); + + if (IS_MF(bp)) { + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); + } + + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); + + /* HC init per function */ + if (bp->common.int_block == INT_BLOCK_HC) { + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, BLOCK_HC, init_phase); + + } else { + int num_segs, sb_idx, prod_offset; + + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + bnx2x_init_block(bp, BLOCK_IGU, init_phase); + + if (!CHIP_IS_E1x(bp)) { + int dsb_idx = 0; + /** + * Producer memory: + * E2 mode: address 0-135 match to the mapping memory; + * 136 - PF0 default prod; 137 - PF1 default prod; + * 138 - PF2 default prod; 139 - PF3 default prod; + * 140 - PF0 attn prod; 141 - PF1 attn prod; + * 142 - PF2 attn prod; 143 - PF3 attn prod; + * 144-147 reserved. 
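 + * (so in E2 mode each of the four PFs on a path owns one default
 + * producer line and one attention producer line)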
+ * + * E1.5 mode - In backward compatible mode; + * for non default SB; each even line in the memory + * holds the U producer and each odd line hold + * the C producer. The first 128 producers are for + * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 + * producers are for the DSB for each PF. + * Each PF has five segments: (the order inside each + * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; + * 144-147 attn prods; + */ + /* non-default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; + for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { + prod_offset = (bp->igu_base_sb + sb_idx) * + num_segs; + + for (i = 0; i < num_segs; i++) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i) * 4; + REG_WR(bp, addr, 0); + } + /* send consumer update with value 0 */ + bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_igu_clear_sb(bp, + bp->igu_base_sb + sb_idx); + } + + /* default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; + + if (CHIP_MODE_IS_4_PORT(bp)) + dsb_idx = BP_FUNC(bp); + else + dsb_idx = BP_E1HVN(bp); + + prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_BASE_DSB_PROD + dsb_idx : + IGU_NORM_BASE_DSB_PROD + dsb_idx); + + for (i = 0; i < (num_segs * E1HVN_MAX); + i += E1HVN_MAX) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i)*4; + REG_WR(bp, addr, 0); + } + /* send consumer update with 0 */ + if (CHIP_INT_MODE_IS_BC(bp)) { + bnx2x_ack_sb(bp, bp->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + CSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + XSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + TSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } else { + bnx2x_ack_sb(bp, bp->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } + bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); + + /* !!! these should become driver const once + rf-tool supports split-68 const */ + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); + } + } + + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2114, 0xffffffff); + REG_WR(bp, 0x2120, 0xffffffff); + + if (CHIP_IS_E1x(bp)) { + main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ + main_mem_base = HC_REG_MAIN_MEMORY + + BP_PORT(bp) * (main_mem_size * 4); + main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; + main_mem_width = 8; + + val = REG_RD(bp, main_mem_prty_clr); + if (val) + DP(BNX2X_MSG_MCP, "Hmmm... 
Parity errors in HC " + "block during " + "function init (0x%x)!\n", val); + + /* Clear "false" parity errors in MSI-X table */ + for (i = main_mem_base; + i < main_mem_base + main_mem_size * 4; + i += main_mem_width) { + bnx2x_read_dmae(bp, i, main_mem_width / 4); + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), + i, main_mem_width / 4); + } + /* Clear HC parity attention */ + REG_RD(bp, main_mem_prty_clr); + } + +#ifdef BNX2X_STOP_ON_ERROR + /* Enable STORMs SP logging */ + REG_WR8(bp, BAR_USTRORM_INTMEM + + USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_XSTRORM_INTMEM + + XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); +#endif + + bnx2x_phy_probe(&bp->link_params); + + return 0; +} + + +void bnx2x_free_mem(struct bnx2x *bp) +{ + /* fastpath */ + bnx2x_free_fp_mem(bp); + /* end of fastpath */ + + BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + + BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + + BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, + bp->context.size); + + bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); + + BNX2X_FREE(bp->ilt->lines); + +#ifdef BCM_CNIC + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + + BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); +#endif + + BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); + + BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); +} + +static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) +{ + int num_groups; + + /* number of eth_queues */ + u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp); + + /* Total number of FW statistics requests = + * 1 for port stats + 1 for PF stats + num_eth_queues */ + bp->fw_stats_num = 2 + num_queue_stats; + + + /* Request is built from stats_query_header and an array of + * stats_query_cmd_group each of which contains + * STATS_QUERY_CMD_COUNT rules. The real number or requests is + * configured in the stats_query_header. + */ + num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT + + (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0); + + bp->fw_stats_req_sz = sizeof(struct stats_query_header) + + num_groups * sizeof(struct stats_query_cmd_group); + + /* Data for statistics requests + stats_conter + * + * stats_counter holds per-STORM counters that are incremented + * when STORM has finished with the current request. 
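 + * The data area allocated below is laid out as: per_port_stats,
 + * per_pf_stats, one per_queue_stats per ethernet queue and, at the
 + * end, the stats_counter itself.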
+ */ + bp->fw_stats_data_sz = sizeof(struct per_port_stats) + + sizeof(struct per_pf_stats) + + sizeof(struct per_queue_stats) * num_queue_stats + + sizeof(struct stats_counter); + + BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + + /* Set shortcuts */ + bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; + bp->fw_stats_req_mapping = bp->fw_stats_mapping; + + bp->fw_stats_data = (struct bnx2x_fw_stats_data *) + ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); + + bp->fw_stats_data_mapping = bp->fw_stats_mapping + + bp->fw_stats_req_sz; + return 0; + +alloc_mem_err: + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + return -ENOMEM; +} + + +int bnx2x_alloc_mem(struct bnx2x *bp) +{ +#ifdef BCM_CNIC + if (!CHIP_IS_E1x(bp)) + /* size = the status block + ramrod buffers */ + BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + + /* allocate searcher T2 table */ + BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); +#endif + + + BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + + BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + + /* Allocated memory for FW statistics */ + if (bnx2x_alloc_fw_stats_mem(bp)) + goto alloc_mem_err; + + bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + + BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, + bp->context.size); + + BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); + + if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) + goto alloc_mem_err; + + /* Slow path ring */ + BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); + + /* EQ */ + BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); + + + /* fastpath */ + /* need to be done at the end, since it's self adjusting to amount + * of memory available for RSS queues + */ + if (bnx2x_alloc_fp_mem(bp)) + goto alloc_mem_err; + return 0; + +alloc_mem_err: + bnx2x_free_mem(bp); + return -ENOMEM; +} + +/* + * Init service functions + */ + +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags) +{ + int rc; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Fill general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; + + /* Fill a user request section if needed */ + if (!test_bit(RAMROD_CONT, ramrod_flags)) { + memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); + + __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); + + /* Set the command: ADD or DEL */ + if (set) + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + else + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; + } + + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc < 0) + BNX2X_ERR("%s MAC failed\n", (set ? 
"Set" : "Del")); + return rc; +} + +int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp) +{ + int rc; + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + + /* Wait for completion of requested */ + if (wait_for_comp) + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + + /* Set the mac type of addresses we want to clear */ + __set_bit(mac_type, &vlan_mac_flags); + + rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc < 0) + BNX2X_ERR("Failed to delete MACs: %d\n", rc); + + return rc; +} + +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) +{ + unsigned long ramrod_flags = 0; + + DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Eth MAC is set on RSS leading client (fp[0]) */ + return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, + BNX2X_ETH_MAC, &ramrod_flags); +} + +int bnx2x_setup_leading(struct bnx2x *bp) +{ + return bnx2x_setup_queue(bp, &bp->fp[0], 1); +} + +/** + * bnx2x_set_int_mode - configure interrupt mode + * + * @bp: driver handle + * + * In case of MSI-X it will also try to enable MSI-X. + */ +static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) +{ + switch (int_mode) { + case INT_MODE_MSI: + bnx2x_enable_msi(bp); + /* falling through... */ + case INT_MODE_INTx: + bp->num_queues = 1 + NON_ETH_CONTEXT_USE; + DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); + break; + default: + /* Set number of queues according to bp->multi_mode value */ + bnx2x_set_num_queues(bp); + + DP(NETIF_MSG_IFUP, "set number of queues to %d\n", + bp->num_queues); + + /* if we can't use MSI-X we only need one fp, + * so try to enable MSI-X with the requested number of fp's + * and fallback to MSI or legacy INTx with one fp + */ + if (bnx2x_enable_msix(bp)) { + /* failed to enable MSI-X */ + if (bp->multi_mode) + DP(NETIF_MSG_IFUP, + "Multi requested but failed to " + "enable MSI-X (%d), " + "set number of queues to %d\n", + bp->num_queues, + 1 + NON_ETH_CONTEXT_USE); + bp->num_queues = 1 + NON_ETH_CONTEXT_USE; + + /* Try to enable MSI */ + if (!(bp->flags & DISABLE_MSI_FLAG)) + bnx2x_enable_msi(bp); + } + break; + } +} + +/* must be called prioir to any HW initializations */ +static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) +{ + return L2_ILT_LINES(bp); +} + +void bnx2x_ilt_set_info(struct bnx2x *bp) +{ + struct ilt_client_info *ilt_client; + struct bnx2x_ilt *ilt = BP_ILT(bp); + u16 line = 0; + + ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); + DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); + + /* CDU */ + ilt_client = &ilt->clients[ILT_CLIENT_CDU]; + ilt_client->client_num = ILT_CLIENT_CDU; + ilt_client->page_size = CDU_ILT_PAGE_SZ; + ilt_client->flags = ILT_CLIENT_SKIP_MEM; + ilt_client->start = line; + line += bnx2x_cid_ilt_lines(bp); +#ifdef BCM_CNIC + line += CNIC_ILT_LINES; +#endif + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + + /* QM */ + if (QM_INIT(bp->qm_cid_count)) { + ilt_client = &ilt->clients[ILT_CLIENT_QM]; + ilt_client->client_num = ILT_CLIENT_QM; + ilt_client->page_size = QM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + + /* 4 bytes for each cid */ + line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, + QM_ILT_PAGE_SZ); + + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, 
"ilt client[QM]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + + } + /* SRC */ + ilt_client = &ilt->clients[ILT_CLIENT_SRC]; +#ifdef BCM_CNIC + ilt_client->client_num = ILT_CLIENT_SRC; + ilt_client->page_size = SRC_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += SRC_ILT_LINES; + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + +#else + ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); +#endif + + /* TM */ + ilt_client = &ilt->clients[ILT_CLIENT_TM]; +#ifdef BCM_CNIC + ilt_client->client_num = ILT_CLIENT_TM; + ilt_client->page_size = TM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += TM_ILT_LINES; + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + +#else + ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); +#endif + BUG_ON(line > ILT_MAX_LINES); +} + +/** + * bnx2x_pf_q_prep_init - prepare INIT transition parameters + * + * @bp: driver handle + * @fp: pointer to fastpath + * @init_params: pointer to parameters structure + * + * parameters configured: + * - HC configuration + * - Queue's CDU context + */ +static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) +{ + + u8 cos; + /* FCoE Queue uses Default SB, thus has no HC capabilities */ + if (!IS_FCOE_FP(fp)) { + __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); + + /* If HC is supporterd, enable host coalescing in the transition + * to INIT state. + */ + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); + + /* HC rate */ + init_params->rx.hc_rate = bp->rx_ticks ? + (1000000 / bp->rx_ticks) : 0; + init_params->tx.hc_rate = bp->tx_ticks ? + (1000000 / bp->tx_ticks) : 0; + + /* FW SB ID */ + init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = + fp->fw_sb_id; + + /* + * CQ index among the SB indices: FCoE clients uses the default + * SB, therefore it's different. 
+ */ + init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; + } + + /* set maximum number of COSs supported by this queue */ + init_params->max_cos = fp->max_cos; + + DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n", + fp->index, init_params->max_cos); + + /* set the context pointers queue object */ + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) + init_params->cxts[cos] = + &bp->context.vcxt[fp->txdata[cos].cid].eth; +} + +int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct bnx2x_queue_state_params *q_params, + struct bnx2x_queue_setup_tx_only_params *tx_only_params, + int tx_index, bool leading) +{ + memset(tx_only_params, 0, sizeof(*tx_only_params)); + + /* Set the command */ + q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; + + /* Set tx-only QUEUE flags: don't zero statistics */ + tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); + + /* choose the index of the cid to send the slow path on */ + tx_only_params->cid_index = tx_index; + + /* Set general TX_ONLY_SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); + + /* Set Tx TX_ONLY_SETUP parameters */ + bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); + + DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:" + "cos %d, primary cid %d, cid %d, " + "client id %d, sp-client id %d, flags %lx\n", + tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], + q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, + tx_only_params->gen_params.spcl_id, tx_only_params->flags); + + /* send the ramrod */ + return bnx2x_queue_state_change(bp, q_params); +} + + +/** + * bnx2x_setup_queue - setup queue + * + * @bp: driver handle + * @fp: pointer to fastpath + * @leading: is leading + * + * This function performs 2 steps in a Queue state machine + * actually: 1) RESET->INIT 2) INIT->SETUP + */ + +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading) +{ + struct bnx2x_queue_state_params q_params = {0}; + struct bnx2x_queue_setup_params *setup_params = + &q_params.params.setup; + struct bnx2x_queue_setup_tx_only_params *tx_only_params = + &q_params.params.tx_only; + int rc; + u8 tx_index; + + DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index); + + /* reset IGU state skip FCoE L2 queue */ + if (!IS_FCOE_FP(fp)) + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, + IGU_INT_ENABLE, 0); + + q_params.q_obj = &fp->q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* Prepare the INIT parameters */ + bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); + + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_INIT; + + /* Change the state to INIT */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); + return rc; + } + + DP(BNX2X_MSG_SP, "init complete\n"); + + + /* Now move the Queue to the SETUP state... 
*/ + memset(setup_params, 0, sizeof(*setup_params)); + + /* Set QUEUE flags */ + setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); + + /* Set general SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, + FIRST_TX_COS_INDEX); + + bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, + &setup_params->rxq_params); + + bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, + FIRST_TX_COS_INDEX); + + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_SETUP; + + /* Change the state to SETUP */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); + return rc; + } + + /* loop through the relevant tx-only indices */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++) { + + /* prepare and send tx-only ramrod*/ + rc = bnx2x_setup_tx_only(bp, fp, &q_params, + tx_only_params, tx_index, leading); + if (rc) { + BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", + fp->index, tx_index); + return rc; + } + } + + return rc; +} + +static int bnx2x_stop_queue(struct bnx2x *bp, int index) +{ + struct bnx2x_fastpath *fp = &bp->fp[index]; + struct bnx2x_fp_txdata *txdata; + struct bnx2x_queue_state_params q_params = {0}; + int rc, tx_index; + + DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid); + + q_params.q_obj = &fp->q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + + /* close tx-only connections */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++){ + + /* ascertain this is a normal queue*/ + txdata = &fp->txdata[tx_index]; + + DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n", + txdata->txq_index); + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = tx_index; + + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = tx_index; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + } + /* Stop the primary connection: */ + /* ...halt the connection */ + q_params.cmd = BNX2X_Q_CMD_HALT; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* ...terminate the connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + /* ...delete cfc entry */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; + return bnx2x_queue_state_change(bp, &q_params); +} + + +static void bnx2x_reset_func(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int i; + + /* Disable the function in the FW */ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); + + /* FP SBs */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; 
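 + /* write SB_DISABLED into the status block state field of this fw_sb_id */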
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), + SB_DISABLED); + } + +#ifdef BCM_CNIC + /* CNIC SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), + SB_DISABLED); +#endif + /* SP SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), + SB_DISABLED); + + for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), + 0); + + /* Configure IGU */ + if (bp->common.int_block == INT_BLOCK_HC) { + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } else { + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + +#ifdef BCM_CNIC + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); + /* + * Wait for at least 10ms and up to 2 second for the timers scan to + * complete + */ + for (i = 0; i < 200; i++) { + msleep(10); + if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) + break; + } +#endif + /* Clear ILT */ + bnx2x_clear_func_ilt(bp, func); + + /* Timers workaround bug for E2: if this is vnic-3, + * we need to set the entire ilt range for this timers. + */ + if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { + struct ilt_client_info ilt_cli; + /* use dummy TM client */ + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + + bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); + } + + /* this assumes that reset_port() called before reset_func()*/ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_disable(bp); + + bp->dmae_ready = 0; +} + +static void bnx2x_reset_port(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 val; + + /* Reset physical Link */ + bnx2x__link_reset(bp); + + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); + + /* Do not rcv packets to BRB */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); + /* Do not direct rcv packets that are not for MCP to the BRB */ + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + + /* Configure AEU */ + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); + + msleep(100); + /* Check for BRB port occupancy */ + val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); + if (val) + DP(NETIF_MSG_IFDOWN, + "BRB1 is not empty %d blocks are occupied\n", val); + + /* TODO: Close Doorbell port? */ +} + +static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) +{ + struct bnx2x_func_state_params func_params = {0}; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_HW_RESET; + + func_params.params.hw_init.load_phase = load_code; + + return bnx2x_func_state_change(bp, &func_params); +} + +static inline int bnx2x_func_stop(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + int rc; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_STOP; + + /* + * Try to stop the function the 'good way'. If fails (in case + * of a parity error during bnx2x_chip_cleanup()) and we are + * not in a debug mode, perform a state transaction in order to + * enable further HW_RESET transaction. 
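 + * (the retry below runs the STOP command with RAMROD_DRV_CLR_ONLY set,
 + * i.e. the driver state is advanced without sending another ramrod to
 + * the chip - the "dry transaction" mentioned in the error message)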
+ */
 + rc = bnx2x_func_state_change(bp, &func_params);
 + if (rc) {
 +#ifdef BNX2X_STOP_ON_ERROR
 + return rc;
 +#else
 + BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
 + "transaction\n");
 + __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
 + return bnx2x_func_state_change(bp, &func_params);
 +#endif
 + }
 +
 + return 0;
 +}
 +
 +/**
 + * bnx2x_send_unload_req - request unload mode from the MCP.
 + *
 + * @bp: driver handle
 + * @unload_mode: requested function's unload mode
 + *
 + * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 + */
 +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
 +{
 + u32 reset_code = 0;
 + int port = BP_PORT(bp);
 +
 + /* Select the UNLOAD request mode */
 + if (unload_mode == UNLOAD_NORMAL)
 + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 + else if (bp->flags & NO_WOL_FLAG)
 + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
 +
 + else if (bp->wol) {
 + u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 + u8 *mac_addr = bp->dev->dev_addr;
 + u32 val;
 + /* The MAC address is written to entries 1-4 to
 + preserve entry 0, which is used by the PMF */
 + u8 entry = (BP_E1HVN(bp) + 1)*8;
 +
 + val = (mac_addr[0] << 8) | mac_addr[1];
 + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
 +
 + val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 + (mac_addr[4] << 8) | mac_addr[5];
 + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 +
 + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 +
 + } else
 + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 +
 + /* Send the request to the MCP */
 + if (!BP_NOMCP(bp))
 + reset_code = bnx2x_fw_command(bp, reset_code, 0);
 + else {
 + int path = BP_PATH(bp);
 +
 + DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
 + "%d, %d, %d\n",
 + path, load_count[path][0], load_count[path][1],
 + load_count[path][2]);
 + load_count[path][0]--;
 + load_count[path][1 + port]--;
 + DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
 + "%d, %d, %d\n",
 + path, load_count[path][0], load_count[path][1],
 + load_count[path][2]);
 + if (load_count[path][0] == 0)
 + reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
 + else if (load_count[path][1 + port] == 0)
 + reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
 + else
 + reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
 + }
 +
 + return reset_code;
 +}
 +
 +/**
 + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 + *
 + * @bp: driver handle
 + */
 +void bnx2x_send_unload_done(struct bnx2x *bp)
 +{
 + /* Report UNLOAD_DONE to MCP */
 + if (!BP_NOMCP(bp))
 + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 +}
 +
 +static inline int bnx2x_func_wait_started(struct bnx2x *bp)
 +{
 + int tout = 50;
 + int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 +
 + if (!bp->port.pmf)
 + return 0;
 +
 + /*
 + * (assumption: No Attention from MCP at this stage)
 + * The PMF is probably in the middle of a TXdisable/enable transaction
 + * 1. Sync the IRQ for the default SB
 + * 2. Sync the SP queue - this guarantees us that attention handling has started
 + * 3. Wait until the TXdisable/enable transaction completes
 +
 + * 1+2 guarantee that if a DCBx attention was scheduled it has already changed
 + * the pending bit of the transaction from STARTED-->TX_STOPPED; if we have already
 + * received the completion for the transaction, the state is TX_STOPPED.
 + * The state will return to STARTED after the TX_STOPPED-->STARTED
 + * transaction completes.
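 + * The polling loop below waits up to 50 * 20ms (about one second) for the
 + * function to reach the STARTED state before forcing both transactions
 + * with the CLR bit.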
+ */ + + /* make sure default SB ISR is done */ + if (msix) + synchronize_irq(bp->msix_table[0].vector); + else + synchronize_irq(bp->pdev->irq); + + flush_workqueue(bnx2x_wq); + + while (bnx2x_func_get_state(bp, &bp->func_obj) != + BNX2X_F_STATE_STARTED && tout--) + msleep(20); + + if (bnx2x_func_get_state(bp, &bp->func_obj) != + BNX2X_F_STATE_STARTED) { +#ifdef BNX2X_STOP_ON_ERROR + return -EBUSY; +#else + /* + * Failed to complete the transaction in a "good way" + * Force both transactions with CLR bit + */ + struct bnx2x_func_state_params func_params = {0}; + + DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! " + "Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + + func_params.f_obj = &bp->func_obj; + __set_bit(RAMROD_DRV_CLR_ONLY, + &func_params.ramrod_flags); + + /* STARTED-->TX_ST0PPED */ + func_params.cmd = BNX2X_F_CMD_TX_STOP; + bnx2x_func_state_change(bp, &func_params); + + /* TX_ST0PPED-->STARTED */ + func_params.cmd = BNX2X_F_CMD_TX_START; + return bnx2x_func_state_change(bp, &func_params); +#endif + } + + return 0; +} + +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) +{ + int port = BP_PORT(bp); + int i, rc = 0; + u8 cos; + struct bnx2x_mcast_ramrod_params rparam = {0}; + u32 reset_code; + + /* Wait until tx fastpath tasks complete */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + for_each_cos_in_tx_queue(fp, cos) + rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); +#ifdef BNX2X_STOP_ON_ERROR + if (rc) + return; +#endif + } + + /* Give HW time to discard old tx messages */ + usleep_range(1000, 1000); + + /* Clean all ETH MACs */ + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); + if (rc < 0) + BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); + + /* Clean up UC list */ + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, + true); + if (rc < 0) + BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: " + "%d\n", rc); + + /* Disable LLH */ + if (!CHIP_IS_E1(bp)) + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); + + /* Set "drop all" (stop Rx). + * We need to take a netif_addr_lock() here in order to prevent + * a race between the completion code and this code. + */ + netif_addr_lock_bh(bp->dev); + /* Schedule the rx_mode command */ + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) + set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + else + bnx2x_set_storm_rx_mode(bp); + + /* Cleanup multicast configuration */ + rparam.mcast_obj = &bp->mcast_obj; + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) + BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); + + netif_addr_unlock_bh(bp->dev); + + + + /* + * Send the UNLOAD_REQUEST to the MCP. This will return if + * this function should perform FUNC, PORT or COMMON HW + * reset. + */ + reset_code = bnx2x_send_unload_req(bp, unload_mode); + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TXdisable/enable transaction + */ + rc = bnx2x_func_wait_started(bp); + if (rc) { + BNX2X_ERR("bnx2x_func_wait_started failed\n"); +#ifdef BNX2X_STOP_ON_ERROR + return; +#endif + } + + /* Close multi and leading connections + * Completions for ramrods are collected in a synchronous way + */ + for_each_queue(bp, i) + if (bnx2x_stop_queue(bp, i)) +#ifdef BNX2X_STOP_ON_ERROR + return; +#else + goto unload_error; +#endif + /* If SP settings didn't get completed so far - something + * very wrong has happen. + */ + if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) + BNX2X_ERR("Hmmm... 
Common slow path ramrods got stuck!\n"); + +#ifndef BNX2X_STOP_ON_ERROR +unload_error: +#endif + rc = bnx2x_func_stop(bp); + if (rc) { + BNX2X_ERR("Function stop failed!\n"); +#ifdef BNX2X_STOP_ON_ERROR + return; +#endif + } + + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + + /* Release IRQs */ + bnx2x_free_irq(bp); + + /* Reset the chip */ + rc = bnx2x_reset_hw(bp, reset_code); + if (rc) + BNX2X_ERR("HW_RESET failed\n"); + + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp); +} + +void bnx2x_disable_close_the_gate(struct bnx2x *bp) +{ + u32 val; + + DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n"); + + if (CHIP_IS_E1(bp)) { + int port = BP_PORT(bp); + u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + + val = REG_RD(bp, addr); + val &= ~(0x300); + REG_WR(bp, addr, val); + } else { + val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); + val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | + MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); + REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); + } +} + +/* Close gates #2, #3 and #4: */ +static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) +{ + u32 val; + + /* Gates #2 and #4a are closed/opened for "not E1" only */ + if (!CHIP_IS_E1(bp)) { + /* #4 */ + REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); + /* #2 */ + REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); + } + + /* #3 */ + if (CHIP_IS_E1x(bp)) { + /* Prevent interrupts from HC on both ports */ + val = REG_RD(bp, HC_REG_CONFIG_1); + REG_WR(bp, HC_REG_CONFIG_1, + (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : + (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); + + val = REG_RD(bp, HC_REG_CONFIG_0); + REG_WR(bp, HC_REG_CONFIG_0, + (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : + (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); + } else { + /* Prevent incomming interrupts in IGU */ + val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); + + REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, + (!close) ? + (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : + (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); + } + + DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", + close ? "closing" : "opening"); + mmiowb(); +} + +#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ + +static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) +{ + /* Do some magic... */ + u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); + *magic_val = val & SHARED_MF_CLP_MAGIC; + MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); +} + +/** + * bnx2x_clp_reset_done - restore the value of the `magic' bit. + * + * @bp: driver handle + * @magic_val: old value of the `magic' bit. + */ +static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) +{ + /* Restore the `magic' bit value... */ + u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); + MF_CFG_WR(bp, shared_mf_config.clp_mb, + (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); +} + +/** + * bnx2x_reset_mcp_prep - prepare for MCP reset. + * + * @bp: driver handle + * @magic_val: old value of 'magic' bit. + * + * Takes care of CLP configurations. 
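 + * It saves the `magic' bit (so the MF configuration is preserved across
 + * the reset) and clears the shmem validity map, which bnx2x_init_shmem()
 + * later polls until the MCP marks it valid again.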
+ */ +static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) +{ + u32 shmem; + u32 validity_offset; + + DP(NETIF_MSG_HW, "Starting\n"); + + /* Set `magic' bit in order to save MF config */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_prep(bp, magic_val); + + /* Get shmem offset */ + shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + validity_offset = offsetof(struct shmem_region, validity_map[0]); + + /* Clear validity map flags */ + if (shmem > 0) + REG_WR(bp, shmem + validity_offset, 0); +} + +#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ +#define MCP_ONE_TIMEOUT 100 /* 100 ms */ + +/** + * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT + * + * @bp: driver handle + */ +static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) +{ + /* special handling for emulation and FPGA, + wait 10 times longer */ + if (CHIP_REV_IS_SLOW(bp)) + msleep(MCP_ONE_TIMEOUT*10); + else + msleep(MCP_ONE_TIMEOUT); +} + +/* + * initializes bp->common.shmem_base and waits for validity signature to appear + */ +static int bnx2x_init_shmem(struct bnx2x *bp) +{ + int cnt = 0; + u32 val = 0; + + do { + bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + if (bp->common.shmem_base) { + val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); + if (val & SHR_MEM_VALIDITY_MB) + return 0; + } + + bnx2x_mcp_wait_one(bp); + + } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); + + BNX2X_ERR("BAD MCP validity signature\n"); + + return -ENODEV; +} + +static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) +{ + int rc = bnx2x_init_shmem(bp); + + /* Restore the `magic' bit value */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_done(bp, magic_val); + + return rc; +} + +static void bnx2x_pxp_prep(struct bnx2x *bp) +{ + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, PXP2_REG_RD_START_INIT, 0); + REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); + mmiowb(); + } +} + +/* + * Reset the whole chip except for: + * - PCIE core + * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by + * one reset bit) + * - IGU + * - MISC (including AEU) + * - GRC + * - RBCN, RBCP + */ +static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) +{ + u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; + u32 global_bits2, stay_reset2; + + /* + * Bits that have to be set in reset_mask2 if we want to reset 'global' + * (per chip) blocks. + */ + global_bits2 = + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; + + /* Don't reset the following blocks */ + not_reset_mask1 = + MISC_REGISTERS_RESET_REG_1_RST_HC | + MISC_REGISTERS_RESET_REG_1_RST_PXPV | + MISC_REGISTERS_RESET_REG_1_RST_PXP; + + not_reset_mask2 = + MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | + MISC_REGISTERS_RESET_REG_2_RST_RBCN | + MISC_REGISTERS_RESET_REG_2_RST_GRC | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | + MISC_REGISTERS_RESET_REG_2_RST_ATC | + MISC_REGISTERS_RESET_REG_2_PGLC; + + /* + * Keep the following blocks in reset: + * - all xxMACs are handled by the bnx2x_link code. 
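 + * (their reset bits are collected in stay_reset2 below and masked out of
 + * the final RESET_REG_2_SET write, so the MACs are left in reset for the
 + * link code to bring up)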
+ */ + stay_reset2 = + MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | + MISC_REGISTERS_RESET_REG_2_UMAC0 | + MISC_REGISTERS_RESET_REG_2_UMAC1 | + MISC_REGISTERS_RESET_REG_2_XMAC | + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; + + /* Full reset masks according to the chip */ + reset_mask1 = 0xffffffff; + + if (CHIP_IS_E1(bp)) + reset_mask2 = 0xffff; + else if (CHIP_IS_E1H(bp)) + reset_mask2 = 0x1ffff; + else if (CHIP_IS_E2(bp)) + reset_mask2 = 0xfffff; + else /* CHIP_IS_E3 */ + reset_mask2 = 0x3ffffff; + + /* Don't reset global blocks unless we need to */ + if (!global) + reset_mask2 &= ~global_bits2; + + /* + * In case of attention in the QM, we need to reset PXP + * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM + * because otherwise QM reset would release 'close the gates' shortly + * before resetting the PXP, then the PSWRQ would send a write + * request to PGLUE. Then when PXP is reset, PGLUE would try to + * read the payload data from PSWWR, but PSWWR would not + * respond. The write queue in PGLUE would stuck, dmae commands + * would not return. Therefore it's important to reset the second + * reset register (containing the + * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the + * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM + * bit). + */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + reset_mask2 & (~not_reset_mask2)); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + reset_mask1 & (~not_reset_mask1)); + + barrier(); + mmiowb(); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + reset_mask2 & (~stay_reset2)); + + barrier(); + mmiowb(); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); + mmiowb(); +} + +/** + * bnx2x_er_poll_igu_vq - poll for pending writes bit. + * It should get cleared in no more than 1s. + * + * @bp: driver handle + * + * It should get cleared in no more than 1s. Returns 0 if + * pending writes bit gets cleared. 
+ */ +static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) +{ + u32 cnt = 1000; + u32 pend_bits = 0; + + do { + pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); + + if (pend_bits == 0) + break; + + usleep_range(1000, 1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", + pend_bits); + return -EBUSY; + } + + return 0; +} + +static int bnx2x_process_kill(struct bnx2x *bp, bool global) +{ + int cnt = 1000; + u32 val = 0; + u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; + + + /* Empty the Tetris buffer, wait for 1s */ + do { + sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); + blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); + port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); + port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); + pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); + if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && + ((port_is_idle_0 & 0x1) == 0x1) && + ((port_is_idle_1 & 0x1) == 0x1) && + (pgl_exp_rom2 == 0xffffffff)) + break; + usleep_range(1000, 1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there" + " are still" + " outstanding read requests after 1s!\n"); + DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x," + " port_is_idle_0=0x%08x," + " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", + sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, + pgl_exp_rom2); + return -EAGAIN; + } + + barrier(); + + /* Close gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, true); + + /* Poll for IGU VQs for 57712 and newer chips */ + if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) + return -EAGAIN; + + + /* TBD: Indicate that "process kill" is in progress to MCP */ + + /* Clear "unprepared" bit */ + REG_WR(bp, MISC_REG_UNPREPARED, 0); + barrier(); + + /* Make sure all is written to the chip before the reset */ + mmiowb(); + + /* Wait for 1ms to empty GLUE and PCI-E core queues, + * PSWHST, GRC and PSWRD Tetris buffer. + */ + usleep_range(1000, 1000); + + /* Prepare to chip reset: */ + /* MCP */ + if (global) + bnx2x_reset_mcp_prep(bp, &val); + + /* PXP */ + bnx2x_pxp_prep(bp); + barrier(); + + /* reset the chip */ + bnx2x_process_kill_chip_reset(bp, global); + barrier(); + + /* Recover after reset: */ + /* MCP */ + if (global && bnx2x_reset_mcp_comp(bp, val)) + return -EAGAIN; + + /* TBD: Add resetting the NO_MCP mode DB here */ + + /* PXP */ + bnx2x_pxp_prep(bp); + + /* Open the gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, false); + + /* TBD: IGU/AEU preparation bring back the AEU/IGU to a + * reset state, re-enable attentions. */ + + return 0; +} + +int bnx2x_leader_reset(struct bnx2x *bp) +{ + int rc = 0; + bool global = bnx2x_reset_is_global(bp); + + /* Try to recover after the failure */ + if (bnx2x_process_kill(bp, global)) { + netdev_err(bp->dev, "Something bad had happen on engine %d! " + "Aii!\n", BP_PATH(bp)); + rc = -EAGAIN; + goto exit_leader_reset; + } + + /* + * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver + * state. + */ + bnx2x_set_reset_done(bp); + if (global) + bnx2x_clear_reset_global(bp); + +exit_leader_reset: + bp->is_leader = 0; + bnx2x_release_leader_lock(bp); + smp_mb(); + return rc; +} + +static inline void bnx2x_recovery_failed(struct bnx2x *bp) +{ + netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); + + /* Disconnect this device */ + netif_device_detach(bp->dev); + + /* + * Block ifup for all function on this engine until "process kill" + * or power cycle. 
+ */ + bnx2x_set_reset_in_progress(bp); + + /* Shut down the power */ + bnx2x_set_power_state(bp, PCI_D3hot); + + bp->recovery_state = BNX2X_RECOVERY_FAILED; + + smp_mb(); +} + +/* + * Assumption: runs under rtnl lock. This together with the fact + * that it's called only from bnx2x_sp_rtnl() ensure that it + * will never be called when netif_running(bp->dev) is false. + */ +static void bnx2x_parity_recover(struct bnx2x *bp) +{ + bool global = false; + + DP(NETIF_MSG_HW, "Handling parity\n"); + while (1) { + switch (bp->recovery_state) { + case BNX2X_RECOVERY_INIT: + DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); + bnx2x_chk_parity_attn(bp, &global, false); + + /* Try to get a LEADER_LOCK HW lock */ + if (bnx2x_trylock_leader_lock(bp)) { + bnx2x_set_reset_in_progress(bp); + /* + * Check if there is a global attention and if + * there was a global attention, set the global + * reset bit. + */ + + if (global) + bnx2x_set_reset_global(bp); + + bp->is_leader = 1; + } + + /* Stop the driver */ + /* If interface has been removed - break */ + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) + return; + + bp->recovery_state = BNX2X_RECOVERY_WAIT; + + /* + * Reset MCP command sequence number and MCP mail box + * sequence as we are going to reset the MCP. + */ + if (global) { + bp->fw_seq = 0; + bp->fw_drv_pulse_wr_seq = 0; + } + + /* Ensure "is_leader", MCP command sequence and + * "recovery_state" update values are seen on other + * CPUs. + */ + smp_mb(); + break; + + case BNX2X_RECOVERY_WAIT: + DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); + if (bp->is_leader) { + int other_engine = BP_PATH(bp) ? 0 : 1; + u32 other_load_counter = + bnx2x_get_load_cnt(bp, other_engine); + u32 load_counter = + bnx2x_get_load_cnt(bp, BP_PATH(bp)); + global = bnx2x_reset_is_global(bp); + + /* + * In case of a parity in a global block, let + * the first leader that performs a + * leader_reset() reset the global blocks in + * order to clear global attentions. Otherwise + * the the gates will remain closed for that + * engine. + */ + if (load_counter || + (global && other_load_counter)) { + /* Wait until all other functions get + * down. + */ + schedule_delayed_work(&bp->sp_rtnl_task, + HZ/10); + return; + } else { + /* If all other functions got down - + * try to bring the chip back to + * normal. In any case it's an exit + * point for a leader. + */ + if (bnx2x_leader_reset(bp)) { + bnx2x_recovery_failed(bp); + return; + } + + /* If we are here, means that the + * leader has succeeded and doesn't + * want to be a leader any more. Try + * to continue as a none-leader. + */ + break; + } + } else { /* non-leader */ + if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { + /* Try to get a LEADER_LOCK HW lock as + * long as a former leader may have + * been unloaded by the user or + * released a leadership by another + * reason. + */ + if (bnx2x_trylock_leader_lock(bp)) { + /* I'm a leader now! Restart a + * switch case. + */ + bp->is_leader = 1; + break; + } + + schedule_delayed_work(&bp->sp_rtnl_task, + HZ/10); + return; + + } else { + /* + * If there was a global attention, wait + * for it to be cleared. 
+ */ + if (bnx2x_reset_is_global(bp)) { + schedule_delayed_work( + &bp->sp_rtnl_task, + HZ/10); + return; + } + + if (bnx2x_nic_load(bp, LOAD_NORMAL)) + bnx2x_recovery_failed(bp); + else { + bp->recovery_state = + BNX2X_RECOVERY_DONE; + smp_mb(); + } + + return; + } + } + default: + return; + } + } +} + +/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is + * scheduled on a general queue in order to prevent a dead lock. + */ +static void bnx2x_sp_rtnl_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); + + rtnl_lock(); + + if (!netif_running(bp->dev)) + goto sp_rtnl_exit; + + /* if stop on error is defined no recovery flows should be executed */ +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined " + "so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { + /* + * Clear all pending SP commands as we are going to reset the + * function anyway. + */ + bp->sp_rtnl_state = 0; + smp_mb(); + + bnx2x_parity_recover(bp); + + goto sp_rtnl_exit; + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { + /* + * Clear all pending SP commands as we are going to reset the + * function anyway. + */ + bp->sp_rtnl_state = 0; + smp_mb(); + + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_load(bp, LOAD_NORMAL); + + goto sp_rtnl_exit; + } +#ifdef BNX2X_STOP_ON_ERROR +sp_rtnl_not_reset: +#endif + if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) + bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); + +sp_rtnl_exit: + rtnl_unlock(); +} + +/* end of nic load/unload */ + +static void bnx2x_period_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); + + if (!netif_running(bp->dev)) + goto period_task_exit; + + if (CHIP_REV_IS_SLOW(bp)) { + BNX2X_ERR("period task called on emulation, ignoring\n"); + goto period_task_exit; + } + + bnx2x_acquire_phy_lock(bp); + /* + * The barrier is needed to ensure the ordering between the writing to + * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and + * the reading here. 
+ */ + smp_mb(); + if (bp->port.pmf) { + bnx2x_period_func(&bp->link_params, &bp->link_vars); + + /* Re-queue task in 1 sec */ + queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); + } + + bnx2x_release_phy_lock(bp); +period_task_exit: + return; +} + +/* + * Init service functions + */ + +static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) +{ + u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; + u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; + return base + (BP_ABS_FUNC(bp)) * stride; +} + +static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) +{ + u32 reg = bnx2x_get_pretend_reg(bp); + + /* Flush all outstanding writes */ + mmiowb(); + + /* Pretend to be function 0 */ + REG_WR(bp, reg, 0); + REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ + + /* From now we are in the "like-E1" mode */ + bnx2x_int_disable(bp); + + /* Flush all outstanding writes */ + mmiowb(); + + /* Restore the original function */ + REG_WR(bp, reg, BP_ABS_FUNC(bp)); + REG_RD(bp, reg); +} + +static inline void bnx2x_undi_int_disable(struct bnx2x *bp) +{ + if (CHIP_IS_E1(bp)) + bnx2x_int_disable(bp); + else + bnx2x_undi_int_disable_e1h(bp); +} + +static void __devinit bnx2x_undi_unload(struct bnx2x *bp) +{ + u32 val; + + /* Check if there is any driver already loaded */ + val = REG_RD(bp, MISC_REG_UNPREPARED); + if (val == 0x1) { + /* Check if it is the UNDI driver + * UNDI driver initializes CID offset for normal bell to 0x7 + */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); + if (val == 0x7) { + u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + /* save our pf_num */ + int orig_pf_num = bp->pf_num; + int port; + u32 swap_en, swap_val, value; + + /* clear the UNDI indication */ + REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); + + BNX2X_DEV_INFO("UNDI is active! reset device\n"); + + /* try unload UNDI on port 0 */ + bp->pf_num = 0; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + reset_code = bnx2x_fw_command(bp, reset_code, 0); + + /* if UNDI is loaded on the other port */ + if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { + + /* send "DONE" for previous unload */ + bnx2x_fw_command(bp, + DRV_MSG_CODE_UNLOAD_DONE, 0); + + /* unload UNDI on port 1 */ + bp->pf_num = 1; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + + bnx2x_fw_command(bp, reset_code, 0); + } + + /* now it's safe to release the lock */ + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + + bnx2x_undi_int_disable(bp); + port = BP_PORT(bp); + + /* close input traffic and wait for it */ + /* Do not rcv packets to BRB */ + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK : + NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); + /* Do not direct rcv packets that are not for MCP to + * the BRB */ + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + /* clear AEU */ + REG_WR(bp, (port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); + msleep(10); + + /* save NIG port swap info */ + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + /* reset device */ + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0xd3ffffff); + + value = 0x1400; + if (CHIP_IS_E3(bp)) { + value |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + value |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + value); + + /* take the NIG out of reset and restore swap values */ + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, + MISC_REGISTERS_RESET_REG_1_RST_NIG); + REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); + REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); + + /* send unload done to the MCP */ + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + + /* restore our func and fw_seq */ + bp->pf_num = orig_pf_num; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + } else + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + } +} + +static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) +{ + u32 val, val2, val3, val4, id; + u16 pmc; + + /* Get the chip revision id and number. */ + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + val = REG_RD(bp, MISC_REG_CHIP_NUM); + id = ((val & 0xffff) << 16); + val = REG_RD(bp, MISC_REG_CHIP_REV); + id |= ((val & 0xf) << 12); + val = REG_RD(bp, MISC_REG_CHIP_METAL); + id |= ((val & 0xff) << 4); + val = REG_RD(bp, MISC_REG_BOND_ID); + id |= (val & 0xf); + bp->common.chip_id = id; + + /* Set doorbell size */ + bp->db_size = (1 << BNX2X_DB_SHIFT); + + if (!CHIP_IS_E1x(bp)) { + val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); + if ((val & 1) == 0) + val = REG_RD(bp, MISC_REG_PORT4MODE_EN); + else + val = (val >> 1) & 1; + BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : + "2_PORT_MODE"); + bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : + CHIP_2_PORT_MODE; + + if (CHIP_MODE_IS_4_PORT(bp)) + bp->pfid = (bp->pf_num >> 1); /* 0..3 */ + else + bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ + } else { + bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ + bp->pfid = bp->pf_num; /* 0..7 */ + } + + bp->link_params.chip_id = bp->common.chip_id; + BNX2X_DEV_INFO("chip ID is 0x%x\n", id); + + val = (REG_RD(bp, 0x2874) & 0x55); + if ((bp->common.chip_id & 0x1) || + (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { + bp->flags |= ONE_PORT_FLAG; + BNX2X_DEV_INFO("single port device\n"); + } + + val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); + bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << + (val & MCPR_NVM_CFG4_FLASH_SIZE)); + BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", + bp->common.flash_size, bp->common.flash_size); + + bnx2x_init_shmem(bp); + + + + bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
+ MISC_REG_GENERIC_CR_1 : + MISC_REG_GENERIC_CR_0)); + + bp->link_params.shmem_base = bp->common.shmem_base; + bp->link_params.shmem2_base = bp->common.shmem2_base; + BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", + bp->common.shmem_base, bp->common.shmem2_base); + + if (!bp->common.shmem_base) { + BNX2X_DEV_INFO("MCP not active\n"); + bp->flags |= NO_MCP_FLAG; + return; + } + + bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); + BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); + + bp->link_params.hw_led_mode = ((bp->common.hw_config & + SHARED_HW_CFG_LED_MODE_MASK) >> + SHARED_HW_CFG_LED_MODE_SHIFT); + + bp->link_params.feature_config_flags = 0; + val = SHMEM_RD(bp, dev_info.shared_feature_config.config); + if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) + bp->link_params.feature_config_flags |= + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + else + bp->link_params.feature_config_flags &= + ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + + val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; + bp->common.bc_ver = val; + BNX2X_DEV_INFO("bc_ver %X\n", val); + if (val < BNX2X_BC_VER) { + /* for now only warn + * later we might need to enforce this */ + BNX2X_ERR("This driver needs bc_ver %X but found %X, " + "please upgrade BC\n", BNX2X_BC_VER, val); + } + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? + FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; + + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? + FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; + + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? + FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; + + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); + bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; + + BNX2X_DEV_INFO("%sWoL capable\n", + (bp->flags & NO_WOL_FLAG) ? "not " : ""); + + val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); + val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); + val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); + val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); + + dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", + val, val2, val3, val4); +} + +#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) +#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) + +static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) +{ + int pfid = BP_FUNC(bp); + int vn = BP_E1HVN(bp); + int igu_sb_id; + u32 val; + u8 fid, igu_sb_cnt = 0; + + bp->igu_base_sb = 0xff; + if (CHIP_INT_MODE_IS_BC(bp)) { + igu_sb_cnt = bp->igu_sb_cnt; + bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * + FP_SB_MAX_E1x; + + bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + + (CHIP_MODE_IS_4_PORT(bp) ? 
pfid : vn); + + return; + } + + /* IGU in normal mode - read CAM */ + for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; + igu_sb_id++) { + val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) + continue; + fid = IGU_FID(val); + if ((fid & IGU_FID_ENCODE_IS_PF)) { + if ((fid & IGU_FID_PF_NUM_MASK) != pfid) + continue; + if (IGU_VEC(val) == 0) + /* default status block */ + bp->igu_dsb_id = igu_sb_id; + else { + if (bp->igu_base_sb == 0xff) + bp->igu_base_sb = igu_sb_id; + igu_sb_cnt++; + } + } + } + +#ifdef CONFIG_PCI_MSI + /* + * It's expected that number of CAM entries for this functions is equal + * to the number evaluated based on the MSI-X table size. We want a + * harsh warning if these values are different! + */ + WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); +#endif + + if (igu_sb_cnt == 0) + BNX2X_ERR("CAM configuration error\n"); +} + +static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, + u32 switch_cfg) +{ + int cfg_size = 0, idx, port = BP_PORT(bp); + + /* Aggregation of supported attributes of all external phys */ + bp->port.supported[0] = 0; + bp->port.supported[1] = 0; + switch (bp->link_params.num_phys) { + case 1: + bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; + cfg_size = 1; + break; + case 2: + bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; + cfg_size = 1; + break; + case 3: + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + bp->port.supported[1] = + bp->link_params.phy[EXT_PHY1].supported; + bp->port.supported[0] = + bp->link_params.phy[EXT_PHY2].supported; + } else { + bp->port.supported[0] = + bp->link_params.phy[EXT_PHY1].supported; + bp->port.supported[1] = + bp->link_params.phy[EXT_PHY2].supported; + } + cfg_size = 2; + break; + } + + if (!(bp->port.supported[0] || bp->port.supported[1])) { + BNX2X_ERR("NVRAM config error. BAD phy config." 
+ "PHY1 config 0x%x, PHY2 config 0x%x\n", + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config), + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config2)); + return; + } + + if (CHIP_IS_E3(bp)) + bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); + else { + switch (switch_cfg) { + case SWITCH_CFG_1G: + bp->port.phy_addr = REG_RD( + bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); + break; + case SWITCH_CFG_10G: + bp->port.phy_addr = REG_RD( + bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); + break; + default: + BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", + bp->port.link_config[0]); + return; + } + } + BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); + /* mask what we support according to speed_cap_mask per configuration */ + for (idx = 0; idx < cfg_size; idx++) { + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) + bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) + bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) + bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) + bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) + bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) + bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; + + } + + BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], + bp->port.supported[1]); +} + +static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) +{ + u32 link_config, idx, cfg_size = 0; + bp->port.advertising[0] = 0; + bp->port.advertising[1] = 0; + switch (bp->link_params.num_phys) { + case 1: + case 2: + cfg_size = 1; + break; + case 3: + cfg_size = 2; + break; + } + for (idx = 0; idx < cfg_size; idx++) { + bp->link_params.req_duplex[idx] = DUPLEX_FULL; + link_config = bp->port.link_config[idx]; + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_AUTO: + if (bp->port.supported[idx] & SUPPORTED_Autoneg) { + bp->link_params.req_line_speed[idx] = + SPEED_AUTO_NEG; + bp->port.advertising[idx] |= + bp->port.supported[idx]; + } else { + /* force 10G, no AN */ + bp->link_params.req_line_speed[idx] = + SPEED_10000; + bp->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + continue; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_FULL: + if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_10; + bp->port.advertising[idx] |= + (ADVERTISED_10baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. 
" + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_HALF: + if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { + bp->link_params.req_line_speed[idx] = + SPEED_10; + bp->link_params.req_duplex[idx] = + DUPLEX_HALF; + bp->port.advertising[idx] |= + (ADVERTISED_10baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_FULL: + if (bp->port.supported[idx] & + SUPPORTED_100baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_100; + bp->port.advertising[idx] |= + (ADVERTISED_100baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_HALF: + if (bp->port.supported[idx] & + SUPPORTED_100baseT_Half) { + bp->link_params.req_line_speed[idx] = + SPEED_100; + bp->link_params.req_duplex[idx] = + DUPLEX_HALF; + bp->port.advertising[idx] |= + (ADVERTISED_100baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_1G: + if (bp->port.supported[idx] & + SUPPORTED_1000baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_1000; + bp->port.advertising[idx] |= + (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_2_5G: + if (bp->port.supported[idx] & + SUPPORTED_2500baseX_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_2500; + bp->port.advertising[idx] |= + (ADVERTISED_2500baseX_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10G_CX4: + if (bp->port.supported[idx] & + SUPPORTED_10000baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_10000; + bp->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + case PORT_FEATURE_LINK_SPEED_20G: + bp->link_params.req_line_speed[idx] = SPEED_20000; + + break; + default: + BNX2X_ERR("NVRAM config error. 
" + "BAD link speed link_config 0x%x\n", + link_config); + bp->link_params.req_line_speed[idx] = + SPEED_AUTO_NEG; + bp->port.advertising[idx] = + bp->port.supported[idx]; + break; + } + + bp->link_params.req_flow_ctrl[idx] = (link_config & + PORT_FEATURE_FLOW_CONTROL_MASK); + if ((bp->link_params.req_flow_ctrl[idx] == + BNX2X_FLOW_CTRL_AUTO) && + !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { + bp->link_params.req_flow_ctrl[idx] = + BNX2X_FLOW_CTRL_NONE; + } + + BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl" + " 0x%x advertising 0x%x\n", + bp->link_params.req_line_speed[idx], + bp->link_params.req_duplex[idx], + bp->link_params.req_flow_ctrl[idx], + bp->port.advertising[idx]); + } +} + +static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) +{ + mac_hi = cpu_to_be16(mac_hi); + mac_lo = cpu_to_be32(mac_lo); + memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); + memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); +} + +static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 config; + u32 ext_phy_type, ext_phy_config; + + bp->link_params.bp = bp; + bp->link_params.port = port; + + bp->link_params.lane_config = + SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); + + bp->link_params.speed_cap_mask[0] = + SHMEM_RD(bp, + dev_info.port_hw_config[port].speed_capability_mask); + bp->link_params.speed_cap_mask[1] = + SHMEM_RD(bp, + dev_info.port_hw_config[port].speed_capability_mask2); + bp->port.link_config[0] = + SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); + + bp->port.link_config[1] = + SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); + + bp->link_params.multi_phy_config = + SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); + /* If the device is capable of WoL, set the default state according + * to the HW + */ + config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); + bp->wol = (!(bp->flags & NO_WOL_FLAG) && + (config & PORT_FEATURE_WOL_ENABLED)); + + BNX2X_DEV_INFO("lane_config 0x%08x " + "speed_cap_mask0 0x%08x link_config0 0x%08x\n", + bp->link_params.lane_config, + bp->link_params.speed_cap_mask[0], + bp->port.link_config[0]); + + bp->link_params.switch_cfg = (bp->port.link_config[0] & + PORT_FEATURE_CONNECTED_SWITCH_MASK); + bnx2x_phy_probe(&bp->link_params); + bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); + + bnx2x_link_settings_requested(bp); + + /* + * If connected directly, work with the internal PHY, otherwise, work + * with the external PHY + */ + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); + if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + bp->mdio.prtad = bp->port.phy_addr; + + else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && + (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + bp->mdio.prtad = + XGXS_EXT_PHY_ADDR(ext_phy_config); + + /* + * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) + * In MF mode, it is set to cover self test cases + */ + if (IS_MF(bp)) + bp->port.need_hw_lock = 1; + else + bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, + bp->common.shmem_base, + bp->common.shmem2_base); +} + +#ifdef BCM_CNIC +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_ABS_FUNC(bp); + + u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_iscsi_conn); + u32 max_fcoe_conn = 
FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn); + + /* Get the number of maximum allowed iSCSI and FCoE connections */ + bp->cnic_eth_dev.max_iscsi_conn = + (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> + BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; + + bp->cnic_eth_dev.max_fcoe_conn = + (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> + BNX2X_MAX_FCOE_INIT_CONN_SHIFT; + + /* Read the WWN: */ + if (!IS_MF(bp)) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_lower); + } else if (!IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); + + /* + * Read the WWN info only if the FCoE feature is enabled for + * this function. + */ + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_lower); + } + } + + BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n", + bp->cnic_eth_dev.max_iscsi_conn, + bp->cnic_eth_dev.max_fcoe_conn); + + /* + * If maximum allowed number of connections is zero - + * disable the feature. + */ + if (!bp->cnic_eth_dev.max_iscsi_conn) + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + + if (!bp->cnic_eth_dev.max_fcoe_conn) + bp->flags |= NO_FCOE_FLAG; +} +#endif + +static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) +{ + u32 val, val2; + int func = BP_ABS_FUNC(bp); + int port = BP_PORT(bp); +#ifdef BCM_CNIC + u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; + u8 *fip_mac = bp->fip_mac; +#endif + + /* Zero primary MAC configuration */ + memset(bp->dev->dev_addr, 0, ETH_ALEN); + + if (BP_NOMCP(bp)) { + BNX2X_ERROR("warning: random MAC workaround active\n"); + random_ether_addr(bp->dev->dev_addr); + } else if (IS_MF(bp)) { + val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); + val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); + if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && + (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + +#ifdef BCM_CNIC + /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or + * FCoE MAC then the appropriate feature should be disabled. + */ + if (IS_MF_SI(bp)) { + u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); + if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { + val2 = MF_CFG_RD(bp, func_ext_config[func]. + iscsi_mac_addr_upper); + val = MF_CFG_RD(bp, func_ext_config[func]. + iscsi_mac_addr_lower); + bnx2x_set_mac_buf(iscsi_mac, val, val2); + BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", + iscsi_mac); + } else + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + val2 = MF_CFG_RD(bp, func_ext_config[func]. + fcoe_mac_addr_upper); + val = MF_CFG_RD(bp, func_ext_config[func]. 
+ fcoe_mac_addr_lower); + bnx2x_set_mac_buf(fip_mac, val, val2); + BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n", + fip_mac); + + } else + bp->flags |= NO_FCOE_FLAG; + } +#endif + } else { + /* in SF read MACs from port configuration */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + +#ifdef BCM_CNIC + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. + iscsi_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port]. + iscsi_mac_lower); + bnx2x_set_mac_buf(iscsi_mac, val, val2); + + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_lower); + bnx2x_set_mac_buf(fip_mac, val, val2); +#endif + } + + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); + +#ifdef BCM_CNIC + /* Set the FCoE MAC in MF_SD mode */ + if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp)) + memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); + + /* Disable iSCSI if MAC configuration is + * invalid. + */ + if (!is_valid_ether_addr(iscsi_mac)) { + bp->flags |= NO_ISCSI_FLAG; + memset(iscsi_mac, 0, ETH_ALEN); + } + + /* Disable FCoE if MAC configuration is + * invalid. + */ + if (!is_valid_ether_addr(fip_mac)) { + bp->flags |= NO_FCOE_FLAG; + memset(bp->fip_mac, 0, ETH_ALEN); + } +#endif + + if (!is_valid_ether_addr(bp->dev->dev_addr)) + dev_err(&bp->pdev->dev, + "bad Ethernet MAC address configuration: " + "%pM, change it manually before bringing up " + "the appropriate network interface\n", + bp->dev->dev_addr); +} + +static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) +{ + int /*abs*/func = BP_ABS_FUNC(bp); + int vn; + u32 val = 0; + int rc = 0; + + bnx2x_get_common_hwinfo(bp); + + /* + * initialize IGU parameters + */ + if (CHIP_IS_E1x(bp)) { + bp->common.int_block = INT_BLOCK_HC; + + bp->igu_dsb_id = DEF_SB_IGU_ID; + bp->igu_base_sb = 0; + } else { + bp->common.int_block = INT_BLOCK_IGU; + val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + int tout = 5000; + + BNX2X_DEV_INFO("FORCING Normal Mode\n"); + + val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); + REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); + REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); + + while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + tout--; + usleep_range(1000, 1000); + } + + if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + dev_err(&bp->pdev->dev, + "FORCING Normal Mode failed!!!\n"); + return -EPERM; + } + } + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); + bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; + } else + BNX2X_DEV_INFO("IGU Normal Mode\n"); + + bnx2x_get_igu_cam_info(bp); + + } + + /* + * set base FW non-default (fast path) status block id, this value is + * used to initialize the fw_sb_id saved on the fp/queue structure to + * determine the id used by the FW. + */ + if (CHIP_IS_E1x(bp)) + bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); + else /* + * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of + * the same queue are indicated on the same IGU SB). So we prefer + * FW and IGU SBs to be the same value. 
+ */ + bp->base_fw_ndsb = bp->igu_base_sb; + + BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" + "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, + bp->igu_sb_cnt, bp->base_fw_ndsb); + + /* + * Initialize MF configuration + */ + + bp->mf_ov = 0; + bp->mf_mode = 0; + vn = BP_E1HVN(bp); + + if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { + BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", + bp->common.shmem2_base, SHMEM2_RD(bp, size), + (u32)offsetof(struct shmem2_region, mf_cfg_addr)); + + if (SHMEM2_HAS(bp, mf_cfg_addr)) + bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); + else + bp->common.mf_cfg_base = bp->common.shmem_base + + offsetof(struct shmem_region, func_mb) + + E1H_FUNC_MAX * sizeof(struct drv_func_mb); + /* + * get mf configuration: + * 1. existence of MF configuration + * 2. MAC address must be legal (check only upper bytes) + * for Switch-Independent mode; + * OVLAN must be legal for Switch-Dependent mode + * 3. SF_MODE configures specific MF mode + */ + if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { + /* get mf configuration */ + val = SHMEM_RD(bp, + dev_info.shared_feature_config.config); + val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; + + switch (val) { + case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: + val = MF_CFG_RD(bp, func_mf_config[func]. + mac_upper); + /* check for legal mac (upper bytes)*/ + if (val != 0xffff) { + bp->mf_mode = MULTI_FUNCTION_SI; + bp->mf_config[vn] = MF_CFG_RD(bp, + func_mf_config[func].config); + } else + BNX2X_DEV_INFO("illegal MAC address " + "for SI\n"); + break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: + /* get OV configuration */ + val = MF_CFG_RD(bp, + func_mf_config[FUNC_0].e1hov_tag); + val &= FUNC_MF_CFG_E1HOV_TAG_MASK; + + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_config[vn] = MF_CFG_RD(bp, + func_mf_config[func].config); + } else + BNX2X_DEV_INFO("illegal OV for SD\n"); + break; + default: + /* Unknown configuration: reset mf_config */ + bp->mf_config[vn] = 0; + BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val); + } + } + + BNX2X_DEV_INFO("%s function mode\n", + IS_MF(bp) ? 
"multi" : "single"); + + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK; + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_ov = val; + bp->path_has_ovlan = true; + + BNX2X_DEV_INFO("MF OV for func %d is %d " + "(0x%04x)\n", func, bp->mf_ov, + bp->mf_ov); + } else { + dev_err(&bp->pdev->dev, + "No valid MF OV for func %d, " + "aborting\n", func); + return -EPERM; + } + break; + case MULTI_FUNCTION_SI: + BNX2X_DEV_INFO("func %d is in MF " + "switch-independent mode\n", func); + break; + default: + if (vn) { + dev_err(&bp->pdev->dev, + "VN %d is in a single function mode, " + "aborting\n", vn); + return -EPERM; + } + break; + } + + /* check if other port on the path needs ovlan: + * Since MF configuration is shared between ports + * Possible mixed modes are only + * {SF, SI} {SF, SD} {SD, SF} {SI, SF} + */ + if (CHIP_MODE_IS_4_PORT(bp) && + !bp->path_has_ovlan && + !IS_MF(bp) && + bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { + u8 other_port = !BP_PORT(bp); + u8 other_func = BP_PATH(bp) + 2*other_port; + val = MF_CFG_RD(bp, + func_mf_config[other_func].e1hov_tag); + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) + bp->path_has_ovlan = true; + } + } + + /* adjust igu_sb_cnt to MF for E1x */ + if (CHIP_IS_E1x(bp) && IS_MF(bp)) + bp->igu_sb_cnt /= E1HVN_MAX; + + /* port info */ + bnx2x_get_port_hwinfo(bp); + + if (!BP_NOMCP(bp)) { + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + } + + /* Get MAC addresses */ + bnx2x_get_mac_hwinfo(bp); + +#ifdef BCM_CNIC + bnx2x_get_cnic_info(bp); +#endif + + /* Get current FW pulse sequence */ + if (!BP_NOMCP(bp)) { + int mb_idx = BP_FW_MB_IDX(bp); + + bp->fw_drv_pulse_wr_seq = + (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); + BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + } + + return rc; +} + +static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) +{ + int cnt, i, block_end, rodi; + char vpd_data[BNX2X_VPD_LEN+1]; + char str_id_reg[VENDOR_ID_LEN+1]; + char str_id_cap[VENDOR_ID_LEN+1]; + u8 len; + + cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); + memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); + + if (cnt < BNX2X_VPD_LEN) + goto out_not_found; + + i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, + PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + + block_end = i + PCI_VPD_LRDT_TAG_SIZE + + pci_vpd_lrdt_size(&vpd_data[i]); + + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > BNX2X_VPD_LEN) + goto out_not_found; + + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (rodi < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[rodi]); + + if (len != VENDOR_ID_LEN) + goto out_not_found; + + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + + /* vendor specific info */ + snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); + snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); + if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || + !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { + + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (rodi >= 0) { + len = pci_vpd_info_field_size(&vpd_data[rodi]); + + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + + if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { + memcpy(bp->fw_ver, &vpd_data[rodi], len); + bp->fw_ver[len] = ' '; + } + } + return; + } 
+out_not_found: + return; +} + +static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) +{ + u32 flags = 0; + + if (CHIP_REV_IS_FPGA(bp)) + SET_FLAGS(flags, MODE_FPGA); + else if (CHIP_REV_IS_EMUL(bp)) + SET_FLAGS(flags, MODE_EMUL); + else + SET_FLAGS(flags, MODE_ASIC); + + if (CHIP_MODE_IS_4_PORT(bp)) + SET_FLAGS(flags, MODE_PORT4); + else + SET_FLAGS(flags, MODE_PORT2); + + if (CHIP_IS_E2(bp)) + SET_FLAGS(flags, MODE_E2); + else if (CHIP_IS_E3(bp)) { + SET_FLAGS(flags, MODE_E3); + if (CHIP_REV(bp) == CHIP_REV_Ax) + SET_FLAGS(flags, MODE_E3_A0); + else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ + SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); + } + + if (IS_MF(bp)) { + SET_FLAGS(flags, MODE_MF); + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + SET_FLAGS(flags, MODE_MF_SD); + break; + case MULTI_FUNCTION_SI: + SET_FLAGS(flags, MODE_MF_SI); + break; + } + } else + SET_FLAGS(flags, MODE_SF); + +#if defined(__LITTLE_ENDIAN) + SET_FLAGS(flags, MODE_LITTLE_ENDIAN); +#else /*(__BIG_ENDIAN)*/ + SET_FLAGS(flags, MODE_BIG_ENDIAN); +#endif + INIT_MODE_FLAGS(bp) = flags; +} + +static int __devinit bnx2x_init_bp(struct bnx2x *bp) +{ + int func; + int timer_interval; + int rc; + + mutex_init(&bp->port.phy_mutex); + mutex_init(&bp->fw_mb_mutex); + spin_lock_init(&bp->stats_lock); +#ifdef BCM_CNIC + mutex_init(&bp->cnic_mutex); +#endif + + INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); + INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); + INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); + rc = bnx2x_get_hwinfo(bp); + if (rc) + return rc; + + bnx2x_set_modes_bitmap(bp); + + rc = bnx2x_alloc_mem_bp(bp); + if (rc) + return rc; + + bnx2x_read_fwinfo(bp); + + func = BP_FUNC(bp); + + /* need to reset chip if undi was active */ + if (!BP_NOMCP(bp)) + bnx2x_undi_unload(bp); + + if (CHIP_REV_IS_FPGA(bp)) + dev_err(&bp->pdev->dev, "FPGA detected\n"); + + if (BP_NOMCP(bp) && (func == 0)) + dev_err(&bp->pdev->dev, "MCP disabled, " + "must load devices in order!\n"); + + bp->multi_mode = multi_mode; + + /* Set TPA flags */ + if (disable_tpa) { + bp->flags &= ~TPA_ENABLE_FLAG; + bp->dev->features &= ~NETIF_F_LRO; + } else { + bp->flags |= TPA_ENABLE_FLAG; + bp->dev->features |= NETIF_F_LRO; + } + bp->disable_tpa = disable_tpa; + + if (CHIP_IS_E1(bp)) + bp->dropless_fc = 0; + else + bp->dropless_fc = dropless_fc; + + bp->mrrs = mrrs; + + bp->tx_ring_size = MAX_TX_AVAIL; + + /* make sure that the numbers are in the right granularity */ + bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; + bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; + + timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); + bp->current_interval = (poll ? 
poll : timer_interval); + + init_timer(&bp->timer); + bp->timer.expires = jiffies + bp->current_interval; + bp->timer.data = (unsigned long) bp; + bp->timer.function = bnx2x_timer; + + bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); + bnx2x_dcbx_init_params(bp); + +#ifdef BCM_CNIC + if (CHIP_IS_E1x(bp)) + bp->cnic_base_cl_id = FP_SB_MAX_E1x; + else + bp->cnic_base_cl_id = FP_SB_MAX_E2; +#endif + + /* multiple tx priority */ + if (CHIP_IS_E1x(bp)) + bp->max_cos = BNX2X_MULTI_TX_COS_E1X; + if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) + bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; + if (CHIP_IS_E3B0(bp)) + bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; + + return rc; +} + + +/**************************************************************************** +* General service functions +****************************************************************************/ + +/* + * net_device service functions + */ + +/* called with rtnl_lock */ +static int bnx2x_open(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + bool global = false; + int other_engine = BP_PATH(bp) ? 0 : 1; + u32 other_load_counter, load_counter; + + netif_carrier_off(dev); + + bnx2x_set_power_state(bp, PCI_D0); + + other_load_counter = bnx2x_get_load_cnt(bp, other_engine); + load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp)); + + /* + * If parity had happen during the unload, then attentions + * and/or RECOVERY_IN_PROGRES may still be set. In this case we + * want the first function loaded on the current engine to + * complete the recovery. + */ + if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || + bnx2x_chk_parity_attn(bp, &global, true)) + do { + /* + * If there are attentions and they are in a global + * blocks, set the GLOBAL_RESET bit regardless whether + * it will be this function that will complete the + * recovery or not. + */ + if (global) + bnx2x_set_reset_global(bp); + + /* + * Only the first function on the current engine should + * try to recover in open. In case of attentions in + * global blocks only the first in the chip should try + * to recover. + */ + if ((!load_counter && + (!global || !other_load_counter)) && + bnx2x_trylock_leader_lock(bp) && + !bnx2x_leader_reset(bp)) { + netdev_info(bp->dev, "Recovered in open\n"); + break; + } + + /* recovery has failed... */ + bnx2x_set_power_state(bp, PCI_D3hot); + bp->recovery_state = BNX2X_RECOVERY_FAILED; + + netdev_err(bp->dev, "Recovery flow hasn't been properly" + " completed yet. Try again later. 
If u still see this" + " message after a few retries then power cycle is" + " required.\n"); + + return -EAGAIN; + } while (0); + + bp->recovery_state = BNX2X_RECOVERY_DONE; + return bnx2x_nic_load(bp, LOAD_OPEN); +} + +/* called with rtnl_lock */ +static int bnx2x_close(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* Unload the driver, release IRQs */ + bnx2x_nic_unload(bp, UNLOAD_CLOSE); + + /* Power off */ + bnx2x_set_power_state(bp, PCI_D3hot); + + return 0; +} + +static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p) +{ + int mc_count = netdev_mc_count(bp->dev); + struct bnx2x_mcast_list_elem *mc_mac = + kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); + struct netdev_hw_addr *ha; + + if (!mc_mac) + return -ENOMEM; + + INIT_LIST_HEAD(&p->mcast_list); + + netdev_for_each_mc_addr(ha, bp->dev) { + mc_mac->mac = bnx2x_mc_addr(ha); + list_add_tail(&mc_mac->link, &p->mcast_list); + mc_mac++; + } + + p->mcast_list_len = mc_count; + + return 0; +} + +static inline void bnx2x_free_mcast_macs_list( + struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_mcast_list_elem *mc_mac = + list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, + link); + + WARN_ON(!mc_mac); + kfree(mc_mac); +} + +/** + * bnx2x_set_uc_list - configure a new unicast MACs list. + * + * @bp: driver handle + * + * We will use zero (0) as a MAC type for these MACs. + */ +static inline int bnx2x_set_uc_list(struct bnx2x *bp) +{ + int rc; + struct net_device *dev = bp->dev; + struct netdev_hw_addr *ha; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; + unsigned long ramrod_flags = 0; + + /* First schedule a cleanup up of old configuration */ + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); + if (rc < 0) { + BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); + return rc; + } + + netdev_for_each_uc_addr(ha, dev) { + rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, + BNX2X_UC_LIST_MAC, &ramrod_flags); + if (rc < 0) { + BNX2X_ERR("Failed to schedule ADD operations: %d\n", + rc); + return rc; + } + } + + /* Execute the pending commands */ + __set_bit(RAMROD_CONT, &ramrod_flags); + return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, + BNX2X_UC_LIST_MAC, &ramrod_flags); +} + +static inline int bnx2x_set_mc_list(struct bnx2x *bp) +{ + struct net_device *dev = bp->dev; + struct bnx2x_mcast_ramrod_params rparam = {0}; + int rc = 0; + + rparam.mcast_obj = &bp->mcast_obj; + + /* first, clear all configured multicast MACs */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) { + BNX2X_ERR("Failed to clear multicast " + "configuration: %d\n", rc); + return rc; + } + + /* then, configure a new MACs list */ + if (netdev_mc_count(dev)) { + rc = bnx2x_init_mcast_macs_list(bp, &rparam); + if (rc) { + BNX2X_ERR("Failed to create multicast MACs " + "list: %d\n", rc); + return rc; + } + + /* Now add the new MACs */ + rc = bnx2x_config_mcast(bp, &rparam, + BNX2X_MCAST_CMD_ADD); + if (rc < 0) + BNX2X_ERR("Failed to set a new multicast " + "configuration: %d\n", rc); + + bnx2x_free_mcast_macs_list(&rparam); + } + + return rc; +} + + +/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ +void bnx2x_set_rx_mode(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 rx_mode = BNX2X_RX_MODE_NORMAL; + + if (bp->state != BNX2X_STATE_OPEN) { + DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); + return; + } + + DP(NETIF_MSG_IFUP, "dev->flags = %x\n", 
bp->dev->flags); + + if (dev->flags & IFF_PROMISC) + rx_mode = BNX2X_RX_MODE_PROMISC; + else if ((dev->flags & IFF_ALLMULTI) || + ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && + CHIP_IS_E1(bp))) + rx_mode = BNX2X_RX_MODE_ALLMULTI; + else { + /* some multicasts */ + if (bnx2x_set_mc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_ALLMULTI; + + if (bnx2x_set_uc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_PROMISC; + } + + bp->rx_mode = rx_mode; + + /* Schedule the rx_mode command */ + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { + set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + return; + } + + bnx2x_set_storm_rx_mode(bp); +} + +/* called with rtnl_lock */ +static int bnx2x_mdio_read(struct net_device *netdev, int prtad, + int devad, u16 addr) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 value; + int rc; + + DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", + prtad, devad, addr); + + /* The HW expects different devad if CL22 is used */ + devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); + bnx2x_release_phy_lock(bp); + DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); + + if (!rc) + rc = value; + return rc; +} + +/* called with rtnl_lock */ +static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct bnx2x *bp = netdev_priv(netdev); + int rc; + + DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," + " value 0x%x\n", prtad, devad, addr, value); + + /* The HW expects different devad if CL22 is used */ + devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); + bnx2x_release_phy_lock(bp); + return rc; +} + +/* called with rtnl_lock */ +static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + struct mii_ioctl_data *mdio = if_mii(ifr); + + DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", + mdio->phy_id, mdio->reg_num, mdio->val_in); + + if (!netif_running(dev)) + return -EAGAIN; + + return mdio_mii_ioctl(&bp->mdio, mdio, cmd); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void poll_bnx2x(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + disable_irq(bp->pdev->irq); + bnx2x_interrupt(bp->pdev->irq, dev); + enable_irq(bp->pdev->irq); +} +#endif + +static const struct net_device_ops bnx2x_netdev_ops = { + .ndo_open = bnx2x_open, + .ndo_stop = bnx2x_close, + .ndo_start_xmit = bnx2x_start_xmit, + .ndo_select_queue = bnx2x_select_queue, + .ndo_set_rx_mode = bnx2x_set_rx_mode, + .ndo_set_mac_address = bnx2x_change_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = bnx2x_ioctl, + .ndo_change_mtu = bnx2x_change_mtu, + .ndo_fix_features = bnx2x_fix_features, + .ndo_set_features = bnx2x_set_features, + .ndo_tx_timeout = bnx2x_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_bnx2x, +#endif + .ndo_setup_tc = bnx2x_setup_tc, + +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) + .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, +#endif +}; + +static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) +{ + struct device *dev = &bp->pdev->dev; + + if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { + bp->flags |= USING_DAC_FLAG; + if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { + dev_err(dev, "dma_set_coherent_mask failed, " + 
"aborting\n"); + return -EIO; + } + } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { + dev_err(dev, "System does not support DMA, aborting\n"); + return -EIO; + } + + return 0; +} + +static int __devinit bnx2x_init_dev(struct pci_dev *pdev, + struct net_device *dev, + unsigned long board_type) +{ + struct bnx2x *bp; + int rc; + + SET_NETDEV_DEV(dev, &pdev->dev); + bp = netdev_priv(dev); + + bp->dev = dev; + bp->pdev = pdev; + bp->flags = 0; + bp->pf_num = PCI_FUNC(pdev->devfn); + + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&bp->pdev->dev, + "Cannot enable PCI device, aborting\n"); + goto err_out; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, "Cannot find second PCI device" + " base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + if (atomic_read(&pdev->enable_cnt) == 1) { + rc = pci_request_regions(pdev, DRV_MODULE_NAME); + if (rc) { + dev_err(&bp->pdev->dev, + "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable; + } + + pci_set_master(pdev); + pci_save_state(pdev); + } + + bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (bp->pm_cap == 0) { + dev_err(&bp->pdev->dev, + "Cannot find power management capability, aborting\n"); + rc = -EIO; + goto err_out_release; + } + + if (!pci_is_pcie(pdev)) { + dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); + rc = -EIO; + goto err_out_release; + } + + rc = bnx2x_set_coherency_mask(bp); + if (rc) + goto err_out_release; + + dev->mem_start = pci_resource_start(pdev, 0); + dev->base_addr = dev->mem_start; + dev->mem_end = pci_resource_end(pdev, 0); + + dev->irq = pdev->irq; + + bp->regview = pci_ioremap_bar(pdev, 0); + if (!bp->regview) { + dev_err(&bp->pdev->dev, + "Cannot map register space, aborting\n"); + rc = -ENOMEM; + goto err_out_release; + } + + bnx2x_set_power_state(bp, PCI_D0); + + /* clean indirect addresses */ + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); ++ /* Clean the following indirect addresses for all functions since it ++ * is not used by the driver. ++ */ ++ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); ++ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); ++ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); ++ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); ++ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); ++ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); ++ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); ++ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + + /* + * Enable internal target-read (in case we are probed after PF FLR). + * Must be done prior to any BAR read access. 
Only for 57712 and up + */ + if (board_type != BCM57710 && + board_type != BCM57711 && + board_type != BCM57711E) + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + /* Reset the load counter */ + bnx2x_clear_load_cnt(bp); + + dev->watchdog_timeo = TX_TIMEOUT; + + dev->netdev_ops = &bnx2x_netdev_ops; + bnx2x_set_ethtool_ops(dev); + + dev->priv_flags |= IFF_UNICAST_FLT; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX; + + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; + + dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; + if (bp->flags & USING_DAC_FLAG) + dev->features |= NETIF_F_HIGHDMA; + + /* Add Loopback capability to the device */ + dev->hw_features |= NETIF_F_LOOPBACK; + +#ifdef BCM_DCBNL + dev->dcbnl_ops = &bnx2x_dcbnl_ops; +#endif + + /* get_port_hwinfo() will set prtad and mmds properly */ + bp->mdio.prtad = MDIO_PRTAD_NONE; + bp->mdio.mmds = 0; + bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + bp->mdio.dev = dev; + bp->mdio.mdio_read = bnx2x_mdio_read; + bp->mdio.mdio_write = bnx2x_mdio_write; + + return 0; + +err_out_release: + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + +err_out_disable: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + +err_out: + return rc; +} + +static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, + int *width, int *speed) +{ + u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); + + *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; + + /* return value of 1=2.5GHz 2=5GHz */ + *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; +} + +static int bnx2x_check_firmware(struct bnx2x *bp) +{ + const struct firmware *firmware = bp->firmware; + struct bnx2x_fw_file_hdr *fw_hdr; + struct bnx2x_fw_file_section *sections; + u32 offset, len, num_ops; + u16 *ops_offsets; + int i; + const u8 *fw_ver; + + if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) + return -EINVAL; + + fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; + sections = (struct bnx2x_fw_file_section *)fw_hdr; + + /* Make sure none of the offsets and sizes make us read beyond + * the end of the firmware data */ + for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { + offset = be32_to_cpu(sections[i].offset); + len = be32_to_cpu(sections[i].len); + if (offset + len > firmware->size) { + dev_err(&bp->pdev->dev, + "Section %d length is out of bounds\n", i); + return -EINVAL; + } + } + + /* Likewise for the init_ops offsets */ + offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); + ops_offsets = (u16 *)(firmware->data + offset); + num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); + + for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { + if (be16_to_cpu(ops_offsets[i]) > num_ops) { + dev_err(&bp->pdev->dev, + "Section offset %d is out of bounds\n", i); + return -EINVAL; + } + } + + /* Check FW version */ + offset = be32_to_cpu(fw_hdr->fw_version.offset); + fw_ver = firmware->data + offset; + if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || + (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || + (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || + (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { + dev_err(&bp->pdev->dev, + "Bad FW version:%d.%d.%d.%d. 
Should be %d.%d.%d.%d\n", + fw_ver[0], fw_ver[1], fw_ver[2], + fw_ver[3], BCM_5710_FW_MAJOR_VERSION, + BCM_5710_FW_MINOR_VERSION, + BCM_5710_FW_REVISION_VERSION, + BCM_5710_FW_ENGINEERING_VERSION); + return -EINVAL; + } + + return 0; +} + +static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +{ + const __be32 *source = (const __be32 *)_source; + u32 *target = (u32 *)_target; + u32 i; + + for (i = 0; i < n/4; i++) + target[i] = be32_to_cpu(source[i]); +} + +/* + Ops array is stored in the following format: + {op(8bit), offset(24bit, big endian), data(32bit, big endian)} + */ +static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) +{ + const __be32 *source = (const __be32 *)_source; + struct raw_op *target = (struct raw_op *)_target; + u32 i, j, tmp; + + for (i = 0, j = 0; i < n/8; i++, j += 2) { + tmp = be32_to_cpu(source[j]); + target[i].op = (tmp >> 24) & 0xff; + target[i].offset = tmp & 0xffffff; + target[i].raw_data = be32_to_cpu(source[j + 1]); + } +} + +/** + * IRO array is stored in the following format: + * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } + */ +static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) +{ + const __be32 *source = (const __be32 *)_source; + struct iro *target = (struct iro *)_target; + u32 i, j, tmp; + + for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { + target[i].base = be32_to_cpu(source[j]); + j++; + tmp = be32_to_cpu(source[j]); + target[i].m1 = (tmp >> 16) & 0xffff; + target[i].m2 = tmp & 0xffff; + j++; + tmp = be32_to_cpu(source[j]); + target[i].m3 = (tmp >> 16) & 0xffff; + target[i].size = tmp & 0xffff; + j++; + } +} + +static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +{ + const __be16 *source = (const __be16 *)_source; + u16 *target = (u16 *)_target; + u32 i; + + for (i = 0; i < n/2; i++) + target[i] = be16_to_cpu(source[i]); +} + +#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ +do { \ + u32 len = be32_to_cpu(fw_hdr->arr.len); \ + bp->arr = kmalloc(len, GFP_KERNEL); \ + if (!bp->arr) { \ + pr_err("Failed to allocate %d bytes for "#arr"\n", len); \ + goto lbl; \ + } \ + func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ + (u8 *)bp->arr, len); \ +} while (0) + +int bnx2x_init_firmware(struct bnx2x *bp) +{ + const char *fw_file_name; + struct bnx2x_fw_file_hdr *fw_hdr; + int rc; + + if (CHIP_IS_E1(bp)) + fw_file_name = FW_FILE_NAME_E1; + else if (CHIP_IS_E1H(bp)) + fw_file_name = FW_FILE_NAME_E1H; + else if (!CHIP_IS_E1x(bp)) + fw_file_name = FW_FILE_NAME_E2; + else { + BNX2X_ERR("Unsupported chip revision\n"); + return -EINVAL; + } + + BNX2X_DEV_INFO("Loading %s\n", fw_file_name); + + rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); + if (rc) { + BNX2X_ERR("Can't load firmware file %s\n", fw_file_name); + goto request_firmware_exit; + } + + rc = bnx2x_check_firmware(bp); + if (rc) { + BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); + goto request_firmware_exit; + } + + fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; + + /* Initialize the pointers to the init arrays */ + /* Blob */ + BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); + + /* Opcodes */ + BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); + + /* Offsets */ + BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, + be16_to_cpu_n); + + /* STORMs firmware */ + INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->tsem_int_table_data.offset); + INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + + 
be32_to_cpu(fw_hdr->tsem_pram_data.offset); + INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->usem_int_table_data.offset); + INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->usem_pram_data.offset); + INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->xsem_int_table_data.offset); + INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->xsem_pram_data.offset); + INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->csem_int_table_data.offset); + INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->csem_pram_data.offset); + /* IRO */ + BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); + + return 0; + +iro_alloc_err: + kfree(bp->init_ops_offsets); +init_offsets_alloc_err: + kfree(bp->init_ops); +init_ops_alloc_err: + kfree(bp->init_data); +request_firmware_exit: + release_firmware(bp->firmware); + + return rc; +} + +static void bnx2x_release_firmware(struct bnx2x *bp) +{ + kfree(bp->init_ops_offsets); + kfree(bp->init_ops); + kfree(bp->init_data); + release_firmware(bp->firmware); +} + + +static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { + .init_hw_cmn_chip = bnx2x_init_hw_common_chip, + .init_hw_cmn = bnx2x_init_hw_common, + .init_hw_port = bnx2x_init_hw_port, + .init_hw_func = bnx2x_init_hw_func, + + .reset_hw_cmn = bnx2x_reset_common, + .reset_hw_port = bnx2x_reset_port, + .reset_hw_func = bnx2x_reset_func, + + .gunzip_init = bnx2x_gunzip_init, + .gunzip_end = bnx2x_gunzip_end, + + .init_fw = bnx2x_init_firmware, + .release_fw = bnx2x_release_firmware, +}; + +void bnx2x__init_func_obj(struct bnx2x *bp) +{ + /* Prepare DMAE related driver resources */ + bnx2x_setup_dmae(bp); + + bnx2x_init_func_obj(bp, &bp->func_obj, + bnx2x_sp(bp, func_rdata), + bnx2x_sp_mapping(bp, func_rdata), + &bnx2x_func_sp_drv); +} + +/* must be called after sriov-enable */ +static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) +{ + int cid_count = BNX2X_L2_CID_COUNT(bp); + +#ifdef BCM_CNIC + cid_count += CNIC_CID_MAX; +#endif + return roundup(cid_count, QM_CID_ROUND); +} + +/** + * bnx2x_get_num_non_def_sbs - return the number of non-default SBs + * + * @pdev: pci device + * + */ +static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) +{ + int pos; + u16 control; + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + + /* + * If MSI-X is not supported - return number of SBs needed to support + * one fast path queue: one FP queue + SB for CNIC + */ + if (!pos) + return 1 + CNIC_PRESENT; + + /* + * The value in the PCI configuration space is the index of the last + * entry, namely one less than the actual size of the table, which is + * exactly what we want to return from this function: number of all SBs + * without the default SB. + */ + pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); + return control & PCI_MSIX_FLAGS_QSIZE; +} + +static int __devinit bnx2x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct bnx2x *bp; + int pcie_width, pcie_speed; + int rc, max_non_def_sbs; + int rx_count, tx_count, rss_count; + /* + * An estimated maximum supported CoS number according to the chip + * version. + * We will try to roughly estimate the maximum number of CoSes this chip + * may support in order to minimize the memory allocated for Tx + * netdev_queue's. 
This number will be accurately calculated during the + * initialization of bp->max_cos based on the chip versions AND chip + * revision in the bnx2x_init_bp(). + */ + u8 max_cos_est = 0; + + switch (ent->driver_data) { + case BCM57710: + case BCM57711: + case BCM57711E: + max_cos_est = BNX2X_MULTI_TX_COS_E1X; + break; + + case BCM57712: + case BCM57712_MF: + max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0; + break; + + case BCM57800: + case BCM57800_MF: + case BCM57810: + case BCM57810_MF: + case BCM57840: + case BCM57840_MF: + max_cos_est = BNX2X_MULTI_TX_COS_E3B0; + break; + + default: + pr_err("Unknown board_type (%ld), aborting\n", + ent->driver_data); + return -ENODEV; + } + + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); + + /* !!! FIXME !!! + * Do not allow the maximum SB count to grow above 16 + * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48. + * We will use the FP_SB_MAX_E1x macro for this matter. + */ + max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); + + WARN_ON(!max_non_def_sbs); + + /* Maximum number of RSS queues: one IGU SB goes to CNIC */ + rss_count = max_non_def_sbs - CNIC_PRESENT; + + /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ + rx_count = rss_count + FCOE_PRESENT; + + /* + * Maximum number of netdev Tx queues: + * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 + */ + tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; + + /* dev zeroed in init_etherdev */ + dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); + if (!dev) { + dev_err(&pdev->dev, "Cannot allocate net device\n"); + return -ENOMEM; + } + + bp = netdev_priv(dev); + + DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n", + tx_count, rx_count); + + bp->igu_sb_cnt = max_non_def_sbs; + bp->msg_enable = debug; + pci_set_drvdata(pdev, dev); + + rc = bnx2x_init_dev(pdev, dev, ent->driver_data); + if (rc < 0) { + free_netdev(dev); + return rc; + } + + DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs); + + rc = bnx2x_init_bp(bp); + if (rc) + goto init_one_exit; + + /* + * Map doorbells here as we need the real value of bp->max_cos which + * is initialized in bnx2x_init_bp(). + */ + bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + min_t(u64, BNX2X_DB_SIZE(bp), + pci_resource_len(pdev, 2))); + if (!bp->doorbells) { + dev_err(&bp->pdev->dev, + "Cannot map doorbell space, aborting\n"); + rc = -ENOMEM; + goto init_one_exit; + } + + /* calc qm_cid_count */ + bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); + +#ifdef BCM_CNIC + /* disable FCOE L2 queue for E1x and E3 */ + if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp)) + bp->flags |= NO_FCOE_FLAG; + +#endif + + /* Configure interrupt mode: try to enable MSI-X/MSI if + * needed, set bp->num_queues appropriately. + */ + bnx2x_set_int_mode(bp); + + /* Add all NAPI objects */ + bnx2x_add_all_napi(bp); + + rc = register_netdev(dev); + if (rc) { + dev_err(&pdev->dev, "Cannot register net device\n"); + goto init_one_exit; + } + +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) { + /* Add storage MAC address */ + rtnl_lock(); + dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } +#endif + + bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); + + netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + board_info[ent->driver_data].name, + (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), + pcie_width, + ((!CHIP_IS_E2(bp) && pcie_speed == 2) || + (CHIP_IS_E2(bp) && pcie_speed == 1)) ? 
+ "5GHz (Gen2)" : "2.5GHz", + dev->base_addr, bp->pdev->irq, dev->dev_addr); + + return 0; + +init_one_exit: + if (bp->regview) + iounmap(bp->regview); + + if (bp->doorbells) + iounmap(bp->doorbells); + + free_netdev(dev); + + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + return rc; +} + +static void __devexit bnx2x_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return; + } + bp = netdev_priv(dev); + +#ifdef BCM_CNIC + /* Delete storage MAC address */ + if (!NO_FCOE(bp)) { + rtnl_lock(); + dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } +#endif + +#ifdef BCM_DCBNL + /* Delete app tlvs from dcbnl */ + bnx2x_dcbnl_update_applist(bp, true); +#endif + + unregister_netdev(dev); + + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + + /* Power on: we can't let PCI layer write to us while we are in D3 */ + bnx2x_set_power_state(bp, PCI_D0); + + /* Disable MSI/MSI-X */ + bnx2x_disable_msi(bp); + + /* Power off */ + bnx2x_set_power_state(bp, PCI_D3hot); + + /* Make sure RESET task is not scheduled before continuing */ + cancel_delayed_work_sync(&bp->sp_rtnl_task); + + if (bp->regview) + iounmap(bp->regview); + + if (bp->doorbells) + iounmap(bp->doorbells); + + bnx2x_free_mem_bp(bp); + + free_netdev(dev); + + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int bnx2x_eeh_nic_unload(struct bnx2x *bp) +{ + int i; + + bp->state = BNX2X_STATE_ERROR; + + bp->rx_mode = BNX2X_RX_MODE_NONE; + +#ifdef BCM_CNIC + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); +#endif + /* Stop Tx */ + bnx2x_tx_disable(bp); + + bnx2x_netif_stop(bp, 0); + + del_timer_sync(&bp->timer); + + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + /* Release IRQs */ + bnx2x_free_irq(bp); + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + + netif_carrier_off(bp->dev); + + return 0; +} + +static void bnx2x_eeh_recover(struct bnx2x *bp) +{ + u32 val; + + mutex_init(&bp->port.phy_mutex); + + bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + bp->link_params.shmem_base = bp->common.shmem_base; + BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base); + + if (!bp->common.shmem_base || + (bp->common.shmem_base < 0xA0000) || + (bp->common.shmem_base >= 0xC0000)) { + BNX2X_DEV_INFO("MCP not active\n"); + bp->flags |= NO_MCP_FLAG; + return; + } + + val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); + if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + BNX2X_ERR("BAD MCP validity signature\n"); + + if (!BP_NOMCP(bp)) { + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + } +} + +/** + * bnx2x_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + rtnl_lock(); + + netif_device_detach(dev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(dev)) + bnx2x_eeh_nic_unload(bp); + + pci_disable_device(pdev); + + rtnl_unlock(); + + /* Request a slot reset */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnx2x_io_slot_reset - called after the PCI bus has been reset + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + + if (netif_running(dev)) + bnx2x_set_power_state(bp, PCI_D0); + + rtnl_unlock(); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * bnx2x_io_resume - called when traffic can start flowing again + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. + */ +static void bnx2x_io_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + netdev_err(bp->dev, "Handling parity error recovery. " + "Try again later\n"); + return; + } + + rtnl_lock(); + + bnx2x_eeh_recover(bp); + + if (netif_running(dev)) + bnx2x_nic_load(bp, LOAD_NORMAL); + + netif_device_attach(dev); + + rtnl_unlock(); +} + +static struct pci_error_handlers bnx2x_err_handler = { + .error_detected = bnx2x_io_error_detected, + .slot_reset = bnx2x_io_slot_reset, + .resume = bnx2x_io_resume, +}; + +static struct pci_driver bnx2x_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnx2x_pci_tbl, + .probe = bnx2x_init_one, + .remove = __devexit_p(bnx2x_remove_one), + .suspend = bnx2x_suspend, + .resume = bnx2x_resume, + .err_handler = &bnx2x_err_handler, +}; + +static int __init bnx2x_init(void) +{ + int ret; + + pr_info("%s", version); + + bnx2x_wq = create_singlethread_workqueue("bnx2x"); + if (bnx2x_wq == NULL) { + pr_err("Cannot create workqueue\n"); + return -ENOMEM; + } + + ret = pci_register_driver(&bnx2x_pci_driver); + if (ret) { + pr_err("Cannot register driver\n"); + destroy_workqueue(bnx2x_wq); + } + return ret; +} + +static void __exit bnx2x_cleanup(void) +{ + pci_unregister_driver(&bnx2x_pci_driver); + + destroy_workqueue(bnx2x_wq); +} + +void bnx2x_notify_link_changed(struct bnx2x *bp) +{ + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); +} + +module_init(bnx2x_init); +module_exit(bnx2x_cleanup); + +#ifdef BCM_CNIC +/** + * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). + * + * @bp: driver handle + * @set: set or clear the CAM entry + * + * This function will wait until the ramrod completion returns. + * Return 0 if success, -ENODEV if ramrod doesn't return. 
+ */ +static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) +{ + unsigned long ramrod_flags = 0; + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, + &bp->iscsi_l2_mac_obj, true, + BNX2X_ISCSI_ETH_MAC, &ramrod_flags); +} + +/* count denotes the number of new completions we have seen */ +static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) +{ + struct eth_spe *spe; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return; +#endif + + spin_lock_bh(&bp->spq_lock); + BUG_ON(bp->cnic_spq_pending < count); + bp->cnic_spq_pending -= count; + + + for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { + u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) + & SPE_HDR_CONN_TYPE) >> + SPE_HDR_CONN_TYPE_SHIFT; + u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) + >> SPE_HDR_CMD_ID_SHIFT) & 0xff; + + /* Set validation for iSCSI L2 client before sending SETUP + * ramrod + */ + if (type == ETH_CONNECTION_TYPE) { + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) + bnx2x_set_ctx_validation(bp, &bp->context. + vcxt[BNX2X_ISCSI_ETH_CID].eth, + BNX2X_ISCSI_ETH_CID); + } + + /* + * There may be not more than 8 L2, not more than 8 L5 SPEs + * and in the air. We also check that number of outstanding + * COMMON ramrods is not more than the EQ and SPQ can + * accommodate. + */ + if (type == ETH_CONNECTION_TYPE) { + if (!atomic_read(&bp->cq_spq_left)) + break; + else + atomic_dec(&bp->cq_spq_left); + } else if (type == NONE_CONNECTION_TYPE) { + if (!atomic_read(&bp->eq_spq_left)) + break; + else + atomic_dec(&bp->eq_spq_left); + } else if ((type == ISCSI_CONNECTION_TYPE) || + (type == FCOE_CONNECTION_TYPE)) { + if (bp->cnic_spq_pending >= + bp->cnic_eth_dev.max_kwqe_pending) + break; + else + bp->cnic_spq_pending++; + } else { + BNX2X_ERR("Unknown SPE type: %d\n", type); + bnx2x_panic(); + break; + } + + spe = bnx2x_sp_get_next(bp); + *spe = *bp->cnic_kwq_cons; + + DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", + bp->cnic_spq_pending, bp->cnic_kwq_pending, count); + + if (bp->cnic_kwq_cons == bp->cnic_kwq_last) + bp->cnic_kwq_cons = bp->cnic_kwq; + else + bp->cnic_kwq_cons++; + } + bnx2x_sp_prod_update(bp); + spin_unlock_bh(&bp->spq_lock); +} + +static int bnx2x_cnic_sp_queue(struct net_device *dev, + struct kwqe_16 *kwqes[], u32 count) +{ + struct bnx2x *bp = netdev_priv(dev); + int i; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -EIO; +#endif + + spin_lock_bh(&bp->spq_lock); + + for (i = 0; i < count; i++) { + struct eth_spe *spe = (struct eth_spe *)kwqes[i]; + + if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) + break; + + *bp->cnic_kwq_prod = *spe; + + bp->cnic_kwq_pending++; + + DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", + spe->hdr.conn_and_cmd_data, spe->hdr.type, + spe->data.update_data_addr.hi, + spe->data.update_data_addr.lo, + bp->cnic_kwq_pending); + + if (bp->cnic_kwq_prod == bp->cnic_kwq_last) + bp->cnic_kwq_prod = bp->cnic_kwq; + else + bp->cnic_kwq_prod++; + } + + spin_unlock_bh(&bp->spq_lock); + + if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) + bnx2x_cnic_sp_post(bp, 0); + + return i; +} + +static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) +{ + struct cnic_ops *c_ops; + int rc = 0; + + mutex_lock(&bp->cnic_mutex); + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_mutex)); + if (c_ops) + rc = c_ops->cnic_ctl(bp->cnic_data, ctl); + mutex_unlock(&bp->cnic_mutex); + + return rc; +} + +static int 
bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) +{ + struct cnic_ops *c_ops; + int rc = 0; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + rc = c_ops->cnic_ctl(bp->cnic_data, ctl); + rcu_read_unlock(); + + return rc; +} + +/* + * for commands that have no data + */ +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) +{ + struct cnic_ctl_info ctl = {0}; + + ctl.cmd = cmd; + + return bnx2x_cnic_ctl_send(bp, &ctl); +} + +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) +{ + struct cnic_ctl_info ctl = {0}; + + /* first we tell CNIC and only then we count this as a completion */ + ctl.cmd = CNIC_CTL_COMPLETION_CMD; + ctl.data.comp.cid = cid; + ctl.data.comp.error = err; + + bnx2x_cnic_ctl_send_bh(bp, &ctl); + bnx2x_cnic_sp_post(bp, 0); +} + + +/* Called with netif_addr_lock_bh() taken. + * Sets an rx_mode config for an iSCSI ETH client. + * Doesn't block. + * Completion should be checked outside. + */ +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) +{ + unsigned long accept_flags = 0, ramrod_flags = 0; + u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); + int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; + + if (start) { + /* Start accepting on iSCSI L2 ring. Accept all multicasts + * because it's the only way for UIO Queue to accept + * multicasts (in non-promiscuous mode only one Queue per + * function will receive multicast packets (leading in our + * case). + */ + __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + /* Clear STOP_PENDING bit if START is requested */ + clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); + + sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; + } else + /* Clear START_PENDING bit if STOP is requested */ + clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); + + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) + set_bit(sched_state, &bp->sp_state); + else { + __set_bit(RAMROD_RX, &ramrod_flags); + bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, + ramrod_flags); + } +} + + +static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + switch (ctl->cmd) { + case DRV_CTL_CTXTBL_WR_CMD: { + u32 index = ctl->data.io.offset; + dma_addr_t addr = ctl->data.io.dma_addr; + + bnx2x_ilt_wr(bp, index, addr); + break; + } + + case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { + int count = ctl->data.credit.credit_count; + + bnx2x_cnic_sp_post(bp, count); + break; + } + + /* rtnl_lock is held. 
*/ + case DRV_CTL_START_L2_CMD: { + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + unsigned long sp_bits = 0; + + /* Configure the iSCSI classification object */ + bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, + cp->iscsi_l2_client_id, + cp->iscsi_l2_cid, BP_FUNC(bp), + bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, BNX2X_OBJ_TYPE_RX, + &bp->macs_pool); + + /* Set iSCSI MAC address */ + rc = bnx2x_set_iscsi_eth_mac_addr(bp); + if (rc) + break; + + mmiowb(); + barrier(); + + /* Start accepting on iSCSI L2 ring */ + + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, true); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); + + break; + } + + /* rtnl_lock is held. */ + case DRV_CTL_STOP_L2_CMD: { + unsigned long sp_bits = 0; + + /* Stop accepting on iSCSI L2 ring */ + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, false); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); + + mmiowb(); + barrier(); + + /* Unset iSCSI L2 MAC */ + rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, + BNX2X_ISCSI_ETH_MAC, true); + break; + } + case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { + int count = ctl->data.credit.credit_count; + + smp_mb__before_atomic_inc(); + atomic_add(count, &bp->cq_spq_left); + smp_mb__after_atomic_inc(); + break; + } + + default: + BNX2X_ERR("unknown command %x\n", ctl->cmd); + rc = -EINVAL; + } + + return rc; +} + +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (bp->flags & USING_MSIX_FLAG) { + cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; + cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; + cp->irq_arr[0].vector = bp->msix_table[1].vector; + } else { + cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; + cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; + } + if (!CHIP_IS_E1x(bp)) + cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; + else + cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; + + cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); + cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); + cp->irq_arr[1].status_blk = bp->def_status_blk; + cp->irq_arr[1].status_blk_num = DEF_SB_ID; + cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; + + cp->num_irq = 2; +} + +static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, + void *data) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (ops == NULL) + return -EINVAL; + + bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!bp->cnic_kwq) + return -ENOMEM; + + bp->cnic_kwq_cons = bp->cnic_kwq; + bp->cnic_kwq_prod = bp->cnic_kwq; + bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; + + bp->cnic_spq_pending = 0; + bp->cnic_kwq_pending = 0; + + bp->cnic_data = data; + + cp->num_irq = 0; + cp->drv_state |= CNIC_DRV_STATE_REGD; + cp->iro_arr = bp->iro_arr; + + bnx2x_setup_cnic_irq_info(bp); + + rcu_assign_pointer(bp->cnic_ops, ops); + + return 0; +} + +static int bnx2x_unregister_cnic(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + mutex_lock(&bp->cnic_mutex); + 
cp->drv_state = 0; + rcu_assign_pointer(bp->cnic_ops, NULL); + mutex_unlock(&bp->cnic_mutex); + synchronize_rcu(); + kfree(bp->cnic_kwq); + bp->cnic_kwq = NULL; + + return 0; +} + +struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + /* If both iSCSI and FCoE are disabled - return NULL in + * order to indicate CNIC that it should not try to work + * with this device. + */ + if (NO_ISCSI(bp) && NO_FCOE(bp)) + return NULL; + + cp->drv_owner = THIS_MODULE; + cp->chip_id = CHIP_ID(bp); + cp->pdev = bp->pdev; + cp->io_base = bp->regview; + cp->io_base2 = bp->doorbells; + cp->max_kwqe_pending = 8; + cp->ctx_blk_size = CDU_ILT_PAGE_SZ; + cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + + bnx2x_cid_ilt_lines(bp); + cp->ctx_tbl_len = CNIC_ILT_LINES; + cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; + cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; + cp->drv_ctl = bnx2x_drv_ctl; + cp->drv_register_cnic = bnx2x_register_cnic; + cp->drv_unregister_cnic = bnx2x_unregister_cnic; + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; + cp->iscsi_l2_client_id = + bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; + + if (NO_ISCSI_OOO(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; + + if (NO_ISCSI(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; + + if (NO_FCOE(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; + + DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " + "starting cid %d\n", + cp->ctx_blk_size, + cp->ctx_tbl_offset, + cp->ctx_tbl_len, + cp->starting_cid); + return cp; +} +EXPORT_SYMBOL(bnx2x_cnic_probe); + +#endif /* BCM_CNIC */ + diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 27b5ecb11830,000000000000..40266c14e6dc mode 100644,000000..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@@ -1,7146 -1,0 +1,7162 @@@ +/* bnx2x_reg.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * The registers description starts with the register Access type followed + * by size in bits. For example [RW 32]. 
The access types are: + * R - Read only + * RC - Clear on read + * RW - Read/Write + * ST - Statistics register (clear on read) + * W - Write only + * WB - Wide bus register - the size is over 32 bits and it should be + * read/write in consecutive 32 bits accesses + * WR - Write Clear (write 1 to clear the bit) + * + */ +#ifndef BNX2X_REG_H +#define BNX2X_REG_H + +#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) +#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) +#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5) +#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1) +/* [RW 1] Initiate the ATC array - reset all the valid bits */ +#define ATC_REG_ATC_INIT_ARRAY 0x1100b8 +/* [R 1] ATC initalization done */ +#define ATC_REG_ATC_INIT_DONE 0x1100bc +/* [RC 6] Interrupt register #0 read clear */ +#define ATC_REG_ATC_INT_STS_CLR 0x1101c0 +/* [RW 5] Parity mask register #0 read/write */ +#define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [RC 5] Parity register #0 read clear */ +#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 +/* [RW 19] Interrupt mask register #0 read/write */ +#define BRB1_REG_BRB1_INT_MASK 0x60128 +/* [R 19] Interrupt register #0 read */ +#define BRB1_REG_BRB1_INT_STS 0x6011c +/* [RW 4] Parity mask register #0 read/write */ +#define BRB1_REG_BRB1_PRTY_MASK 0x60138 +/* [R 4] Parity register #0 read */ +#define BRB1_REG_BRB1_PRTY_STS 0x6012c +/* [RC 4] Parity register #0 read clear */ +#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130 +/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At + * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address + * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - + * following reset the first rbc access to this reg must be write; there can + * be no more rbc writes after the first one; there can be any number of rbc + * read following the first write; rbc access not following these rules will + * result in hang condition. */ +#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200 +/* [RW 10] The number of free blocks below which the full signal to class 0 + * is asserted */ +#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0 +#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230 +/* [RW 11] The number of free blocks above which the full signal to class 0 + * is de-asserted */ +#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4 +#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234 +/* [RW 11] The number of free blocks below which the full signal to class 1 + * is asserted */ +#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8 +#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238 +/* [RW 11] The number of free blocks above which the full signal to class 1 + * is de-asserted */ +#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc +#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c +/* [RW 11] The number of free blocks below which the full signal to the LB + * port is asserted */ +#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0 +/* [RW 10] The number of free blocks above which the full signal to the LB + * port is de-asserted */ +#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4 +/* [RW 10] The number of free blocks above which the High_llfc signal to + interface #n is de-asserted. */ +#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c +/* [RW 10] The number of free blocks below which the High_llfc signal to + interface #n is asserted. 
*/ +#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c +/* [RW 11] The number of blocks guarantied for the LB port */ +#define BRB1_REG_LB_GUARANTIED 0x601ec +/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port + * before signaling XON. */ +#define BRB1_REG_LB_GUARANTIED_HYST 0x60264 +/* [RW 24] LL RAM data. */ +#define BRB1_REG_LL_RAM 0x61000 +/* [RW 10] The number of free blocks above which the Low_llfc signal to + interface #n is de-asserted. */ +#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c +/* [RW 10] The number of free blocks below which the Low_llfc signal to + interface #n is asserted. */ +#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c +/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244 +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254 +/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1in MAC 0 + * before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258 +/* [RW 11] The number of blocks guarantied for class 0in MAC1.The register + * is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c +/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260 +/* [RW 11] The number of blocks guarantied for the MAC port. The register is + * applicable only when per_class_guaranty_mode is reset. */ +#define BRB1_REG_MAC_GUARANTIED_0 0x601e8 +#define BRB1_REG_MAC_GUARANTIED_1 0x60240 +/* [R 24] The number of full blocks. */ +#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090 +/* [ST 32] The number of cycles that the write_full signal towards MAC #0 + was asserted. */ +#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8 +#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc +#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8 +/* [ST 32] The number of cycles that the pause signal towards MAC #0 was + asserted. 
*/ +#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 +#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc +/* [RW 10] The number of free blocks below which the pause signal to class 0 + * is asserted */ +#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0 +#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220 +/* [RW 11] The number of free blocks above which the pause signal to class 0 + * is de-asserted */ +#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4 +#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224 +/* [RW 11] The number of free blocks below which the pause signal to class 1 + * is asserted */ +#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8 +#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228 +/* [RW 11] The number of free blocks above which the pause signal to class 1 + * is de-asserted */ +#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc +#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c +/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */ +#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 +#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c +/* [RW 10] Write client 0: Assert pause threshold. */ +#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 +#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c +/* [R 24] The number of full blocks occupied by port. */ +#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 +/* [RW 1] Reset the design by software. */ +#define BRB1_REG_SOFT_RESET 0x600dc +/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */ +#define CCM_REG_CAM_OCCUP 0xd0188 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_CFC_IFEN 0xd003c +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_CQM_IFEN 0xd000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. + Otherwise 0 is inserted. */ +#define CCM_REG_CCM_CQM_USE_Q 0xd00c0 +/* [RW 11] Interrupt mask register #0 read/write */ +#define CCM_REG_CCM_INT_MASK 0xd01e4 +/* [R 11] Interrupt register #0 read */ +#define CCM_REG_CCM_INT_STS 0xd01d8 +/* [RW 27] Parity mask register #0 read/write */ +#define CCM_REG_CCM_PRTY_MASK 0xd01f4 +/* [R 27] Parity register #0 read */ +#define CCM_REG_CCM_PRTY_STS 0xd01e8 +/* [RC 27] Parity register #0 read clear */ +#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec +/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the input message Reg1WbFlg isn't set. */ +#define CCM_REG_CCM_REG0_SZ 0xd00c4 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_STORM0_IFEN 0xd0004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_STORM1_IFEN 0xd0008 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define CCM_REG_CDU_AG_RD_IFEN 0xd0030 +/* [RW 1] CDU AG write Interface enable. 
If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define CCM_REG_CDU_AG_WR_IFEN 0xd002c +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define CCM_REG_CDU_SM_RD_IFEN 0xd0038 +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define CCM_REG_CDU_SM_WR_IFEN 0xd0034 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define CCM_REG_CFC_INIT_CRD 0xd0204 +/* [RW 2] Auxiliary counter flag Q number 1. */ +#define CCM_REG_CNT_AUX1_Q 0xd00c8 +/* [RW 2] Auxiliary counter flag Q number 2. */ +#define CCM_REG_CNT_AUX2_Q 0xd00cc +/* [RW 28] The CM header value for QM request (primary). */ +#define CCM_REG_CQM_CCM_HDR_P 0xd008c +/* [RW 28] The CM header value for QM request (secondary). */ +#define CCM_REG_CQM_CCM_HDR_S 0xd0090 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CQM_CCM_IFEN 0xd0014 +/* [RW 6] QM output initial credit. Max credit available - 32. Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define CCM_REG_CQM_INIT_CRD 0xd020c +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CQM_P_WEIGHT 0xd00b8 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CQM_S_WEIGHT 0xd00bc +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CSDM_IFEN 0xd0018 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the SDM interface is detected. */ +#define CCM_REG_CSDM_LENGTH_MIS 0xd0170 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CSDM_WEIGHT 0xd00b4 +/* [RW 28] The CM header for QM formatting in case of an error in the QM + inputs. */ +#define CCM_REG_ERR_CCM_HDR 0xd0094 +/* [RW 8] The Event ID in case the input message ErrorFlg is set. */ +#define CCM_REG_ERR_EVNT_ID 0xd0098 +/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define CCM_REG_FIC0_INIT_CRD 0xd0210 +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. 
*/ +#define CCM_REG_FIC1_INIT_CRD 0xd0214 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr; + ~ccm_registers_gr_ld0_pr.gr_ld0_pr and + ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and + outputs to STORM: aggregation; load FIC0; load FIC1 and store. */ +#define CCM_REG_GR_ARB_TYPE 0xd015c +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed; that the Store channel priority is + the compliment to 4 of the rest priorities - Aggregation channel; Load + (FIC0) channel and Load (FIC1). */ +#define CCM_REG_GR_LD0_PR 0xd0164 +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed; that the Store channel priority is + the compliment to 4 of the rest priorities - Aggregation channel; Load + (FIC0) channel and Load (FIC1). */ +#define CCM_REG_GR_LD1_PR 0xd0168 +/* [RW 2] General flags index. */ +#define CCM_REG_INV_DONE_Q 0xd0108 +/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM + context and sent to STORM; for a specific connection type. The double + REG-pairs are used in order to align to STORM context row size of 128 + bits. The offset of these data in the STORM context is always 0. Index + _(0..15) stands for the connection type (one of 16). */ +#define CCM_REG_N_SM_CTX_LD_0 0xd004c +#define CCM_REG_N_SM_CTX_LD_1 0xd0050 +#define CCM_REG_N_SM_CTX_LD_2 0xd0054 +#define CCM_REG_N_SM_CTX_LD_3 0xd0058 +#define CCM_REG_N_SM_CTX_LD_4 0xd005c +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_PBF_IFEN 0xd0028 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the pbf interface is detected. */ +#define CCM_REG_PBF_LENGTH_MIS 0xd0180 +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_PBF_WEIGHT 0xd00ac +#define CCM_REG_PHYS_QNUM1_0 0xd0134 +#define CCM_REG_PHYS_QNUM1_1 0xd0138 +#define CCM_REG_PHYS_QNUM2_0 0xd013c +#define CCM_REG_PHYS_QNUM2_1 0xd0140 +#define CCM_REG_PHYS_QNUM3_0 0xd0144 +#define CCM_REG_PHYS_QNUM3_1 0xd0148 +#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114 +#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118 +#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c +#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120 +#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124 +#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128 +#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c +#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_STORM_CCM_IFEN 0xd0010 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the STORM interface is detected. */ +#define CCM_REG_STORM_LENGTH_MIS 0xd016c +/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin) + mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for + weight 1(least prioritised); 2 stands for weight 2 (more prioritised); + tc. */ +#define CCM_REG_STORM_WEIGHT 0xd009c +/* [RW 1] Input tsem Interface enable. 
If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_TSEM_IFEN 0xd001c +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the tsem interface is detected. */ +#define CCM_REG_TSEM_LENGTH_MIS 0xd0174 +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_TSEM_WEIGHT 0xd00a0 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_USEM_IFEN 0xd0024 +/* [RC 1] Set when message length mismatch (relative to last indication) at + the usem interface is detected. */ +#define CCM_REG_USEM_LENGTH_MIS 0xd017c +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_USEM_WEIGHT 0xd00a8 +/* [RW 1] Input xsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_XSEM_IFEN 0xd0020 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the xsem interface is detected. */ +#define CCM_REG_XSEM_LENGTH_MIS 0xd0178 +/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_XSEM_WEIGHT 0xd00a4 +/* [RW 19] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - message length; [12:6] - message + pointer; 18:13] - next pointer. */ +#define CCM_REG_XX_DESCR_TABLE 0xd0300 +#define CCM_REG_XX_DESCR_TABLE_SIZE 24 +/* [R 7] Used to read the value of XX protection Free counter. */ +#define CCM_REG_XX_FREE 0xd0184 +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. Max credit available - 127. Write writes the initial credit + value; read returns the current value of the credit counter. Must be + initialized to maximum XX protected message size - 2 at start-up. */ +#define CCM_REG_XX_INIT_CRD 0xd0220 +/* [RW 7] The maximum number of pending messages; which may be stored in XX + protection. At read the ~ccm_registers_xx_free.xx_free counter is read. + At write comprises the start value of the ~ccm_registers_xx_free.xx_free + counter. */ +#define CCM_REG_XX_MSG_NUM 0xd0224 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044 +/* [RW 18] Indirect access to the XX table of the XX protection mechanism. + The fields are: [5:0] - tail pointer; 11:6] - Link List size; 17:12] - + header pointer. 
*/ +#define CCM_REG_XX_TABLE 0xd0280 +#define CDU_REG_CDU_CHK_MASK0 0x101000 +#define CDU_REG_CDU_CHK_MASK1 0x101004 +#define CDU_REG_CDU_CONTROL0 0x101008 +#define CDU_REG_CDU_DEBUG 0x101010 +#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020 +/* [RW 7] Interrupt mask register #0 read/write */ +#define CDU_REG_CDU_INT_MASK 0x10103c +/* [R 7] Interrupt register #0 read */ +#define CDU_REG_CDU_INT_STS 0x101030 +/* [RW 5] Parity mask register #0 read/write */ +#define CDU_REG_CDU_PRTY_MASK 0x10104c +/* [R 5] Parity register #0 read */ +#define CDU_REG_CDU_PRTY_STS 0x101040 +/* [RC 5] Parity register #0 read clear */ +#define CDU_REG_CDU_PRTY_STS_CLR 0x101044 +/* [RC 32] logging of error data in case of a CDU load error: + {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; + ype_error; ctual_active; ctual_compressed_context}; */ +#define CDU_REG_ERROR_DATA 0x101014 +/* [WB 216] L1TT ram access. each entry has the following format : + {mrege_regions[7:0]; ffset12[5:0]...offset0[5:0]; + ength12[5:0]...length0[5:0]; d12[3:0]...id0[3:0]} */ +#define CDU_REG_L1TT 0x101800 +/* [WB 24] MATT ram access. each entry has the following + format:{RegionLength[11:0]; egionOffset[11:0]} */ +#define CDU_REG_MATT 0x101100 +/* [RW 1] when this bit is set the CDU operates in e1hmf mode */ +#define CDU_REG_MF_MODE 0x101050 +/* [R 1] indication the initializing the activity counter by the hardware + was done. */ +#define CFC_REG_AC_INIT_DONE 0x104078 +/* [RW 13] activity counter ram access */ +#define CFC_REG_ACTIVITY_COUNTER 0x104400 +#define CFC_REG_ACTIVITY_COUNTER_SIZE 256 +/* [R 1] indication the initializing the cams by the hardware was done. */ +#define CFC_REG_CAM_INIT_DONE 0x10407c +/* [RW 2] Interrupt mask register #0 read/write */ +#define CFC_REG_CFC_INT_MASK 0x104108 +/* [R 2] Interrupt register #0 read */ +#define CFC_REG_CFC_INT_STS 0x1040fc +/* [RC 2] Interrupt register #0 read clear */ +#define CFC_REG_CFC_INT_STS_CLR 0x104100 +/* [RW 4] Parity mask register #0 read/write */ +#define CFC_REG_CFC_PRTY_MASK 0x104118 +/* [R 4] Parity register #0 read */ +#define CFC_REG_CFC_PRTY_STS 0x10410c +/* [RC 4] Parity register #0 read clear */ +#define CFC_REG_CFC_PRTY_STS_CLR 0x104110 +/* [RW 21] CID cam access (21:1 - Data; alid - 0) */ +#define CFC_REG_CID_CAM 0x104800 +#define CFC_REG_CONTROL0 0x104028 +#define CFC_REG_DEBUG0 0x104050 +/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error + vector) whether the cfc should be disabled upon it */ +#define CFC_REG_DISABLE_ON_ERROR 0x104044 +/* [RC 14] CFC error vector. when the CFC detects an internal error it will + set one of these bits. the bit description can be found in CFC + specifications */ +#define CFC_REG_ERROR_VECTOR 0x10403c +/* [WB 93] LCID info ram access */ +#define CFC_REG_INFO_RAM 0x105000 +#define CFC_REG_INFO_RAM_SIZE 1024 +#define CFC_REG_INIT_REG 0x10404c +#define CFC_REG_INTERFACES 0x104058 +/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this + field allows changing the priorities of the weighted-round-robin arbiter + which selects which CFC load client should be served next */ +#define CFC_REG_LCREQ_WEIGHTS 0x104084 +/* [RW 16] Link List ram access; data = {prev_lcid; ext_lcid} */ +#define CFC_REG_LINK_LIST 0x104c00 +#define CFC_REG_LINK_LIST_SIZE 256 +/* [R 1] indication the initializing the link list by the hardware was done. 
*/ +#define CFC_REG_LL_INIT_DONE 0x104074 +/* [R 9] Number of allocated LCIDs which are at empty state */ +#define CFC_REG_NUM_LCIDS_ALLOC 0x104020 +/* [R 9] Number of Arriving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 +#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 +/* [R 9] Number of Leaving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 +#define CFC_REG_WEAK_ENABLE_PF 0x104124 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 +#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 +#define CSDM_REG_AGG_INT_EVENT_11 0xc2064 +#define CSDM_REG_AGG_INT_EVENT_12 0xc2068 +#define CSDM_REG_AGG_INT_EVENT_13 0xc206c +#define CSDM_REG_AGG_INT_EVENT_14 0xc2070 +#define CSDM_REG_AGG_INT_EVENT_15 0xc2074 +#define CSDM_REG_AGG_INT_EVENT_16 0xc2078 +#define CSDM_REG_AGG_INT_EVENT_2 0xc2040 +#define CSDM_REG_AGG_INT_EVENT_3 0xc2044 +#define CSDM_REG_AGG_INT_EVENT_4 0xc2048 +#define CSDM_REG_AGG_INT_EVENT_5 0xc204c +#define CSDM_REG_AGG_INT_EVENT_6 0xc2050 +#define CSDM_REG_AGG_INT_EVENT_7 0xc2054 +#define CSDM_REG_AGG_INT_EVENT_8 0xc2058 +#define CSDM_REG_AGG_INT_EVENT_9 0xc205c +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define CSDM_REG_AGG_INT_MODE_10 0xc21e0 +#define CSDM_REG_AGG_INT_MODE_11 0xc21e4 +#define CSDM_REG_AGG_INT_MODE_12 0xc21e8 +#define CSDM_REG_AGG_INT_MODE_13 0xc21ec +#define CSDM_REG_AGG_INT_MODE_14 0xc21f0 +#define CSDM_REG_AGG_INT_MODE_15 0xc21f4 +#define CSDM_REG_AGG_INT_MODE_16 0xc21f8 +#define CSDM_REG_AGG_INT_MODE_6 0xc21d0 +#define CSDM_REG_AGG_INT_MODE_7 0xc21d4 +#define CSDM_REG_AGG_INT_MODE_8 0xc21d8 +#define CSDM_REG_AGG_INT_MODE_9 0xc21dc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c +/* [RW 16] The maximum value of the completion counter #1 */ +#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSDM_REG_CSDM_INT_MASK_0 0xc229c +#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac +/* [R 32] Interrupt register #0 read */ +#define CSDM_REG_CSDM_INT_STS_0 0xc2290 +#define CSDM_REG_CSDM_INT_STS_1 0xc22a0 +/* [RW 11] Parity mask register #0 read/write */ +#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc +/* [R 11] Parity register #0 read */ +#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 +/* [RC 11] Parity register #0 read clear */ +#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 +#define CSDM_REG_ENABLE_IN1 0xc2238 +#define CSDM_REG_ENABLE_IN2 0xc223c +#define CSDM_REG_ENABLE_OUT1 0xc2240 +#define CSDM_REG_ENABLE_OUT2 0xc2244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. 
*/ +#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc +/* [ST 32] The number of ACK after placement messages received */ +#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c +/* [ST 32] The number of packet end messages received from the parser */ +#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274 +/* [ST 32] The number of requests received from the pxp async if */ +#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278 +/* [ST 32] The number of commands received in queue 0 */ +#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248 +/* [ST 32] The number of commands received in queue 10 */ +#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c +/* [ST 32] The number of commands received in queue 11 */ +#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270 +/* [ST 32] The number of commands received in queue 1 */ +#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c +/* [ST 32] The number of commands received in queue 3 */ +#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250 +/* [ST 32] The number of commands received in queue 4 */ +#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254 +/* [ST 32] The number of commands received in queue 5 */ +#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258 +/* [ST 32] The number of commands received in queue 6 */ +#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c +/* [ST 32] The number of commands received in queue 7 */ +#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260 +/* [ST 32] The number of commands received in queue 8 */ +#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264 +/* [ST 32] The number of commands received in queue 9 */ +#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558 +/* [RW 32] Tick for timer counter. Applicable only when + ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define CSDM_REG_TIMER_TICK 0xc2000 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define CSEM_REG_ARB_CYCLE_SIZE 0x200034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define CSEM_REG_ARB_ELEMENT0 0x200020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 */ +#define CSEM_REG_ARB_ELEMENT1 0x200024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 + and ~csem_registers_arb_element1.arb_element1 */ +#define CSEM_REG_ARB_ELEMENT2 0x200028 +/* [RW 3] The source that is associated with arbitration element 3. 
Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~csem_registers_arb_element0.arb_element0 and + ~csem_registers_arb_element1.arb_element1 and + ~csem_registers_arb_element2.arb_element2 */ +#define CSEM_REG_ARB_ELEMENT3 0x20002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 + and ~csem_registers_arb_element1.arb_element1 and + ~csem_registers_arb_element2.arb_element2 and + ~csem_registers_arb_element3.arb_element3 */ +#define CSEM_REG_ARB_ELEMENT4 0x200030 +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSEM_REG_CSEM_INT_MASK_0 0x200110 +#define CSEM_REG_CSEM_INT_MASK_1 0x200120 +/* [R 32] Interrupt register #0 read */ +#define CSEM_REG_CSEM_INT_STS_0 0x200104 +#define CSEM_REG_CSEM_INT_STS_1 0x200114 +/* [RW 32] Parity mask register #0 read/write */ +#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 +#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 +/* [R 32] Parity register #0 read */ +#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 +#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 +/* [RC 32] Parity register #0 read clear */ +#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 +#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 +#define CSEM_REG_ENABLE_IN 0x2000a4 +#define CSEM_REG_ENABLE_OUT 0x2000a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ +#define CSEM_REG_FAST_MEMORY 0x220000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define CSEM_REG_FIC0_DISABLE 0x200224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define CSEM_REG_FIC1_DISABLE 0x200234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define CSEM_REG_INT_TABLE 0x200400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define CSEM_REG_MSG_NUM_FIC0 0x200000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define CSEM_REG_MSG_NUM_FIC1 0x200004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define CSEM_REG_MSG_NUM_FOC0 0x200008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define CSEM_REG_MSG_NUM_FOC1 0x20000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define CSEM_REG_MSG_NUM_FOC2 0x200010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define CSEM_REG_MSG_NUM_FOC3 0x200014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define CSEM_REG_PAS_DISABLE 0x20024c +/* [WB 128] Debug only. Passive buffer memory */ +#define CSEM_REG_PASSIVE_BUFFER 0x202000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
*/ +#define CSEM_REG_PRAM 0x240000 +/* [R 16] Valid sleeping threads indication have bit per thread */ +#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0 +/* [RW 16] List of free threads . There is a bit per thread. */ +#define CSEM_REG_THREADS_LIST 0x2002e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define CSEM_REG_TS_0_AS 0x200038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define CSEM_REG_TS_10_AS 0x200060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define CSEM_REG_TS_11_AS 0x200064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define CSEM_REG_TS_12_AS 0x200068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define CSEM_REG_TS_13_AS 0x20006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define CSEM_REG_TS_14_AS 0x200070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define CSEM_REG_TS_15_AS 0x200074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define CSEM_REG_TS_16_AS 0x200078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define CSEM_REG_TS_17_AS 0x20007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define CSEM_REG_TS_18_AS 0x200080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define CSEM_REG_TS_1_AS 0x20003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define CSEM_REG_TS_2_AS 0x200040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define CSEM_REG_TS_3_AS 0x200044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define CSEM_REG_TS_4_AS 0x200048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define CSEM_REG_TS_5_AS 0x20004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define CSEM_REG_TS_6_AS 0x200050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define CSEM_REG_TS_7_AS 0x200054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define CSEM_REG_TS_8_AS 0x200058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define CSEM_REG_TS_9_AS 0x20005c +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define CSEM_REG_VFPF_ERR_NUM 0x200380 +/* [RW 1] Parity mask register #0 read/write */ +#define DBG_REG_DBG_PRTY_MASK 0xc0a8 +/* [R 1] Parity register #0 read */ +#define DBG_REG_DBG_PRTY_STS 0xc09c +/* [RC 1] Parity register #0 read clear */ +#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0 +/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The + * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; + * 4.Completion function=0; 5.Error handling=0 */ +#define DMAE_REG_BACKWARD_COMP_EN 0x10207c +/* [RW 32] Commands memory. The address to command X; row Y is to calculated + as 14*X+Y. */ +#define DMAE_REG_CMD_MEM 0x102400 +#define DMAE_REG_CMD_MEM_SIZE 224 +/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c + initial value is all ones. */ +#define DMAE_REG_CRC16C_INIT 0x10201c +/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the + CRC-16 T10 initial value is all ones. */ +#define DMAE_REG_CRC16T10_INIT 0x102020 +/* [RW 2] Interrupt mask register #0 read/write */ +#define DMAE_REG_DMAE_INT_MASK 0x102054 +/* [RW 4] Parity mask register #0 read/write */ +#define DMAE_REG_DMAE_PRTY_MASK 0x102064 +/* [R 4] Parity register #0 read */ +#define DMAE_REG_DMAE_PRTY_STS 0x102058 +/* [RC 4] Parity register #0 read clear */ +#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c +/* [RW 1] Command 0 go. 
*/ +#define DMAE_REG_GO_C0 0x102080 +/* [RW 1] Command 1 go. */ +#define DMAE_REG_GO_C1 0x102084 +/* [RW 1] Command 10 go. */ +#define DMAE_REG_GO_C10 0x102088 +/* [RW 1] Command 11 go. */ +#define DMAE_REG_GO_C11 0x10208c +/* [RW 1] Command 12 go. */ +#define DMAE_REG_GO_C12 0x102090 +/* [RW 1] Command 13 go. */ +#define DMAE_REG_GO_C13 0x102094 +/* [RW 1] Command 14 go. */ +#define DMAE_REG_GO_C14 0x102098 +/* [RW 1] Command 15 go. */ +#define DMAE_REG_GO_C15 0x10209c +/* [RW 1] Command 2 go. */ +#define DMAE_REG_GO_C2 0x1020a0 +/* [RW 1] Command 3 go. */ +#define DMAE_REG_GO_C3 0x1020a4 +/* [RW 1] Command 4 go. */ +#define DMAE_REG_GO_C4 0x1020a8 +/* [RW 1] Command 5 go. */ +#define DMAE_REG_GO_C5 0x1020ac +/* [RW 1] Command 6 go. */ +#define DMAE_REG_GO_C6 0x1020b0 +/* [RW 1] Command 7 go. */ +#define DMAE_REG_GO_C7 0x1020b4 +/* [RW 1] Command 8 go. */ +#define DMAE_REG_GO_C8 0x1020b8 +/* [RW 1] Command 9 go. */ +#define DMAE_REG_GO_C9 0x1020bc +/* [RW 1] DMAE GRC Interface (Target; aster) enable. If 0 - the acknowledge + input is disregarded; valid is deasserted; all other signals are treated + as usual; if 1 - normal activity. */ +#define DMAE_REG_GRC_IFEN 0x102008 +/* [RW 1] DMAE PCI Interface (Request; ead; rite) enable. If 0 - the + acknowledge input is disregarded; valid is deasserted; full is asserted; + all other signals are treated as usual; if 1 - normal activity. */ +#define DMAE_REG_PCI_IFEN 0x102004 +/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the + initial value to the credit counter; related to the address. Read returns + the current value of the counter. */ +#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD0 0x170060 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD1 0x170064 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD2 0x170068 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD3 0x17006c +/* [RW 28] UCM Header. */ +#define DORQ_REG_CMHEAD_RX 0x170050 +/* [RW 32] Doorbell address for RBC doorbells (function 0). */ +#define DORQ_REG_DB_ADDR0 0x17008c +/* [RW 5] Interrupt mask register #0 read/write */ +#define DORQ_REG_DORQ_INT_MASK 0x170180 +/* [R 5] Interrupt register #0 read */ +#define DORQ_REG_DORQ_INT_STS 0x170174 +/* [RC 5] Interrupt register #0 read clear */ +#define DORQ_REG_DORQ_INT_STS_CLR 0x170178 +/* [RW 2] Parity mask register #0 read/write */ +#define DORQ_REG_DORQ_PRTY_MASK 0x170190 +/* [R 2] Parity register #0 read */ +#define DORQ_REG_DORQ_PRTY_STS 0x170184 +/* [RC 2] Parity register #0 read clear */ +#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188 +/* [RW 8] The address to write the DPM CID to STORM. */ +#define DORQ_REG_DPM_CID_ADDR 0x170044 +/* [RW 5] The DPM mode CID extraction offset. */ +#define DORQ_REG_DPM_CID_OFST 0x170030 +/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */ +#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c +/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */ +#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078 +/* [R 13] Current value of the DQ FIFO fill level according to following + pointer. The range is 0 - 256 FIFO rows; where each row stands for the + doorbell. */ +#define DORQ_REG_DQ_FILL_LVLF 0x1700a4 +/* [R 1] DQ FIFO full status. Is set; when FIFO filling level is more or + equal to full threshold; reset on full clear. */ +#define DORQ_REG_DQ_FULL_ST 0x1700c0 +/* [RW 28] The value sent to CM header in the case of CFC load error. 
*/ +#define DORQ_REG_ERR_CMHEAD 0x170058 +#define DORQ_REG_IF_EN 0x170004 +#define DORQ_REG_MODE_ACT 0x170008 +/* [RW 5] The normal mode CID extraction offset. */ +#define DORQ_REG_NORM_CID_OFST 0x17002c +/* [RW 28] TCM Header when only TCP context is loaded. */ +#define DORQ_REG_NORM_CMHEAD_TX 0x17004c +/* [RW 3] The number of simultaneous outstanding requests to Context Fetch + Interface. */ +#define DORQ_REG_OUTST_REQ 0x17003c +#define DORQ_REG_PF_USAGE_CNT 0x1701d0 +#define DORQ_REG_REGN 0x170038 +/* [R 4] Current value of response A counter credit. Initial credit is + configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd + register. */ +#define DORQ_REG_RSPA_CRD_CNT 0x1700ac +/* [R 4] Current value of response B counter credit. Initial credit is + configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd + register. */ +#define DORQ_REG_RSPB_CRD_CNT 0x1700b0 +/* [RW 4] The initial credit at the Doorbell Response Interface. The write + writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The + read reads this written value. */ +#define DORQ_REG_RSP_INIT_CRD 0x170048 +/* [RW 4] Initial activity counter value on the load request; when the + shortcut is done. */ +#define DORQ_REG_SHRT_ACT_CNT 0x170070 +/* [RW 28] TCM Header when both ULP and TCP context is loaded. */ +#define DORQ_REG_SHRT_CMHEAD 0x170054 +#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4) +#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0) +#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3) +#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7) +#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) +#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) +#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) +#define HC_REG_AGG_INT_0 0x108050 +#define HC_REG_AGG_INT_1 0x108054 +#define HC_REG_ATTN_BIT 0x108120 +#define HC_REG_ATTN_IDX 0x108100 +#define HC_REG_ATTN_MSG0_ADDR_L 0x108018 +#define HC_REG_ATTN_MSG1_ADDR_L 0x108020 +#define HC_REG_ATTN_NUM_P0 0x108038 +#define HC_REG_ATTN_NUM_P1 0x10803c +#define HC_REG_COMMAND_REG 0x108180 +#define HC_REG_CONFIG_0 0x108000 +#define HC_REG_CONFIG_1 0x108004 +#define HC_REG_FUNC_NUM_P0 0x1080ac +#define HC_REG_FUNC_NUM_P1 0x1080b0 +/* [RW 3] Parity mask register #0 read/write */ +#define HC_REG_HC_PRTY_MASK 0x1080a0 +/* [R 3] Parity register #0 read */ +#define HC_REG_HC_PRTY_STS 0x108094 +/* [RC 3] Parity register #0 read clear */ +#define HC_REG_HC_PRTY_STS_CLR 0x108098 +#define HC_REG_INT_MASK 0x108108 +#define HC_REG_LEADING_EDGE_0 0x108040 +#define HC_REG_LEADING_EDGE_1 0x108048 +#define HC_REG_MAIN_MEMORY 0x108800 +#define HC_REG_MAIN_MEMORY_SIZE 152 +#define HC_REG_P0_PROD_CONS 0x108200 +#define HC_REG_P1_PROD_CONS 0x108400 +#define HC_REG_PBA_COMMAND 0x108140 +#define HC_REG_PCI_CONFIG_0 0x108010 +#define HC_REG_PCI_CONFIG_1 0x108014 +#define HC_REG_STATISTIC_COUNTERS 0x109000 +#define HC_REG_TRAILING_EDGE_0 0x108044 +#define HC_REG_TRAILING_EDGE_1 0x10804c +#define HC_REG_UC_RAM_ADDR_0 0x108028 +#define HC_REG_UC_RAM_ADDR_1 0x108030 +#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068 +#define HC_REG_VQID_0 0x108008 +#define HC_REG_VQID_1 0x10800c +#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1) +#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0) +#define IGU_REG_ATTENTION_ACK_BITS 0x130108 +/* [R 4] Debug: attn_fsm */ +#define IGU_REG_ATTN_FSM 0x130054 +#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c +#define IGU_REG_ATTN_MSG_ADDR_L 0x130120 +/* [R 4] Debug: [3] - attention write done message is pending (0-no pending; + * 1-pending). 
[2:0] = PFID. Pending means attention message was sent; but + * write done was not received. */ +#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030 +#define IGU_REG_BLOCK_CONFIGURATION 0x130000 +#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124 +#define IGU_REG_COMMAND_REG_CTRL 0x13012c +/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit + * is clear. The bits in this register are set and cleared via the producer + * command. Data valid only in addresses 0-4. all the rest are zero. */ +#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200 +/* [R 5] Debug: ctrl_fsm */ +#define IGU_REG_CTRL_FSM 0x130064 +/* [R 1] data available for error memory. If this bit is clear do not read + * from error_handling_memory. */ +#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 +/* [RW 11] Parity mask register #0 read/write */ +#define IGU_REG_IGU_PRTY_MASK 0x1300a8 +/* [R 11] Parity register #0 read */ +#define IGU_REG_IGU_PRTY_STS 0x13009c +/* [RC 11] Parity register #0 read clear */ +#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0 +/* [R 4] Debug: int_handle_fsm */ +#define IGU_REG_INT_HANDLE_FSM 0x130050 +#define IGU_REG_LEADING_EDGE_LATCH 0x130134 +/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid. + * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF + * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */ +#define IGU_REG_MAPPING_MEMORY 0x131000 +#define IGU_REG_MAPPING_MEMORY_SIZE 136 +#define IGU_REG_PBA_STATUS_LSB 0x130138 +#define IGU_REG_PBA_STATUS_MSB 0x13013c +#define IGU_REG_PCI_PF_MSI_EN 0x130140 +#define IGU_REG_PCI_PF_MSIX_EN 0x130144 +#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148 +/* [WB_R 32] Each bit represents the pending bits status for that SB. 0 = no + * pending; 1 = pending. Pending means interrupt was asserted; and write + * done was not received. Data valid only in addresses 0-4. all the rest are + * zero. */ +#define IGU_REG_PENDING_BITS_STATUS 0x130300 +#define IGU_REG_PF_CONFIGURATION 0x130154 +/* [RW 20] producers only. E2 mode: addresses 0-135 match to the mapping + * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default + * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod; + * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode + * - In backward compatible mode; for non default SB; each even line in the + * memory holds the U producer and each odd line holds the C producer. The + * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The + * last 20 producers are for the DSB for each PF. each PF has five segments + * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */ +#define IGU_REG_PROD_CONS_MEMORY 0x132000 +/* [R 3] Debug: pxp_arb_fsm */ +#define IGU_REG_PXP_ARB_FSM 0x130068 +/* [RW 6] Writing one for each bit will reset the appropriate memory. When the + * memory reset is finished the appropriate bit will be clear.
Bit 0 - mapping + * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3 + * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */ +#define IGU_REG_RESET_MEMORIES 0x130158 +/* [R 4] Debug: sb_ctrl_fsm */ +#define IGU_REG_SB_CTRL_FSM 0x13004c +#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c +#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160 +#define IGU_REG_SB_MASK_LSB 0x130164 +#define IGU_REG_SB_MASK_MSB 0x130168 +/* [RW 16] Number of command that were dropped without causing an interrupt + * due to: read access for WO BAR address; or write access for RO BAR + * address or any access for reserved address or PCI function error is set + * and address is not MSIX; PBA or cleanup */ +#define IGU_REG_SILENT_DROP 0x13016c +/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 - + * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per + * PF; 68-71 number of ATTN messages per PF */ +#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800 +/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a + * timer mask command arrives. Value must be bigger than 100. */ +#define IGU_REG_TIMER_MASKING_VALUE 0x13003c +#define IGU_REG_TRAILING_EDGE_LATCH 0x130104 +#define IGU_REG_VF_CONFIGURATION 0x130170 +/* [WB_R 32] Each bit represent write done pending bits status for that SB + * (MSI/MSIX message was sent and write done was not received yet). 0 = + * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */ +#define IGU_REG_WRITE_DONE_PENDING 0x130480 +#define MCP_A_REG_MCPR_SCRATCH 0x3a0000 +#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c +#define MCP_REG_MCPR_GP_INPUTS 0x800c0 +#define MCP_REG_MCPR_GP_OENABLE 0x800c8 +#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4 +#define MCP_REG_MCPR_IMC_COMMAND 0x85900 +#define MCP_REG_MCPR_IMC_DATAREG0 0x85920 +#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904 +#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c +#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 +#define MCP_REG_MCPR_NVM_ADDR 0x8640c +#define MCP_REG_MCPR_NVM_CFG4 0x8642c +#define MCP_REG_MCPR_NVM_COMMAND 0x86400 +#define MCP_REG_MCPR_NVM_READ 0x86410 +#define MCP_REG_MCPR_NVM_SW_ARB 0x86420 +#define MCP_REG_MCPR_NVM_WRITE 0x86408 +#define MCP_REG_MCPR_SCRATCH 0xa0000 +#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1) +#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0) +/* [R 32] read first 32 bit after inversion of function 0. mapped as + follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; + [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9] + GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE + glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0; + [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] + MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB + Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw + interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity + error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw + interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF + Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c +#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430 +/* [R 32] read first 32 bit after inversion of mcp. 
mapped as follows: [0] + NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw + interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434 +/* [R 32] read second 32 bit after inversion of function 0. mapped as + follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438 +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c +/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0] + PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error; + [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; + [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] + XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440 +/* [R 32] read third 32 bit after inversion of function 0. 
mapped as + follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity + error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5] + PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444 +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448 +/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0] + CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP + Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient + Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity + error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw + interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14] + MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17] + Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW + timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3 + func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1 + func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW + timers attn_4 func1; [30] General attn0; [31] General attn1; */ +#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c +/* [R 32] read fourth 32 bit after inversion of function 0. mapped as + follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450 +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454 +/* [R 32] read fourth 32 bit after inversion of mcp. 
mapped as follows: [0] + General attn2; [1] General attn3; [2] General attn4; [3] General attn5; + [4] General attn6; [5] General attn7; [6] General attn8; [7] General + attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11] + General attn13; [12] General attn14; [13] General attn15; [14] General + attn16; [15] General attn17; [16] General attn18; [17] General attn19; + [18] General attn20; [19] General attn21; [20] Main power interrupt; [21] + RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24] + RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout + attention; [27] GRC Latched reserved access attention; [28] MCP Latched + rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched + ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 +/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as + * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */ +#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700 +/* [W 14] write to this register results with the clear of the latched + signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in + d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP + latch; one in d5 clears GRC Latched timeout attention; one in d6 clears + GRC Latched reserved access attention; one in d7 clears Latched + rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears + Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both + ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears + pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; read + from this register return zero */ +#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c +/* [RW 32] first 32b for enabling the output for function 0 output0. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc +/* [RW 32] first 32b for enabling the output for function 1 output0. 
mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function + 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c +/* [RW 32] first 32b for enabling the output for close the gate nig. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec +#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c +/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc +#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c +/* [RW 32] second 32b for enabling the output for function 0 output0. 
mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070 +#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080 +/* [RW 32] second 32b for enabling the output for function 1 output0. mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110 +#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120 +/* [RW 32] second 32b for enabling the output for close the gate nig. mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0 +#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190 +/* [RW 32] second 32b for enabling the output for close the gate pxp. 
mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100 +#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0 +/* [RW 32] third 32b for enabling the output for function 0 output0. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074 +#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084 +/* [RW 32] third 32b for enabling the output for function 1 output0. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114 +#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124 +/* [RW 32] third 32b for enabling the output for close the gate nig. 
mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4 +#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194 +/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104 +#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4 +/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8 +/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General 
attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188 +/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8 +#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198 +/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 +#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 +/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu + 128 bit vector */ +#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 +#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004 +#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028 +#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c +#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030 +#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 +#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c +#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 +#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 +#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 +#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c +#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020 +#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024 +#define MISC_REG_AEU_GENERAL_MASK 0xa61c +/* [RW 32] first 32b for inverting the input for function 0; for each bit: + 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for + function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp; + [4] GPIO3 mcp; [5] GPIO4 mcp; [6] 
GPIO1 function 1; [7] GPIO2 function 1; + [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication + for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS + Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw + interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM + Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI + Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c +#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c +/* [RW 32] second 32b for inverting the input for function 0; for each bit: + 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity + error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw + interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM + Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw + interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */ +#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230 +#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240 +/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0; + [9:8] = raserved. Zero = mask; one = unmask */ +#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060 +#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064 +/* [RW 1] If set a system kill occurred */ +#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610 +/* [RW 32] Represent the status of the input vector to the AEU when a system + kill occurred. The register is reset in por reset. Mapped as follows: [0] + NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw + interrupt; */ +#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600 +#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604 +#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608 +#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c +/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1 + Port. */ +#define MISC_REG_BOND_ID 0xa400 +/* [R 8] These bits indicate the metal revision of the chip. 
This value + starts at 0x00 for each all-layer tape-out and increments by one for each + tape-out. */ +#define MISC_REG_CHIP_METAL 0xa404 +/* [R 16] These bits indicate the part number for the chip. */ +#define MISC_REG_CHIP_NUM 0xa408 +/* [R 4] These bits indicate the base revision of the chip. This value + starts at 0x0 for the A0 tape-out and increments by one for each + all-layer tape-out. */ +#define MISC_REG_CHIP_REV 0xa40c +/* [RW 32] The following driver registers(1...16) represent 16 drivers and + 32 clients. Each client can be controlled by one driver only. One in each + bit represent that this driver control the appropriate client (Ex: bit 5 + is set means this driver control client number 5). addr1 = set; addr0 = + clear; read from both addresses will give the same result = status. write + to address 1 will set a request to control all the clients that their + appropriate bit (in the write command) is set. if the client is free (the + appropriate bit in all the other drivers is clear) one will be written to + that driver register; if the client isn't free the bit will remain zero. + if the appropriate bit is set (the driver request to gain control on a + client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW + interrupt will be asserted). write to address 0 will set a request to + free all the clients that their appropriate bit (in the write command) is + set. if the appropriate bit is clear (the driver request to free a client + it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will + be asserted). */ +#define MISC_REG_DRIVER_CONTROL_1 0xa510 +#define MISC_REG_DRIVER_CONTROL_7 0xa3c8 +/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 + only. */ +#define MISC_REG_E1HMF_MODE 0xa5f8 +/* [R 1] Status of four port mode path swap input pin. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c +/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 - + the path_swap output is equal to 4 port mode path swap input pin; if it + is 1 - the path_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the path_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738 +/* [R 1] Status of 4 port mode port swap input pin. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754 +/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 - + the port_swap output is equal to 4 port mode port swap input pin; if it + is 1 - the port_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the port_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734 +/* [RW 32] Debug only: spare RW register reset by core reset */ +#define MISC_REG_GENERIC_CR_0 0xa460 +#define MISC_REG_GENERIC_CR_1 0xa464 +/* [RW 32] Debug only: spare RW register reset by por reset */ +#define MISC_REG_GENERIC_POR_1 0xa474 +/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to + use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO + can not be configured as an output. Each output has its output enable in + the MCP register space; but this bit needs to be set to make use of that. + Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When + set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON. + When set to 0 - vTMON is in reset. 
Bit[6]: setting this bit will change + the i/o to an output and will drive the TimeSync output. Bit[31:7]: + spare. Global register. Reset by hard reset. */ +#define MISC_REG_GEN_PURP_HWG 0xa9a0 +/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of + these bits is written as a '1'; the corresponding SPIO bit will turn off + it's drivers and become an input. This is the reset state of all GPIO + pins. The read value of these bits will be a '1' if that last command + (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff). + [23-20] CLR port 1; 19-16] CLR port 0; When any of these bits is written + as a '1'; the corresponding GPIO bit will drive low. The read value of + these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for + this bit was a #CLR. (reset value 0). [15-12] SET port 1; 11-8] port 0; + SET When any of these bits is written as a '1'; the corresponding GPIO + bit will drive high (if it has that capability). The read value of these + bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this + bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0; + RO; These bits indicate the read value of each of the eight GPIO pins. + This is the result value of the pin; not the drive value. Writing these + bits will have not effect. */ +#define MISC_REG_GPIO 0xa490 +/* [RW 8] These bits enable the GPIO_INTs to signals event to the + IGU/MCP.according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2] + p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2; + [7] p1_gpio_3; */ +#define MISC_REG_GPIO_EVENT_EN 0xa2bc +/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a + '1' to these bit clears the corresponding bit in the #OLD_VALUE register. + This will acknowledge an interrupt on the falling edge of corresponding + GPIO input (reset value 0). [23-16] OLD_SET [23-16] port1; OLD_SET port0; + Writing a '1' to these bit sets the corresponding bit in the #OLD_VALUE + register. This will acknowledge an interrupt on the rising edge of + corresponding SPIO input (reset value 0). [15-12] OLD_VALUE [11-8] port1; + OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input + value. When the ~INT_STATE bit is set; this bit indicates the OLD value + of the pin such that if ~INT_STATE is set and this bit is '0'; then the + interrupt is due to a low to high edge. If ~INT_STATE is set and this bit + is '1'; then the interrupt is due to a high to low edge (reset value 0). + [7-4] INT_STATE port1; [3-0] INT_STATE RO port0; These bits indicate the + current GPIO interrupt state for each GPIO pin. This bit is cleared when + the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is + set when the GPIO input does not match the current value in #OLD_VALUE + (reset value 0). */ +#define MISC_REG_GPIO_INT 0xa494 +/* [R 28] this field hold the last information that caused reserved + attention. bits [19:0] - address; [22:20] function; [23] reserved; + [27:24] the master that caused the attention - according to the following + encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + dbu; 8 = dmae */ +#define MISC_REG_GRC_RSV_ATTN 0xa3c0 +/* [R 28] this field hold the last information that caused timeout + attention. 
bits [19:0] - address; [22:20] function; [23] reserved; + [27:24] the master that caused the attention - according to the following + encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + dbu; 8 = dmae */ +#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 +/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any + access that does not finish within + ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is + cleared; this timeout is disabled. If this timeout occurs; the GRC shall + assert it attention output. */ +#define MISC_REG_GRC_TIMEOUT_EN 0xa280 +/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of + the bits is: [2:0] OAC reset value 001) CML output buffer bias control; + 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl + (reset value 001) Charge pump current control; 111 for 720u; 011 for + 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00) + Global bias control; When bit 7 is high bias current will be 10 0gh; When + bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8] + Pll_observe (reset value 010) Bits to control observability. bit 10 is + for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl + (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V + and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning + sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted + internally). [14] reserved (reset value 0) Reset for VCO sequencer is + connected to RESET input directly. [15] capRetry_en (reset value 0) + enable retry on cap search failure (inverted). [16] freqMonitor_e (reset + value 0) bit to continuously monitor vco freq (inverted). [17] + freqDetRestart_en (reset value 0) bit to enable restart when not freq + locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable + retry on freq det failure(inverted). [19] pllForceFdone_en (reset value + 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20] + pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass + (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value + 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0) + bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to + enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force + capPass. [26] capRestart (reset value 0) bit to force cap sequencer to + restart. [27] capSelectM_en (reset value 0) bit to enable cap select + register bits. */ +#define MISC_REG_LCPLL_CTRL_1 0xa2a4 +#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8 +/* [RW 4] Interrupt mask register #0 read/write */ +#define MISC_REG_MISC_INT_MASK 0xa388 +/* [RW 1] Parity mask register #0 read/write */ +#define MISC_REG_MISC_PRTY_MASK 0xa398 +/* [R 1] Parity register #0 read */ +#define MISC_REG_MISC_PRTY_STS 0xa38c +/* [RC 1] Parity register #0 read clear */ +#define MISC_REG_MISC_PRTY_STS_CLR 0xa390 +#define MISC_REG_NIG_WOL_P0 0xa270 +#define MISC_REG_NIG_WOL_P1 0xa274 +/* [R 1] If set indicate that the pcie_rst_b was asserted without perst + assertion */ +#define MISC_REG_PCIE_HOT_RESET 0xa618 +/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911. 
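The GRC reserved/timeout attention registers above pack address, function and master fields into one word; a small decode sketch (using this header's macros and standard kernel I/O helpers) may make the layout easier to follow.

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Sketch: split the timeout-attention word into the documented fields:
 * address [19:0], function [22:20], master [27:24]. */
static void grc_timeout_attn_decode(void __iomem *bar)
{
	u32 val = readl(bar + MISC_REG_GRC_TIMEOUT_ATTN);

	pr_info("GRC timeout: addr 0x%05x func %u master %u (1=pxp ... 8=dmae)\n",
		val & 0xfffff, (val >> 20) & 0x7, (val >> 24) & 0xf);
}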
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1 + divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1 + divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2 + divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2 + divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9] + freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1] + (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value + 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16] + Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset + value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value + 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0); + [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25] + Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27] + testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29] + testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31] + testa_en (reset value 0); */ +#define MISC_REG_PLL_STORM_CTRL_1 0xa294 +#define MISC_REG_PLL_STORM_CTRL_2 0xa298 +#define MISC_REG_PLL_STORM_CTRL_3 0xa29c +#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0 +/* [R 1] Status of 4 port mode enable input pin. */ +#define MISC_REG_PORT4MODE_EN 0xa750 +/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 - + * the port4mode_en output is equal to 4 port mode input pin; if it is 1 - + * the port4mode_en output is equal to bit[1] of this register; [1] - + * Overwrite value. If bit[0] of this register is 1 this is the value that + * receives the port4mode_en output . */ +#define MISC_REG_PORT4MODE_EN_OVWR 0xa720 +/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset; + write/read zero = the specific block is in reset; addr 0-wr- the write + value will be written to the register; addr 1-set - one will be written + to all the bits that have the value of one in the data written (bits that + have the value of zero will not be change) ; addr 2-clear - zero will be + written to all the bits that have the value of one in the data written + (bits that have the value of zero will not be change); addr 3-ignore; + read ignore from all addr except addr 00; inside order of the bits is: + [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc; + [5] rst_mcp_n_reset_reg_hard_core; [6] rst_ mcp_n_hard_core_rst_b; [7] + rst_ mcp_n_reset_cmn_cpu; [8] rst_ mcp_n_reset_cmn_core; [9] rst_rbcn; + [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13] + Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; 16] + rst_pxp_rq_rd_wr; 31:17] reserved */ +#define MISC_REG_RESET_REG_2 0xa590 +/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is + shared with the driver resides */ +#define MISC_REG_SHARED_MEM_ADDR 0xa2b4 +/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1'; + the corresponding SPIO bit will turn off it's drivers and become an + input. This is the reset state of all SPIO pins. The read value of these + bits will be a '1' if that last command (#SET; #CL; or #FLOAT) for this + bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits + is written as a '1'; the corresponding SPIO bit will drive low. The read + value of these bits will be a '1' if that last command (#SET; #CLR; or +#FLOAT) for this bit was a #CLR. (reset value 0). 
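MISC_REG_RESET_REG_2 above is a write/set/clear/ignore style register; the sketch below pulses BMAC0 (bit 0, rst_bmac0) out of and back into reset. It assumes the four documented addresses are consecutive 32-bit locations starting at the base offset; the comment numbers them 0 to 3 but does not spell out the spacing, so treat that as an assumption.

#include <linux/io.h>
#include <linux/bits.h>

/* Sketch, assuming addr 1 (set) = base + 4 and addr 2 (clear) = base + 8. */
static void bmac0_reset_control(void __iomem *bar, bool out_of_reset)
{
	/* One = block out of reset; the set/clear addresses touch only the
	 * bits written as 1. */
	if (out_of_reset)
		writel(BIT(0), bar + MISC_REG_RESET_REG_2 + 4);
	else
		writel(BIT(0), bar + MISC_REG_RESET_REG_2 + 8);
}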
[15-8] SET When any of + these bits is written as a '1'; the corresponding SPIO bit will drive + high (if it has that capability). The read value of these bits will be a + '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET. + (reset value 0). [7-0] VALUE RO; These bits indicate the read value of + each of the eight SPIO pins. This is the result value of the pin; not the + drive value. Writing these bits will have not effect. Each 8 bits field + is divided as follows: [0] VAUX Enable; when pulsed low; enables supply + from VAUX. (This is an output pin only; the FLOAT field is not applicable + for this pin); [1] VAUX Disable; when pulsed low; disables supply form + VAUX. (This is an output pin only; FLOAT field is not applicable for this + pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to + select VAUX supply. (This is an output pin only; it is not controlled by + the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT + field is not applicable for this pin; only the VALUE fields is relevant - + it reflects the output value); [3] port swap [4] spio_4; [5] spio_5; [6] + Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP + device ID select; read by UMP firmware. */ +#define MISC_REG_SPIO 0xa4fc +/* [RW 8] These bits enable the SPIO_INTs to signals event to the IGU/MC. + according to the following map: [3:0] reserved; [4] spio_4 [5] spio_5; + [7:0] reserved */ +#define MISC_REG_SPIO_EVENT_EN 0xa2b8 +/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bit clears the + corresponding bit in the #OLD_VALUE register. This will acknowledge an + interrupt on the falling edge of corresponding SPIO input (reset value + 0). [23-16] OLD_SET Writing a '1' to these bit sets the corresponding bit + in the #OLD_VALUE register. This will acknowledge an interrupt on the + rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE + RO; These bits indicate the old value of the SPIO input value. When the + ~INT_STATE bit is set; this bit indicates the OLD value of the pin such + that if ~INT_STATE is set and this bit is '0'; then the interrupt is due + to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the + interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE + RO; These bits indicate the current SPIO interrupt state for each SPIO + pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR + command bit is written. This bit is set when the SPIO input does not + match the current value in #OLD_VALUE (reset value 0). */ +#define MISC_REG_SPIO_INT 0xa500 +/* [RW 32] reload value for counter 4 if reload; the value will be reload if + the counter reached zero and the reload bit + (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ +#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc +/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses + in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 - + timer 8 */ +#define MISC_REG_SW_TIMER_VAL 0xa5c0 +/* [R 1] Status of two port mode path swap input pin. */ +#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758 +/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the + path_swap output is equal to 2 port mode path swap input pin; if it is 1 + - the path_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the path_swap output. Reset on Hard reset. 
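The SPIO interrupt register above encodes an acknowledge protocol: OLD_SET/OLD_CLR update OLD_VALUE, and INT_STATE flags a mismatch between OLD_VALUE and the pin. As a sketch against this header's macros, the function below acknowledges a falling-edge event on spio_5 and reports whether the interrupt condition is still present.

#include <linux/io.h>
#include <linux/bits.h>

/* Sketch: OLD_CLR is bits [31-24] and INT_STATE is bits [7-0], with
 * spio_5 at bit position 5 inside each 8-bit field. */
static bool spio5_ack_falling_edge(void __iomem *bar)
{
	/* Clearing OLD_VALUE acknowledges the high-to-low edge. */
	writel(BIT(24 + 5), bar + MISC_REG_SPIO_INT);

	/* INT_STATE stays set only while OLD_VALUE and the pin differ. */
	return readl(bar + MISC_REG_SPIO_INT) & BIT(5);
}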
*/ +#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c +/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are + loaded; 0-prepare; -unprepare */ +#define MISC_REG_UNPREPARED 0xa424 +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) +/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or + * not it is the recipient of the message on the MDIO interface. The value + * is compared to the value on ctrl_md_devad. Drives output + * misc_xgxs0_phy_addr. Global register. */ +#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc +/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system + side. This should be less than or equal to phy_port_mode; if some of the + ports are not used. This enables reduction of frequency on the core side. + This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 - + Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap + input for the XMAC_MP core; and should be changed only while reset is + held low. Reset on Hard reset. */ +#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964 +/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp + Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode; + 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the + XMAC_MP core; and should be changed only while reset is held low. Reset + on Hard reset. */ +#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960 +/* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0. + * Reads from this register will clear bits 31:0. */ +#define MSTAT_REG_RX_STAT_GR64_LO 0x200 +/* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits + * 31:0. Reads from this register will clear bits 31:0. 
*/ +#define MSTAT_REG_TX_STAT_GTXPOK_LO 0 +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) +#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) +#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18) +/* [RW 1] Input enable for RX_BMAC0 IF */ +#define NIG_REG_BMAC0_IN_EN 0x100ac +/* [RW 1] output enable for TX_BMAC0 IF */ +#define NIG_REG_BMAC0_OUT_EN 0x100e0 +/* [RW 1] output enable for TX BMAC pause port 0 IF */ +#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110 +/* [RW 1] output enable for RX_BMAC0_REGS IF */ +#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8 +/* [RW 1] output enable for RX BRB1 port0 IF */ +#define NIG_REG_BRB0_OUT_EN 0x100f8 +/* [RW 1] Input enable for TX BRB1 pause port 0 IF */ +#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4 +/* [RW 1] output enable for RX BRB1 port1 IF */ +#define NIG_REG_BRB1_OUT_EN 0x100fc +/* [RW 1] Input enable for TX BRB1 pause port 1 IF */ +#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8 +/* [RW 1] output enable for RX BRB1 LP IF */ +#define NIG_REG_BRB_LB_OUT_EN 0x10100 +/* [WB_W 82] Debug packet to LP from RBC; Data spelling:[63:0] data; 64] + error; [67:65]eop_bvalid; [68]eop; [69]sop; [70]port_id; 71]flush; + 72:73]-vnic_num; 81:74]-sideband_info */ +#define NIG_REG_DEBUG_PACKET_LB 0x10800 +/* [RW 1] Input enable for TX Debug packet */ +#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc +/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all + packets from PBFare not forwarded to the MAC and just deleted from FIFO. + First packet may be deleted from the middle. And last packet will be + always deleted till the end. */ +#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060 +/* [RW 1] Output enable to EMAC0 */ +#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120 +/* [RW 1] MAC configuration for packets of port0. If 1 - all packet outputs + to emac for port0; other way to bmac for port0 */ +#define NIG_REG_EGRESS_EMAC0_PORT 0x10058 +/* [RW 1] Input enable for TX PBF user packet port0 IF */ +#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc +/* [RW 1] Input enable for TX PBF user packet port1 IF */ +#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0 +/* [RW 1] Input enable for TX UMP management packet port0 IF */ +#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4 +/* [RW 1] Input enable for RX_EMAC0 IF */ +#define NIG_REG_EMAC0_IN_EN 0x100a4 +/* [RW 1] output enable for TX EMAC pause port 0 IF */ +#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118 +/* [R 1] status from emac0. This bit is set when MDINT from either the + EXT_MDINT pin or from the Copper PHY is driven low. This condition must + be cleared in the attached PHY device that is driving the MINT pin. */ +#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494 +/* [WB 48] This address space contains BMAC0 registers. The BMAC registers + are described in appendix A. 
In order to access the BMAC0 registers; the + base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be + added to each BMAC register offset */ +#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00 +/* [WB 48] This address space contains BMAC1 registers. The BMAC registers + are described in appendix A. In order to access the BMAC0 registers; the + base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be + added to each BMAC register offset */ +#define NIG_REG_INGRESS_BMAC1_MEM 0x11000 +/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */ +#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0 +/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data + packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */ +#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4 +/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch + logic for interrupts must be used. Enable per bit of interrupt of + ~latch_status.latch_status */ +#define NIG_REG_LATCH_BC_0 0x16210 +/* [RW 27] Latch for each interrupt from Unicore.b[0] + status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete; + b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status; + b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn; + b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete; + b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status; + b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete; + b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet; + b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g; + b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact; + b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx; + b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx; + b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */ +#define NIG_REG_LATCH_STATUS_0 0x18000 +/* [RW 1] led 10g for port 0 */ +#define NIG_REG_LED_10G_P0 0x10320 +/* [RW 1] led 10g for port 1 */ +#define NIG_REG_LED_10G_P1 0x10324 +/* [RW 1] Port0: This bit is set to enable the use of the + ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field + defined below. If this bit is cleared; then the blink rate will be about + 8Hz. */ +#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318 +/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for + Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field + is reset to 0x080; giving a default blink period of approximately 8Hz. */ +#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310 +/* [RW 1] Port0: If set along with the + ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0 + bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED + bit; the Traffic LED will blink with the blink rate specified in + ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and + ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 + fields. */ +#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308 +/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The + Traffic LED will then be controlled via bit ~nig_registers_ + led_control_traffic_p0.led_control_traffic_p0 and bit + ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */ +#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8 +/* [RW 1] Port0: If set along with the led_control_override_trafic_p0 bit; + turns on the Traffic LED. 
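Because the BMAC blocks are exposed as windows inside the NIG address space (the comment above says each BMAC register offset is added to the window base), address computation is a plain add. The tiny helper below sketches that; bmac_reg stands in for a per-register offset from the BMAC documentation ("appendix A") and is not defined in this file.

#include <linux/types.h>

/* Sketch: bmac_reg is a hypothetical offset from the BMAC register
 * list; port selects the BMAC0 or BMAC1 window. */
static u32 bmac_reg_addr(int port, u32 bmac_reg)
{
	u32 base = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM;

	return base + bmac_reg;
}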
If the led_control_blink_traffic_p0 bit is also + set; the LED will blink with blink rate specified in + ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and + ~nig_regsters_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 + fields. */ +#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300 +/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3; + 9-11PHY7; 12 MAC4; 13-15 PHY10; */ +#define NIG_REG_LED_MODE_P0 0x102f0 +/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1- + tsdm enable; b2- usdm enable */ +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070 +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074 +/* [RW 1] SAFC enable for port0. This register may get 1 only when + ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same + port */ +#define NIG_REG_LLFC_ENABLE_0 0x16208 +#define NIG_REG_LLFC_ENABLE_1 0x1620c +/* [RW 16] classes are high-priority for port0 */ +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058 +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c +/* [RW 16] classes are low-priority for port0 */ +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060 +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064 +/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */ +#define NIG_REG_LLFC_OUT_EN_0 0x160c8 +#define NIG_REG_LLFC_OUT_EN_1 0x160cc +#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c +#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154 +#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244 +#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048 +/* [RW 1] send to BRB1 if no match on any of RMP rules. */ +#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c +/* [RW 2] Determine the classification participants. 0: no classification.1: + classification upon VLAN id. 2: classification upon MAC address. 3: + classification upon both VLAN id & MAC addr. */ +#define NIG_REG_LLH0_CLS_TYPE 0x16080 +/* [RW 32] cm header for llh0 */ +#define NIG_REG_LLH0_CM_HEADER 0x1007c +#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc +#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0 +/* [RW 16] destination TCP address 1. The LLH will look for this address in + all incoming packets. */ +#define NIG_REG_LLH0_DEST_TCP_0 0x10220 +/* [RW 16] destination UDP address 1 The LLH will look for this address in + all incoming packets. */ +#define NIG_REG_LLH0_DEST_UDP_0 0x10214 +#define NIG_REG_LLH0_ERROR_MASK 0x1008c +/* [RW 8] event id for llh0 */ +#define NIG_REG_LLH0_EVENT_ID 0x10084 +#define NIG_REG_LLH0_FUNC_EN 0x160fc +#define NIG_REG_LLH0_FUNC_MEM 0x16180 +#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140 +#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 +/* [RW 1] Determine the IP version to look for in + ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */ +#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208 +/* [RW 1] t bit for llh0 */ +#define NIG_REG_LLH0_T_BIT 0x10074 +/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */ +#define NIG_REG_LLH0_VLAN_ID_0 0x1022c +/* [RW 8] init credit counter for port0 in LLH */ +#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 +#define NIG_REG_LLH0_XCM_MASK 0x10130 +#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248 +/* [RW 1] send to BRB1 if no match on any of RMP rules. */ +#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc +/* [RW 2] Determine the classification participants. 0: no classification.1: + classification upon VLAN id. 2: classification upon MAC address. 3: + classification upon both VLAN id & MAC addr. 
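The port-0 LED control registers above combine several enables before the Traffic LED actually blinks under software control; the sketch below simply wires them together, taking the blink period as the 12-bit millisecond value the comment describes (default 0x080). It is built on this header's macros and <linux/io.h>, not on the driver's own LED helpers.

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: override hardware control of the port-0 Traffic LED and
 * blink it at the requested period (12-bit, must be non-zero). */
static void port0_traffic_led_blink(void __iomem *bar, u32 period_ms)
{
	writel(1, bar + NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0);
	writel(1, bar + NIG_REG_LED_CONTROL_TRAFFIC_P0);
	writel(1, bar + NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0);
	writel(period_ms & 0xfff, bar + NIG_REG_LED_CONTROL_BLINK_RATE_P0);
	writel(1, bar + NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0);
}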
*/ +#define NIG_REG_LLH1_CLS_TYPE 0x16084 +/* [RW 32] cm header for llh1 */ +#define NIG_REG_LLH1_CM_HEADER 0x10080 +#define NIG_REG_LLH1_ERROR_MASK 0x10090 +/* [RW 8] event id for llh1 */ +#define NIG_REG_LLH1_EVENT_ID 0x10088 +#define NIG_REG_LLH1_FUNC_MEM 0x161c0 +#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 +#define NIG_REG_LLH1_FUNC_MEM_SIZE 16 +/* [RW 1] When this bit is set; the LLH will classify the packet before + * sending it to the BRB or calculating WoL on it. This bit controls port 1 + * only. The legacy llh_multi_function_mode bit controls port 0. */ +#define NIG_REG_LLH1_MF_MODE 0x18614 +/* [RW 8] init credit counter for port1 in LLH */ +#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 +#define NIG_REG_LLH1_XCM_MASK 0x10134 +/* [RW 1] When this bit is set; the LLH will expect all packets to be with + e1hov */ +#define NIG_REG_LLH_E1HOV_MODE 0x160d8 +/* [RW 1] When this bit is set; the LLH will classify the packet before + sending it to the BRB or calculating WoL on it. */ +#define NIG_REG_LLH_MF_MODE 0x16024 +#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330 +#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334 +/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */ +#define NIG_REG_NIG_EMAC0_EN 0x1003c +/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */ +#define NIG_REG_NIG_EMAC1_EN 0x10040 +/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the + EMAC0 to strip the CRC from the ingress packets. */ +#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044 +/* [R 32] Interrupt register #0 read */ +#define NIG_REG_NIG_INT_STS_0 0x103b0 +#define NIG_REG_NIG_INT_STS_1 0x103c0 +/* [R 32] Legacy E1 and E1H location for parity error mask register. */ +#define NIG_REG_NIG_PRTY_MASK 0x103dc +/* [RW 32] Parity mask register #0 read/write */ +#define NIG_REG_NIG_PRTY_MASK_0 0x183c8 +#define NIG_REG_NIG_PRTY_MASK_1 0x183d8 +/* [R 32] Legacy E1 and E1H location for parity error status register. */ +#define NIG_REG_NIG_PRTY_STS 0x103d0 +/* [R 32] Parity register #0 read */ +#define NIG_REG_NIG_PRTY_STS_0 0x183bc +#define NIG_REG_NIG_PRTY_STS_1 0x183cc +/* [R 32] Legacy E1 and E1H location for parity error status clear register. */ +#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4 +/* [RC 32] Parity register #0 read clear */ +#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0 +#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0 +#define MCPR_IMC_COMMAND_ENABLE (1L<<31) +#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16 +#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28 +#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038 +/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in + * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be + * disabled when this bit is set. */ +#define NIG_REG_P0_HWPFC_ENABLE 0x18078 +#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 +#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 +/* [RW 1] Input enable for RX MAC interface. */ +#define NIG_REG_P0_MAC_IN_EN 0x185ac +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P0_MAC_OUT_EN 0x185b0 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4 +/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for + * future expansion) each priorty is to be mapped to. Bits 3:0 specify the + * COS for priority 0. Bits 31:28 specify the COS for priority 7. 
The 3-bit + * priority field is extracted from the outer-most VLAN in receive packet. + * Only COS 0 and COS 1 are supported in E2. */ +#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A + * priority is mapped to COS 0 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A + * priority is mapped to COS 1 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A + * priority is mapped to COS 3 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A + * priority is mapped to COS 4 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A + * priority is mapped to COS 5 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc +/* [R 1] RX FIFO for receiving data from MAC is empty. */ +/* [RW 15] Specify which of the credit registers the client is to be mapped + * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For + * clients that are not subject to WFQ credit blocking - their + * specifications here are not used. */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0 +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. 
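NIG_REG_P0_PKT_PRIORITY_TO_COS and the RX_COS*_PRIORITY_MASK registers described above work as a pair: the first maps each VLAN priority to a COS as eight 4-bit nibbles, and the masks say which PFC/SAFC priorities feed each COS. As a hedged sketch using this header's macros, the function below maps priorities 0-3 to COS 0 and 4-7 to COS 1 on port 0 (only COS 0 and 1 are supported on E2, per the comment).

#include <linux/io.h>

/* Sketch: nibble i of the 32-bit value is the COS for priority i, so
 * 0x11110000 sends priorities 0-3 to COS 0 and 4-7 to COS 1. */
static void port0_map_prios_two_cos(void __iomem *bar)
{
	writel(0x11110000, bar + NIG_REG_P0_PKT_PRIORITY_TO_COS);

	/* Matching PFC/SAFC priority masks: low four priorities to COS 0,
	 * high four to COS 1. */
	writel(0x000f, bar + NIG_REG_P0_RX_COS0_PRIORITY_MASK);
	writel(0x00f0, bar + NIG_REG_P0_RX_COS1_PRIORITY_MASK);
}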
The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c +/* [RW 5] Specify whether the client competes directly in the strict + * priority arbiter. The bits are mapped according to client ID (client IDs + * are defined in tx_arb_priority_client). Default value is set to enable + * strict priorities for clients 0-2 -- management and debug traffic. */ +#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8 +/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The + * bits are mapped according to client ID (client IDs are defined in + * tx_arb_priority_client). Default value is 0 for not using WFQ credit + * blocking. */ +#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec +/* [RW 32] Specify the upper bound that credit register 0 is allowed to + * reach. */ +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac +/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 + * when it is time to increment. */ +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c +/* [RW 12] Specify the number of strict priority arbitration slots between + * two round-robin arbitration slots to avoid starvation. A value of 0 means + * no strict priority cycles - the strict priority with anti-starvation + * arbiter becomes a round-robin arbiter. */ +#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4 +/* [RW 15] Specify the client number to be assigned to each priority of the + * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0] + * are for priority 0 client; bits [14:12] are for priority 4 client. The + * clients are assigned the following IDs: 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000) + * for management at priority 0; debug traffic at priorities 1 and 2; COS0 + * traffic at priority 3; and COS1 traffic at priority 4. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c +#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 +#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 +/* [RW 32] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 31:0 of the 36-bit + * value. Priority 0 is the highest priority. 
Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680 +/* [RW 4] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 35:32 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 +#define NIG_REG_P1_MAC_IN_EN 0x185c0 +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P1_MAC_OUT_EN 0x185c4 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8 +/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for + * future expansion) each priorty is to be mapped to. Bits 3:0 specify the + * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit + * priority field is extracted from the outer-most VLAN in receive packet. + * Only COS 0 and COS 1 are supported in E2. */ +#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A + * priority is mapped to COS 0 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A + * priority is mapped to COS 1 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8 +/* [R 1] RX FIFO for receiving data from MAC is empty. */ +#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c +/* [R 1] TLLH FIFO is empty. */ +#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). 
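Several of the E3 B0 arbiter registers above split a 36-bit value across an LSB/MSB register pair. As a sketch, the helper below programs the port-0 client-to-credit-register map from a 64-bit value carrying the 36 significant bits, for example the documented reset default 0x543210876; it assumes only this header's macros and <linux/io.h>.

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: bits [31:0] go to the LSB register, bits [35:32] to the MSB
 * register. map = 0x543210876ULL reproduces the documented default
 * (client 0, management, uses credit reg 6; COS0-5 use credit regs 0-5). */
static void port0_write_credit_map2(void __iomem *bar, u64 map)
{
	writel((u32)map, bar + NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB);
	writel((u32)(map >> 32) & 0xf,
	       bar + NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB);
}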
Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec +/* [RW 9] Specify whether the client competes directly in the strict + * priority arbiter. The bits are mapped according to client ID (client IDs + * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. + * Default value is set to enable strict priorities for all clients. */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234 +/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The + * bits are mapped according to client ID (client IDs are defined in + * tx_arb_priority_client2): 0-management; 1-debug traffic from this port; + * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 + * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is + * 0 for not using WFQ credit blocking. */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4 +/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 + * when it is time to increment. */ +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0 +/* [RW 12] Specify the number of strict priority arbitration slots between + two round-robin arbitration slots to avoid starvation. A value of 0 means + no strict priority cycles - the strict priority with anti-starvation + arbiter becomes a round-robin arbiter. */ +#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240 +/* [RW 32] Specify the client number to be assigned to each priority of the + strict priority arbiter. This register specifies bits 31:0 of the 36-bit + value. Priority 0 is the highest priority. 
Bits [3:0] are for priority 0 + client; bits [35-32] are for priority 8 client. The clients are assigned + the following IDs: 0-management; 1-debug traffic from this port; 2-debug + traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + accommodate the 9 input clients to ETS arbiter. Note that this register + is the same as the one for port 0, except that port 1 only has COS 0-2 + traffic. There is no traffic for COS 3-5 of port 1. */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0 +/* [RW 4] Specify the client number to be assigned to each priority of the + strict priority arbiter. This register specifies bits 35:32 of the 36-bit + value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + client; bits [35-32] are for priority 8 client. The clients are assigned + the following IDs: 0-management; 1-debug traffic from this port; 2-debug + traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + accommodate the 9 input clients to ETS arbiter. Note that this register + is the same as the one for port 0, except that port 1 only has COS 0-2 + traffic. There is no traffic for COS 3-5 of port 1. */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 +/* [R 1] TX FIFO for transmitting data to MAC is empty. */ +#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 +/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets + forwarded to the host. */ +#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 +/* [RW 32] Specify the upper bound that credit register 0 is allowed to + * reach. */ +/* [RW 1] Pause enable for port0. This register may get 1 only when + ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same + port */ +#define NIG_REG_PAUSE_ENABLE_0 0x160c0 +#define NIG_REG_PAUSE_ENABLE_1 0x160c4 +/* [RW 1] Input enable for RX PBF LP IF */ +#define NIG_REG_PBF_LB_IN_EN 0x100b4 +/* [RW 1] Value of this register will be transmitted to port swap when + ~nig_registers_strap_override.strap_override =1 */ +#define NIG_REG_PORT_SWAP 0x10394 +/* [RW 1] PPP enable for port0. 
This register may get 1 only when + * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the + * same port */ +#define NIG_REG_PPP_ENABLE_0 0x160b0 +#define NIG_REG_PPP_ENABLE_1 0x160b4 +/* [RW 1] output enable for RX parser descriptor IF */ +#define NIG_REG_PRS_EOP_OUT_EN 0x10104 +/* [RW 1] Input enable for RX parser request IF */ +#define NIG_REG_PRS_REQ_IN_EN 0x100b8 +/* [RW 5] control to serdes - CL45 DEVAD */ +#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370 +/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */ +#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c +/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */ +#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374 +/* [R 1] status from serdes0 that inputs to interrupt logic of link status */ +#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578 +/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure + for port0 */ +#define NIG_REG_STAT0_BRB_DISCARD 0x105f0 +/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure + for port0 */ +#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8 +/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that + between 1024 and 1522 bytes for port0 */ +#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 +/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that + between 1523 bytes and above for port0 */ +#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760 +/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure + for port1 */ +#define NIG_REG_STAT1_BRB_DISCARD 0x10628 +/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that + between 1024 and 1522 bytes for port1 */ +#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0 +/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that + between 1523 bytes and above for port1 */ +#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0 +/* [WB_R 64] Rx statistics : User octets received for LP */ +#define NIG_REG_STAT2_BRB_OCTET 0x107e0 +#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 +#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c +/* [RW 1] port swap mux selection. If this register equal to 0 then port + swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then + ort swap is equal to ~nig_registers_port_swap.port_swap */ +#define NIG_REG_STRAP_OVERRIDE 0x10398 +/* [RW 1] output enable for RX_XCM0 IF */ +#define NIG_REG_XCM0_OUT_EN 0x100f0 +/* [RW 1] output enable for RX_XCM1 IF */ +#define NIG_REG_XCM1_OUT_EN 0x100f4 +/* [RW 1] control to xgxs - remote PHY in-band MDIO */ +#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348 +/* [RW 5] control to xgxs - CL45 DEVAD */ +#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c +/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */ +#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338 +/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */ +#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340 +/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. 
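The pause_enable, ppp_enable and safc_enable comments above all state the same constraint from different angles: only one of the three flow-control modes may be enabled on a port at a time. A small sketch for port 0, using this header's macros and standard accessors, not the driver's flow-control routine:

#include <linux/io.h>

/* Sketch: clear PPP and SAFC (llfc_enable) first so that writing 1 to
 * pause_enable is legal under the constraint quoted above. */
static void port0_select_plain_pause(void __iomem *bar)
{
	writel(0, bar + NIG_REG_PPP_ENABLE_0);
	writel(0, bar + NIG_REG_LLFC_ENABLE_0);	/* SAFC enable for port 0 */
	writel(1, bar + NIG_REG_PAUSE_ENABLE_0);
}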
*/ +#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680 +/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */ +#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684 +/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */ +#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8 +/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */ +#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0 +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */ +#define PBF_REG_COS0_UPPER_BOUND 0x15c05c +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 0. */ +#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 1. */ +#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4 +/* [RW 31] The weight of COS0 in the ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT 0x15c054 +/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8 +/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0 +/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */ +#define PBF_REG_COS1_UPPER_BOUND 0x15c060 +/* [RW 31] The weight of COS1 in the ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT 0x15c058 +/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac +/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4 +/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0 +/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8 +/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */ +#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4 +/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */ +#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8 +/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */ +#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc +/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_LB_Q 0x140338 +/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_Q0 0x14033c +/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_Q1 0x140340 +/* [RW 1] Disable processing further tasks from port 0 (after ending the + current task in process). */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c +/* [RW 1] Disable processing further tasks from port 1 (after ending the + current task in process). */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060 +/* [RW 1] Disable processing further tasks from port 4 (after ending the + current task in process). */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c +#define PBF_REG_DISABLE_PF 0x1402e8 +/* [RW 18] For port 0: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. 
For clients which are not credit blocked; their mapping + * is dont care. */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288 +/* [RW 9] For port 1: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. For clients which are not credit blocked; their mapping + * is dont care. */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c +/* [RW 6] For port 0: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278 +/* [RW 3] For port 1: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c +/* [RW 6] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280 +/* [RW 3] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284 +/* [RW 16] For port 0: The number of strict priority arbitration slots + * between 2 RR arbitration slots. A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0 +/* [RW 16] For port 1: The number of strict priority arbitration slots + * between 2 RR arbitration slots. A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4 +/* [RW 18] For port 0: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270 +/* [RW 9] For port 1: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274 +/* [RW 1] Indicates that ETS is performed between the COSes in the command + * arbiter. If reset strict priority w/ anti-starvation will be performed + * w/o WFQ. */ +#define PBF_REG_ETS_ENABLED 0x15c050 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8 +/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest + * priority in the command arbiter. */ +#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c +#define PBF_REG_IF_ENABLE_REG 0x140044 +/* [RW 1] Init bit. When set the initial credits are copied to the credit + registers (except the port credits). Should be set and then reset after + the configuration of the block has ended. 
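Taken together, the PBF ETS registers above describe one configuration flow: decide which clients are strict and which do WFQ, map the WFQ clients to credit registers, give those COSes weights, set the anti-starvation slot count, and finally set ets_enabled. The sketch below strings those writes together for port 0 with purely illustrative values; the per-bit client layout and the 3-bit width of each credit-map field are assumptions, since this file does not spell them out, and this is not the driver's own ETS routine.

#include <linux/io.h>

/* Sketch with illustrative values: clients 0 and 1 (assumed to be two
 * COS queues) do WFQ through credit registers 0 and 1; everything else
 * stays strict. */
static void port0_ets_enable_sketch(void __iomem *bar)
{
	/* Assumed 3 bits per client in the 18-bit map: client 0 -> credit
	 * reg 0, client 1 -> credit reg 1. */
	writel(1 << 3, bar + PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0);

	/* Clients 0 and 1 are WFQ credit-blocked, not directly strict. */
	writel(0x3, bar + PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0);
	writel(0x0, bar + PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0);

	/* Relative WFQ weights for the two COSes (illustrative). */
	writel(100, bar + PBF_REG_COS0_WEIGHT_P0);
	writel(200, bar + PBF_REG_COS1_WEIGHT_P0);

	/* Anti-starvation: 8 strict slots between two RR slots. */
	writel(8, bar + PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0);

	/* Turn on ETS/WFQ arbitration between the COSes. */
	writel(1, bar + PBF_REG_ETS_ENABLED);
}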
*/ +#define PBF_REG_INIT 0x140000 +/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_LB_Q 0x15c248 +/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_Q0 0x15c230 +/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_Q1 0x15c234 +/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is + copied to the credit register. Should be set and then reset after the + configuration of the port has ended. */ +#define PBF_REG_INIT_P0 0x140004 +/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is + copied to the credit register. Should be set and then reset after the + configuration of the port has ended. */ +#define PBF_REG_INIT_P1 0x140008 +/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is + copied to the credit register. Should be set and then reset after the + configuration of the port has ended. */ +#define PBF_REG_INIT_P4 0x14000c +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * the LB queue. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 0. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 1. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c +/* [RW 1] Enable for mac interface 0. */ +#define PBF_REG_MAC_IF0_ENABLE 0x140030 +/* [RW 1] Enable for mac interface 1. */ +#define PBF_REG_MAC_IF1_ENABLE 0x140034 +/* [RW 1] Enable for the loopback interface. */ +#define PBF_REG_MAC_LB_ENABLE 0x140040 +/* [RW 6] Bit-map indicating which headers must appear in the packet */ +#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4 +/* [RW 16] The number of strict priority arbitration slots between 2 RR + * arbitration slots. A value of 0 means no strict priority cycles; i.e. the + * strict-priority w/ anti-starvation arbiter is a RR arbiter. */ +#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064 +/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause + not suppoterd. */ +#define PBF_REG_P0_ARB_THRSH 0x1400e4 +/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */ +#define PBF_REG_P0_CREDIT 0x140200 +/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte + lines. */ +#define PBF_REG_P0_INIT_CRD 0x1400d0 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 0. Reset upon init. */ +#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308 +/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */ +#define PBF_REG_P0_PAUSE_ENABLE 0x140014 +/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */ +#define PBF_REG_P0_TASK_CNT 0x140204 +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 0. Reset upon init. */ +#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */ +#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc +/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port + * buffers in 16 byte lines. */ +#define PBF_REG_P1_CREDIT 0x140208 +/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port + * buffers in 16 byte lines. 
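PBF_REG_INIT and the per-port init bits above are pulse-style controls: set them once configuration is done, then clear them again. A minimal sketch of that pulse, using only this header's macros:

#include <linux/io.h>

/* Sketch: pulse the global init bit after the block has been
 * configured; the credit registers (except the port credits) are
 * loaded from their initial values while the bit is set. The per-port
 * bits (PBF_REG_INIT_P0/P1/P4) are pulsed the same way. */
static void pbf_pulse_init(void __iomem *bar)
{
	writel(1, bar + PBF_REG_INIT);
	writel(0, bar + PBF_REG_INIT);
}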
*/ +#define PBF_REG_P1_INIT_CRD 0x1400d4 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 1. Reset upon init. */ +#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c +/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */ +#define PBF_REG_P1_TASK_CNT 0x14020c +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 1. Reset upon init. */ +#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */ +#define PBF_REG_P1_TQ_OCCUPANCY 0x140300 +/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ +#define PBF_REG_P4_CREDIT 0x140210 +/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte + lines. */ +#define PBF_REG_P4_INIT_CRD 0x1400e0 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 4. Reset upon init. */ +#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310 +/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */ +#define PBF_REG_P4_TASK_CNT 0x140214 +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 4. Reset upon init. */ +#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */ +#define PBF_REG_P4_TQ_OCCUPANCY 0x140304 +/* [RW 5] Interrupt mask register #0 read/write */ +#define PBF_REG_PBF_INT_MASK 0x1401d4 +/* [R 5] Interrupt register #0 read */ +#define PBF_REG_PBF_INT_STS 0x1401c8 +/* [RW 20] Parity mask register #0 read/write */ +#define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [RC 20] Parity register #0 read clear */ +#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PBF_REG_TAG_ETHERTYPE_0 0x15c090 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity */ +#define PBF_REG_TAG_LEN_0 0x15c09c +/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task + * queue. Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c +/* [R 32] Cyclic counter for number of 8 byte lines freed from the task + * queue 0. Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390 +/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1. + * Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394 +/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB + * queue. */ +#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8 +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */ +#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. 
*/ +#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 +#define PB_REG_CONTROL 0 +/* [RW 2] Interrupt mask register #0 read/write */ +#define PB_REG_PB_INT_MASK 0x28 +/* [R 2] Interrupt register #0 read */ +#define PB_REG_PB_INT_STS 0x1c +/* [RW 4] Parity mask register #0 read/write */ +#define PB_REG_PB_PRTY_MASK 0x38 +/* [R 4] Parity register #0 read */ +#define PB_REG_PB_PRTY_STS 0x2c +/* [RC 4] Parity register #0 read clear */ +#define PB_REG_PB_PRTY_STS_CLR 0x30 +#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) +#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) +#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5) +#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2) +/* [R 8] Config space A attention dirty bits. Each bit indicates that the + * corresponding PF generates config space A attention. Set by PXP. Reset by + * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits + * from both paths. */ +#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010 +/* [R 8] Config space B attention dirty bits. Each bit indicates that the + * corresponding PF generates config space B attention. Set by PXP. Reset by + * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits + * from both paths. */ +#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014 +/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194 +/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask; + * its[8:0]-address. Bits [1:0] must be zero (DW resolution address). */ +#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c +/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c +/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100 +/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108 +/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac +/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates + * that the FLR register of the corresponding PF was set. Set by PXP. Reset + * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits + * from both paths. */ +#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028 +/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1 + * to a bit in this register in order to clear the corresponding bit in + * flr_request_pf_7_0 register. Note: register contains bits from both + * paths. */ +#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418 +/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024 +/* [R 32] FLR request attention dirty bits for VFs 0 to 31. 
Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018 +/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c +/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020 +/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit + * 0 - Target memory read arrived with a correctable error. Bit 1 - Target + * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW + * arrived with a correctable error. Bit 3 - Configuration RW arrived with + * an uncorrectable error. Bit 4 - Completion with Configuration Request + * Retry Status. Bit 5 - Expansion ROM access received with a write request. + * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and + * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010; + * and pcie_rx_last not asserted. */ +#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068 +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 +#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [R 9] Interrupt register #0 read */ +#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 +/* [RC 9] Interrupt register #0 read clear */ +#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c +/* [RW 2] Parity mask register #0 read/write */ +#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4 +/* [R 2] Parity register #0 read */ +#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8 +/* [RC 2] Parity register #0 read clear */ +#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac +/* [R 13] Details of first request received with error. [2:0] - PFID. [3] - + * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion + * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 - + * completer abort. 3 - Illegal value for this field. [12] valid - indicates + * if there was a completion error since the last time this register was + * cleared. */ +#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080 +/* [R 18] Details of first ATS Translation Completion request received with + * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code - + * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 - + * unsupported request. 2 - completer abort. 3 - Illegal value for this + * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a + * completion error since the last time this register was cleared. */ +#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084 +/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to + * a bit in this register in order to clear the corresponding bit in + * shadow_bme_pf_7_0 register. MCP should never use this unless a + * work-around is needed. Note: register contains bits from both paths. */ +#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458 +/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the + * VF enable register of the corresponding PF is written to 0 and was + * previously 1. Set by PXP. 
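Registers such as PGLUE_B_REG_RX_ERR_DETAILS above pack their report into bit fields ([2:0] PFID, [3] VF_VALID, [9:4] VFID, [11:10] error code, [12] valid). A small decode sketch for a raw 32-bit read of that register; the helper and variable names are illustrative, not taken from the driver:

#include <linux/printk.h>
#include <linux/types.h>

static void pglue_b_decode_rx_err(u32 details)
{
	u32 pfid     = details & 0x7;		/* [2:0]   PFID          */
	u32 vf_valid = (details >> 3) & 0x1;	/* [3]     VF_VALID      */
	u32 vfid     = (details >> 4) & 0x3f;	/* [9:4]   VFID          */
	u32 err_code = (details >> 10) & 0x3;	/* [11:10] error code    */
	u32 valid    = (details >> 12) & 0x1;	/* [12]    error latched */

	pr_info("rx_err: valid %u pfid %u vf_valid %u vfid %u code %u\n",
		valid, pfid, vf_valid, vfid, err_code);
}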
Reset by MCP writing 1 to + * sr_iov_disabled_request_clr. Note: register contains bits from both + * paths. */ +#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030 +/* [R 32] Indicates the status of tags 32-63. 0 - tags is used - read + * completion did not return yet. 1 - tag is unused. Same functionality as + * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */ +#define PGLUE_B_REG_TAGS_63_32 0x9244 +/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170 +/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4 +/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc +/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0 +/* [R 32] Address [31:0] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098 +/* [R 32] Address [63:32] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c +/* [R 31] Details of first read request not submitted due to error. [4:0] + * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request. + * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] - + * VFID. */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0 +/* [R 26] Details of first read request not submitted due to error. [15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4 +/* [R 32] Address [31:0] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088 +/* [R 32] Address [63:32] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c +/* [R 31] Details of first write request not submitted due to error. [4:0] + * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] + * - VFID. */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090 +/* [R 26] Details of first write request not submitted due to error. [15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094 +/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask; + * its[4:0]-address relative to start_offset_a. Bits [1:0] can have any + * value (Byte resolution address). 
*/ +#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128 +#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c +#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130 +#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134 +#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138 +#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c +#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140 +/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c +/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180 +/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184 +/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8 +/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0 +/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4 +/* [R 26] Details of first target VF request accessing VF GRC space that + * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write. + * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a + * request accessing VF GRC space that failed permission check since the + * last time this register was cleared. Permission checks are: function + * permission; R/W permission; address range permission. */ +#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234 +/* [R 31] Details of first target VF request with length violation (too many + * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address). + * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30] + * valid - indicates if there was a request with length violation since the + * last time this register was cleared. Length violations: length of more + * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and + * length is more than 1 DW. */ +#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230 +/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates + * that there was a completion with uncorrectable error for the + * corresponding PF. Set by PXP. Reset by MCP writing 1 to + * was_error_pf_7_0_clr. */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c +/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1 + * to a bit in this register in order to clear the corresponding bit in + * flr_request_pf_7_0 register. */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470 +/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_127_96_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078 +/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP + * writes 1 to a bit in this register in order to clear the corresponding + * bit in was_error_vf_127_96 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474 +/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_31_0_clr. 
*/ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c +/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_31_0 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478 +/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_63_32_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070 +/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_63_32 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c +/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_95_64_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074 +/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_95_64 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480 +/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188 +/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec +/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4 +/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8 +#define PRS_REG_A_PRSU_20 0x40134 +/* [R 8] debug only: CFC load request current credit. Transaction based. */ +#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 +/* [R 8] debug only: CFC search request current credit. Transaction based. */ +#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168 +/* [RW 6] The initial credit for the search message to the CFC interface. + Credit is transaction based. */ +#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c +/* [RW 24] CID for port 0 if no match */ +#define PRS_REG_CID_PORT_0 0x400fc +/* [RW 32] The CM header for flush message where 'load existed' bit in CFC + load response is reset and packet type is 0. Used in packet start message + to TCM. */ +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0 +/* [RW 32] The CM header for flush message where 'load existed' bit in CFC + load response is set and packet type is 0. Used in packet start message + to TCM. */ +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0 +/* [RW 32] The CM header for a match and packet type 1 for loopback port. + Used in packet start message to TCM. 
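The flr_request and was_error register pairs above all follow the same write-one-to-clear convention: the read-only register latches a dirty bit per PF/VF, and writing a 1 to the same bit position in the matching *_CLR register clears it (the comments note it is normally the MCP that performs these writes). A hedged sketch of that pattern for the VF 0-31 was_error bank; the helper name is made up for illustration:

#include <linux/bits.h>
#include <linux/io.h>

/* Illustrative write-one-to-clear acknowledge for one VF in the 0-31 bank;
 * the 32-63, 64-95 and 96-127 banks work the same way. */
static void pglue_b_ack_was_error_vf(void __iomem *regs, unsigned int vf)
{
	if (vf < 32)
		writel(BIT(vf), regs + PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR);
}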
*/ +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0 +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4 +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8 +/* [RW 32] The CM header for a match and packet type 0. Used in packet start + message to TCM. */ +#define PRS_REG_CM_HDR_TYPE_0 0x40078 +#define PRS_REG_CM_HDR_TYPE_1 0x4007c +#define PRS_REG_CM_HDR_TYPE_2 0x40080 +#define PRS_REG_CM_HDR_TYPE_3 0x40084 +#define PRS_REG_CM_HDR_TYPE_4 0x40088 +/* [RW 32] The CM header in case there was not a match on the connection */ +#define PRS_REG_CM_NO_MATCH_HDR 0x400b8 +/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */ +#define PRS_REG_E1HOV_MODE 0x401c8 +/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet + start message to TCM. */ +#define PRS_REG_EVENT_ID_1 0x40054 +#define PRS_REG_EVENT_ID_2 0x40058 +#define PRS_REG_EVENT_ID_3 0x4005c +/* [RW 16] The Ethernet type value for FCoE */ +#define PRS_REG_FCOE_TYPE 0x401d0 +/* [RW 8] Context region for flush packet with packet type 0. Used in CFC + load request message. */ +#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004 +#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008 +#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c +#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010 +#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014 +#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018 +#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c +#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define PRS_REG_HDRS_AFTER_BASIC 0x40238 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header for port 0 packets. */ +#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 +#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 +/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PRS_REG_HDRS_AFTER_TAG_0 0x40248 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for + * port 0 packets */ +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 +/* [RW 4] The increment value to send in the CFC load request message */ +#define PRS_REG_INC_VALUE 0x40048 +/* [RW 6] Bit-map indicating which headers must appear in the packet */ +#define PRS_REG_MUST_HAVE_HDRS 0x40254 +/* [RW 6] Bit-map indicating which headers must appear in the packet for + * port 0 packets */ +#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c +#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac +#define PRS_REG_NIC_MODE 0x40138 +/* [RW 8] The 8-bit event ID for cases where there is no match on the + connection. Used in packet start message to TCM. */ +#define PRS_REG_NO_MATCH_EVENT_ID 0x40070 +/* [ST 24] The number of input CFC flush packets */ +#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128 +/* [ST 32] The number of cycles the Parser halted its operation since it + could not allocate the next serial number */ +#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130 +/* [ST 24] The number of input packets */ +#define PRS_REG_NUM_OF_PACKETS 0x40124 +/* [ST 24] The number of input transparent flush packets */ +#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c +/* [RW 8] Context region for received Ethernet packet with a match and + packet type 0. 
Used in CFC load request message */
+#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
+#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
+#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
+#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
+#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
+#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
+#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
+#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
+/* [R 2] debug only: Number of pending requests for CAC on port 0. */
+#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
+/* [R 2] debug only: Number of pending requests for header parsing. */
+#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
+/* [R 1] Interrupt register #0 read */
+#define PRS_REG_PRS_INT_STS 0x40188
+/* [RW 8] Parity mask register #0 read/write */
+#define PRS_REG_PRS_PRTY_MASK 0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
+/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
+ request message */
+#define PRS_REG_PURE_REGIONS 0x40024
+/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
+/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
+/* [R 4] debug only: SRC current credit. Transaction based. */
+#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0 0x4022c
+/* [R 8] debug only: TCM current credit. Cycle based. */
+#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
+/* [R 8] debug only: TSDM current credit. Transaction based. */
+#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+/* [R 6] Debug only: Number of used entries in the data FIFO */
+#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
+/* [R 7] Debug only: Number of used entries in the header FIFO */
- #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
- #define PXP2_REG_PGL_ADDR_88_F0 0x120534
- #define PXP2_REG_PGL_ADDR_8C_F0 0x120538
- #define PXP2_REG_PGL_ADDR_90_F0 0x12053c
- #define PXP2_REG_PGL_ADDR_94_F0 0x120540
++#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
++#define PXP2_REG_PGL_ADDR_88_F0 0x120534
++/* [R 32] GRC address for configuration access to PCIE config address 0x88.
++ * any write to this PCIE address will cause a GRC write access to the
++ * address that's in this register */
++#define PXP2_REG_PGL_ADDR_88_F1 0x120544
++#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
++/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
++ * any write to this PCIE address will cause a GRC write access to the
++ * address that's in this register */
++#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
++#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
++/* [R 32] GRC address for configuration access to PCIE config address 0x90.
++ * any write to this PCIE address will cause a GRC write access to the
++ * address that's in this register */
++#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
++#define PXP2_REG_PGL_ADDR_94_F0 0x120540
++/* [R 32] GRC address for configuration access to PCIE config address 0x94.
++ * any write to this PCIE address will cause a GRC write access to the
++ * address that's in this register */
++#define PXP2_REG_PGL_ADDR_94_F1 0x120550
+#define PXP2_REG_PGL_CONTROL0 0x120490
+#define PXP2_REG_PGL_CONTROL1 0x120514
+#define PXP2_REG_PGL_DEBUG 0x120520
+/* [RW 32] third dword data of expansion rom request. this register is
+ special. reading from it provides a vector of outstanding read requests. if
+ a bit is zero it means that a read request on the corresponding tag did
+ not finish yet (not all completions have arrived for it) */
+#define PXP2_REG_PGL_EXP_ROM2 0x120808
+/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+ its[15:0]-address */
+#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
+#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
+#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
+#define PXP2_REG_PGL_INT_CSDM_3 0x120500
+#define PXP2_REG_PGL_INT_CSDM_4 0x120504
+#define PXP2_REG_PGL_INT_CSDM_5 0x120508
+#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
+#define PXP2_REG_PGL_INT_CSDM_7 0x120510
+/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
+ its[15:0]-address */
+#define PXP2_REG_PGL_INT_TSDM_0 0x120494
+#define PXP2_REG_PGL_INT_TSDM_1 0x120498
+#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
+#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
+#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
+#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
+#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
+#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
+/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
+ its[15:0]-address */
+#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
+#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
+#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
+#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
+#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
+#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
+#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
+#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
+/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
+ its[15:0]-address */
+#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
+#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
+#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
+#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
+#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
+#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
+#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
+#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
+/* [RW 3] this field allows one function to pretend being another function
+ when accessing any BAR mapped resource within the device. the value of
+ the field is the number of the function that will be accessed
+ effectively.
after software write to this bit it must read it in order to + know that the new value is updated */ +#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674 +#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678 +#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c +#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680 +#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684 +#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688 +#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c +#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690 +/* [R 1] this bit indicates that a read request was blocked because of + bus_master_en was deasserted */ +#define PXP2_REG_PGL_READ_BLOCKED 0x120568 +#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8 +/* [R 18] debug only */ +#define PXP2_REG_PGL_TXW_CDTS 0x12052c +/* [R 1] this bit indicates that a write request was blocked because of + bus_master_en was deasserted */ +#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564 +#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0 +#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4 +#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8 +#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4 +#define PXP2_REG_PSWRQ_BW_ADD28 0x120228 +#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8 +#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4 +#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8 +#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc +#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0 +#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c +#define PXP2_REG_PSWRQ_BW_L1 0x1202b0 +#define PXP2_REG_PSWRQ_BW_L10 0x1202d4 +#define PXP2_REG_PSWRQ_BW_L11 0x1202d8 +#define PXP2_REG_PSWRQ_BW_L2 0x1202b4 +#define PXP2_REG_PSWRQ_BW_L28 0x120318 +#define PXP2_REG_PSWRQ_BW_L3 0x1202b8 +#define PXP2_REG_PSWRQ_BW_L6 0x1202c4 +#define PXP2_REG_PSWRQ_BW_L7 0x1202c8 +#define PXP2_REG_PSWRQ_BW_L8 0x1202cc +#define PXP2_REG_PSWRQ_BW_L9 0x1202d0 +#define PXP2_REG_PSWRQ_BW_RD 0x120324 +#define PXP2_REG_PSWRQ_BW_UB1 0x120238 +#define PXP2_REG_PSWRQ_BW_UB10 0x12025c +#define PXP2_REG_PSWRQ_BW_UB11 0x120260 +#define PXP2_REG_PSWRQ_BW_UB2 0x12023c +#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0 +#define PXP2_REG_PSWRQ_BW_UB3 0x120240 +#define PXP2_REG_PSWRQ_BW_UB6 0x12024c +#define PXP2_REG_PSWRQ_BW_UB7 0x120250 +#define PXP2_REG_PSWRQ_BW_UB8 0x120254 +#define PXP2_REG_PSWRQ_BW_UB9 0x120258 +#define PXP2_REG_PSWRQ_BW_WR 0x120328 +#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000 +#define PXP2_REG_PSWRQ_QM0_L2P 0x120038 +#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054 +#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c +#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP2_REG_PXP2_INT_MASK_0 0x120578 +/* [R 32] Interrupt register #0 read */ +#define PXP2_REG_PXP2_INT_STS_0 0x12056c +#define PXP2_REG_PXP2_INT_STS_1 0x120608 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570 +/* [RW 32] Parity mask register #0 read/write */ +#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 +#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 +/* [R 32] Parity register #0 read */ +#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c +#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c +/* [RC 32] Parity register #0 read clear */ +#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 +#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 +/* [R 1] Debug only: The 'almost full' indication from each fifo (gives + indication about backpressure) */ +#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 +/* [R 8] Debug only: The blocks counter - number of unused block ids */ +#define PXP2_REG_RD_BLK_CNT 0x120418 +/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer. + Must be bigger than 6. Normally should not be changed. 
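As the comment spanning the pretend-function registers above explains, a write to PXP2_REG_PGL_PRETEND_FUNC_Fn only becomes effective once software reads the register back. A minimal sketch of that write-then-read-back ordering for function 0; direct readl/writel use here is an assumption, not the driver's code path:

#include <linux/io.h>
#include <linux/types.h>

static void pxp2_pretend_func0(void __iomem *regs, u32 pretend_func_num)
{
	writel(pretend_func_num, regs + PXP2_REG_PGL_PRETEND_FUNC_F0);
	/* Read back so the new pretend value is known to be applied before
	 * any further BAR accesses rely on it. */
	readl(regs + PXP2_REG_PGL_PRETEND_FUNC_F0);
}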
*/ +#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c +/* [RW 2] CDU byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404 +/* [RW 1] When '1'; inputs to the PSWRD block are ignored */ +#define PXP2_REG_RD_DISABLE_INPUTS 0x120374 +/* [R 1] PSWRD internal memories initialization is done */ +#define PXP2_REG_RD_INIT_DONE 0x120370 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq10 */ +#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq11 */ +#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq17 */ +#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq18 */ +#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq19 */ +#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq22 */ +#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq25 */ +#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq6 */ +#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq9 */ +#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c +/* [RW 2] PBF byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4 +/* [R 1] Debug only: Indication if delivery ports are idle */ +#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c +#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420 +/* [RW 2] QM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8 +/* [R 7] Debug only: The SR counter - number of unused sub request ids */ +#define PXP2_REG_RD_SR_CNT 0x120414 +/* [RW 2] SRC byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400 +/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must + be bigger than 1. Normally should not be changed. 
*/ +#define PXP2_REG_RD_SR_NUM_CFG 0x120408 +/* [RW 1] Signals the PSWRD block to start initializing internal memories */ +#define PXP2_REG_RD_START_INIT 0x12036c +/* [RW 2] TM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc +/* [RW 10] Bandwidth addition to VQ0 write requests */ +#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc +/* [RW 10] Bandwidth addition to VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec +/* [RW 10] Bandwidth addition to VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0 +/* [RW 10] Bandwidth addition to VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4 +/* [RW 10] Bandwidth addition to VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8 +/* [RW 10] Bandwidth addition to VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc +/* [RW 10] Bandwidth addition to VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD17 0x120200 +/* [RW 10] Bandwidth addition to VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD18 0x120204 +/* [RW 10] Bandwidth addition to VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD19 0x120208 +/* [RW 10] Bandwidth addition to VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c +/* [RW 10] Bandwidth addition to VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD22 0x120210 +/* [RW 10] Bandwidth addition to VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD23 0x120214 +/* [RW 10] Bandwidth addition to VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD24 0x120218 +/* [RW 10] Bandwidth addition to VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c +/* [RW 10] Bandwidth addition to VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD26 0x120220 +/* [RW 10] Bandwidth addition to VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD27 0x120224 +/* [RW 10] Bandwidth addition to VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc +/* [RW 10] Bandwidth addition to VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0 +/* [RW 10] Bandwidth Typical L for VQ0 Read requests */ +#define PXP2_REG_RQ_BW_RD_L0 0x1202ac +/* [RW 10] Bandwidth Typical L for VQ12 Read requests */ +#define PXP2_REG_RQ_BW_RD_L12 0x1202dc +/* [RW 10] Bandwidth Typical L for VQ13 Read requests */ +#define PXP2_REG_RQ_BW_RD_L13 0x1202e0 +/* [RW 10] Bandwidth Typical L for VQ14 Read requests */ +#define PXP2_REG_RQ_BW_RD_L14 0x1202e4 +/* [RW 10] Bandwidth Typical L for VQ15 Read requests */ +#define PXP2_REG_RQ_BW_RD_L15 0x1202e8 +/* [RW 10] Bandwidth Typical L for VQ16 Read requests */ +#define PXP2_REG_RQ_BW_RD_L16 0x1202ec +/* [RW 10] Bandwidth Typical L for VQ17 Read requests */ +#define PXP2_REG_RQ_BW_RD_L17 0x1202f0 +/* [RW 10] Bandwidth Typical L for VQ18 Read requests */ +#define PXP2_REG_RQ_BW_RD_L18 0x1202f4 +/* [RW 10] Bandwidth Typical L for VQ19 Read requests */ +#define PXP2_REG_RQ_BW_RD_L19 0x1202f8 +/* [RW 10] Bandwidth Typical L for VQ20 Read requests */ +#define PXP2_REG_RQ_BW_RD_L20 0x1202fc +/* [RW 10] Bandwidth Typical L for VQ22 Read requests */ +#define PXP2_REG_RQ_BW_RD_L22 0x120300 +/* [RW 10] Bandwidth Typical L for VQ23 Read requests */ +#define PXP2_REG_RQ_BW_RD_L23 0x120304 +/* [RW 10] Bandwidth Typical L for VQ24 Read requests */ +#define PXP2_REG_RQ_BW_RD_L24 0x120308 +/* [RW 10] Bandwidth Typical L for VQ25 Read requests */ +#define PXP2_REG_RQ_BW_RD_L25 0x12030c +/* [RW 10] Bandwidth Typical L for VQ26 Read requests */ +#define PXP2_REG_RQ_BW_RD_L26 0x120310 +/* [RW 10] Bandwidth Typical L for VQ27 Read 
requests */ +#define PXP2_REG_RQ_BW_RD_L27 0x120314 +/* [RW 10] Bandwidth Typical L for VQ4 Read requests */ +#define PXP2_REG_RQ_BW_RD_L4 0x1202bc +/* [RW 10] Bandwidth Typical L for VQ5 Read- currently not used */ +#define PXP2_REG_RQ_BW_RD_L5 0x1202c0 +/* [RW 7] Bandwidth upper bound for VQ0 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234 +/* [RW 7] Bandwidth upper bound for VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264 +/* [RW 7] Bandwidth upper bound for VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268 +/* [RW 7] Bandwidth upper bound for VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c +/* [RW 7] Bandwidth upper bound for VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270 +/* [RW 7] Bandwidth upper bound for VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274 +/* [RW 7] Bandwidth upper bound for VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278 +/* [RW 7] Bandwidth upper bound for VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c +/* [RW 7] Bandwidth upper bound for VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280 +/* [RW 7] Bandwidth upper bound for VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284 +/* [RW 7] Bandwidth upper bound for VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288 +/* [RW 7] Bandwidth upper bound for VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c +/* [RW 7] Bandwidth upper bound for VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290 +/* [RW 7] Bandwidth upper bound for VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294 +/* [RW 7] Bandwidth upper bound for VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298 +/* [RW 7] Bandwidth upper bound for VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c +/* [RW 7] Bandwidth upper bound for VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244 +/* [RW 7] Bandwidth upper bound for VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248 +/* [RW 10] Bandwidth addition to VQ29 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c +/* [RW 10] Bandwidth addition to VQ30 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD30 0x120230 +/* [RW 10] Bandwidth Typical L for VQ29 Write requests */ +#define PXP2_REG_RQ_BW_WR_L29 0x12031c +/* [RW 10] Bandwidth Typical L for VQ30 Write requests */ +#define PXP2_REG_RQ_BW_WR_L30 0x120320 +/* [RW 7] Bandwidth upper bound for VQ29 */ +#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4 +/* [RW 7] Bandwidth upper bound for VQ30 */ +#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8 +/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */ +#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008 +/* [RW 2] Endian mode for cdu */ +#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0 +#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c +#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620 +/* [RW 3] page size in L2P table for CDU module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_CDU_P_SIZE 0x120018 +/* [R 1] 1' indicates that the requester has finished its internal + configuration */ +#define PXP2_REG_RQ_CFG_DONE 0x1201b4 +/* [RW 2] Endian mode for debug */ +#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4 +/* [RW 1] When '1'; requests will enter input buffers but wont get out + towards the glue */ +#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330 +/* [RW 4] Determines alignment of write SRs when a request is split into + * several 
SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. */ +#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 +/* [RW 4] Determines alignment of read SRs when a request is split into + * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. */ +#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c +/* [RW 1] when set the new alignment method (E2) will be applied; when reset + * the original alignment method (E1 E1H) will be applied */ +#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930 +/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will + be asserted */ +#define PXP2_REG_RQ_ELT_DISABLE 0x12066c +/* [RW 2] Endian mode for hc */ +#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 +/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for back + compatibility needs; Note that different registers are used per mode */ +#define PXP2_REG_RQ_ILT_MODE 0x1205b4 +/* [WB 53] Onchip address table */ +#define PXP2_REG_RQ_ONCHIP_AT 0x122000 +/* [WB 53] Onchip address table - B0 */ +#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000 +/* [RW 13] Pending read limiter threshold; in Dwords */ +#define PXP2_REG_RQ_PDR_LIMIT 0x12033c +/* [RW 2] Endian mode for qm */ +#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 +#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634 +#define PXP2_REG_RQ_QM_LAST_ILT 0x120638 +/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_QM_P_SIZE 0x120050 +/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */ +#define PXP2_REG_RQ_RBC_DONE 0x1201b0 +/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B; + 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ +#define PXP2_REG_RQ_RD_MBS0 0x120160 +/* [RW 3] Max burst size filed for read requests port 1; 000 - 128B; + 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ +#define PXP2_REG_RQ_RD_MBS1 0x120168 +/* [RW 2] Endian mode for src */ +#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c +#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c +#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640 +/* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c +/* [RW 2] Endian mode for tm */ +#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198 +#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644 +#define PXP2_REG_RQ_TM_LAST_ILT 0x120648 +/* [RW 3] page size in L2P table for TM module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_TM_P_SIZE 0x120034 +/* [R 5] Number of entries in the ufifo; his fifo has l2p completions */ +#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c +/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */ +#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094 +/* [R 8] Number of entries occupied by vq 0 in pswrq memory */ +#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810 +/* [R 8] Number of entries occupied by vq 10 in pswrq memory */ +#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818 +/* [R 8] Number of entries occupied by vq 11 in pswrq memory */ +#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820 +/* [R 8] Number of entries occupied by vq 12 in pswrq memory */ +#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828 +/* [R 8] Number of entries occupied by vq 13 in pswrq memory */ +#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830 +/* [R 8] Number of entries occupied by vq 14 in pswrq memory */ +#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838 +/* [R 8] Number of entries occupied by vq 15 in pswrq memory */ +#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840 +/* [R 8] Number of entries 
occupied by vq 16 in pswrq memory */ +#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848 +/* [R 8] Number of entries occupied by vq 17 in pswrq memory */ +#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850 +/* [R 8] Number of entries occupied by vq 18 in pswrq memory */ +#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858 +/* [R 8] Number of entries occupied by vq 19 in pswrq memory */ +#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860 +/* [R 8] Number of entries occupied by vq 1 in pswrq memory */ +#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868 +/* [R 8] Number of entries occupied by vq 20 in pswrq memory */ +#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870 +/* [R 8] Number of entries occupied by vq 21 in pswrq memory */ +#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878 +/* [R 8] Number of entries occupied by vq 22 in pswrq memory */ +#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880 +/* [R 8] Number of entries occupied by vq 23 in pswrq memory */ +#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888 +/* [R 8] Number of entries occupied by vq 24 in pswrq memory */ +#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890 +/* [R 8] Number of entries occupied by vq 25 in pswrq memory */ +#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898 +/* [R 8] Number of entries occupied by vq 26 in pswrq memory */ +#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0 +/* [R 8] Number of entries occupied by vq 27 in pswrq memory */ +#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8 +/* [R 8] Number of entries occupied by vq 28 in pswrq memory */ +#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0 +/* [R 8] Number of entries occupied by vq 29 in pswrq memory */ +#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8 +/* [R 8] Number of entries occupied by vq 2 in pswrq memory */ +#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0 +/* [R 8] Number of entries occupied by vq 30 in pswrq memory */ +#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8 +/* [R 8] Number of entries occupied by vq 31 in pswrq memory */ +#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0 +/* [R 8] Number of entries occupied by vq 3 in pswrq memory */ +#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8 +/* [R 8] Number of entries occupied by vq 4 in pswrq memory */ +#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0 +/* [R 8] Number of entries occupied by vq 5 in pswrq memory */ +#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8 +/* [R 8] Number of entries occupied by vq 6 in pswrq memory */ +#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0 +/* [R 8] Number of entries occupied by vq 7 in pswrq memory */ +#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8 +/* [R 8] Number of entries occupied by vq 8 in pswrq memory */ +#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900 +/* [R 8] Number of entries occupied by vq 9 in pswrq memory */ +#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908 +/* [RW 3] Max burst size filed for write requests port 0; 000 - 128B; + 001:256B; 010: 512B; */ +#define PXP2_REG_RQ_WR_MBS0 0x12015c +/* [RW 3] Max burst size filed for write requests port 1; 000 - 128B; + 001:256B; 010: 512B; */ +#define PXP2_REG_RQ_WR_MBS1 0x120164 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_CDU_MPS 0x1205f0 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_CSDM_MPS 0x1205d0 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_DBG_MPS 0x1205e8 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + 
buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_DMAE_MPS 0x1205ec +/* [RW 10] if Number of entries in dmae fifo will be higher than this + threshold then has_payload indication will be asserted; the default value + should be equal to > write MBS size! */ +#define PXP2_REG_WR_DMAE_TH 0x120368 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_HC_MPS 0x1205c8 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_QM_MPS 0x1205dc +/* [RW 1] 0 - working in A0 mode; - working in B0 mode */ +#define PXP2_REG_WR_REV_MODE 0x120670 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_SRC_MPS 0x1205e4 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_TM_MPS 0x1205e0 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_TSDM_MPS 0x1205d4 +/* [RW 10] if Number of entries in usdmdp fifo will be higher than this + threshold then has_payload indication will be asserted; the default value + should be equal to > write MBS size! */ +#define PXP2_REG_WR_USDMDP_TH 0x120348 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_USDM_MPS 0x1205cc +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_XSDM_MPS 0x1205d8 +/* [R 1] debug only: Indication if PSWHST arbiter is idle */ +#define PXP_REG_HST_ARB_IS_IDLE 0x103004 +/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means + this client is waiting for the arbiter. */ +#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 +/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue + block. Should be used for close the gates. */ +#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4 +/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit + should update according to 'hst_discard_doorbells' register when the state + machine is idle */ +#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 +/* [RW 1] When 1; new internal writes arriving to the block are discarded. + Should be used for close the gates. */ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8 +/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' + means this PSWHST is discarding inputs from this client. Each bit should + update according to 'hst_discard_internal_writes' register when the state + machine is idle. 
*/ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c +/* [WB 160] Used for initialization of the inbound interrupts memory */ +#define PXP_REG_HST_INBOUND_INT 0x103800 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP_REG_PXP_INT_MASK_0 0x103074 +#define PXP_REG_PXP_INT_MASK_1 0x103084 +/* [R 32] Interrupt register #0 read */ +#define PXP_REG_PXP_INT_STS_0 0x103068 +#define PXP_REG_PXP_INT_STS_1 0x103078 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c +#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c +/* [RW 27] Parity mask register #0 read/write */ +#define PXP_REG_PXP_PRTY_MASK 0x103094 +/* [R 26] Parity register #0 read */ +#define PXP_REG_PXP_PRTY_STS 0x103088 +/* [RC 27] Parity register #0 read clear */ +#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c +/* [RW 4] The activity counter initial increment value sent in the load + request */ +#define QM_REG_ACTCTRINITVAL_0 0x168040 +#define QM_REG_ACTCTRINITVAL_1 0x168044 +#define QM_REG_ACTCTRINITVAL_2 0x168048 +#define QM_REG_ACTCTRINITVAL_3 0x16804c +/* [RW 32] The base logical address (in bytes) of each physical queue. The + index I represents the physical queue number. The 12 lsbs are ignore and + considered zero so practically there are only 20 bits in this register; + queues 63-0 */ +#define QM_REG_BASEADDR 0x168900 +/* [RW 32] The base logical address (in bytes) of each physical queue. The + index I represents the physical queue number. The 12 lsbs are ignore and + considered zero so practically there are only 20 bits in this register; + queues 127-64 */ +#define QM_REG_BASEADDR_EXT_A 0x16e100 +/* [RW 16] The byte credit cost for each task. This value is for both ports */ +#define QM_REG_BYTECRDCOST 0x168234 +/* [RW 16] The initial byte credit value for both ports. */ +#define QM_REG_BYTECRDINITVAL 0x168238 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 31-0 */ +#define QM_REG_BYTECRDPORT_LSB 0x168228 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 95-64 */ +#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 63-32 */ +#define QM_REG_BYTECRDPORT_MSB 0x168224 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 127-96 */ +#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c +/* [RW 16] The byte credit value that if above the QM is considered almost + full */ +#define QM_REG_BYTECREDITAFULLTHR 0x168094 +/* [RW 4] The initial credit for interface */ +#define QM_REG_CMINITCRD_0 0x1680cc +#define QM_REG_BYTECRDCMDQ_0 0x16e6e8 +#define QM_REG_CMINITCRD_1 0x1680d0 +#define QM_REG_CMINITCRD_2 0x1680d4 +#define QM_REG_CMINITCRD_3 0x1680d8 +#define QM_REG_CMINITCRD_4 0x1680dc +#define QM_REG_CMINITCRD_5 0x1680e0 +#define QM_REG_CMINITCRD_6 0x1680e4 +#define QM_REG_CMINITCRD_7 0x1680e8 +/* [RW 8] A mask bit per CM interface. 
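PXP_REG_HST_DISCARD_DOORBELLS and PXP_REG_HST_DISCARD_INTERNAL_WRITES above act as "close the gates" controls, and each has a read-only status bit that only reflects the request once the PSWHST state machine is idle. A sketch of setting the doorbell gate and polling the status; the polling bound, delay and helper name are assumptions:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int pxp_close_doorbell_gate(void __iomem *regs)
{
	int tries = 1000;

	writel(1, regs + PXP_REG_HST_DISCARD_DOORBELLS);
	while (tries--) {
		if (readl(regs + PXP_REG_HST_DISCARD_DOORBELLS_STATUS) & 0x1)
			return 0;	/* doorbells are now being discarded */
		udelay(10);
	}
	return -ETIMEDOUT;
}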
If this bit is 0 then this interface + is masked */ +#define QM_REG_CMINTEN 0x1680ec +/* [RW 12] A bit vector which indicates which one of the queues are tied to + interface 0 */ +#define QM_REG_CMINTVOQMASK_0 0x1681f4 +#define QM_REG_CMINTVOQMASK_1 0x1681f8 +#define QM_REG_CMINTVOQMASK_2 0x1681fc +#define QM_REG_CMINTVOQMASK_3 0x168200 +#define QM_REG_CMINTVOQMASK_4 0x168204 +#define QM_REG_CMINTVOQMASK_5 0x168208 +#define QM_REG_CMINTVOQMASK_6 0x16820c +#define QM_REG_CMINTVOQMASK_7 0x168210 +/* [RW 20] The number of connections divided by 16 which dictates the size + of each queue which belongs to even function number. */ +#define QM_REG_CONNNUM_0 0x168020 +/* [R 6] Keep the fill level of the fifo from write client 4 */ +#define QM_REG_CQM_WRC_FIFOLVL 0x168018 +/* [RW 8] The context regions sent in the CFC load request */ +#define QM_REG_CTXREG_0 0x168030 +#define QM_REG_CTXREG_1 0x168034 +#define QM_REG_CTXREG_2 0x168038 +#define QM_REG_CTXREG_3 0x16803c +/* [RW 12] The VOQ mask used to select the VOQs which needs to be full for + bypass enable */ +#define QM_REG_ENBYPVOQMASK 0x16823c +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 31-0 */ +#define QM_REG_ENBYTECRD_LSB 0x168220 +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 95-64 */ +#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518 +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 63-32 */ +#define QM_REG_ENBYTECRD_MSB 0x16821c +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 127-96 */ +#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514 +/* [RW 4] If cleared then the secondary interface will not be served by the + RR arbiter */ +#define QM_REG_ENSEC 0x1680f0 +/* [RW 32] NA */ +#define QM_REG_FUNCNUMSEL_LSB 0x168230 +/* [RW 32] NA */ +#define QM_REG_FUNCNUMSEL_MSB 0x16822c +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 31:0 */ +#define QM_REG_HWAEMPTYMASK_LSB 0x168218 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 95-64 */ +#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 63:32 */ +#define QM_REG_HWAEMPTYMASK_MSB 0x168214 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 127-96 */ +#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c +/* [RW 4] The number of outstanding request to CFC */ +#define QM_REG_OUTLDREQ 0x168804 +/* [RC 1] A flag to indicate that overflow error occurred in one of the + queues. 
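The per-queue QM masks above (ENBYTECRD, BYTECRDPORT, HWAEMPTYMASK) spread the 128 physical queues over four 32-bit registers: *_LSB for queues 31-0, *_MSB for 63-32, *_LSB_EXT_A for 95-64 and *_MSB_EXT_A for 127-96. A small sketch of picking the right byte-credit enable register and bit for a given queue number; the helper is illustrative only:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

static void qm_enable_byte_credit(void __iomem *regs, unsigned int q)
{
	static const u32 bank[4] = {
		QM_REG_ENBYTECRD_LSB,		/* queues  31-0  */
		QM_REG_ENBYTECRD_MSB,		/* queues  63-32 */
		QM_REG_ENBYTECRD_LSB_EXT_A,	/* queues  95-64 */
		QM_REG_ENBYTECRD_MSB_EXT_A,	/* queues 127-96 */
	};
	u32 reg = bank[(q >> 5) & 0x3];

	writel(readl(regs + reg) | BIT(q & 0x1f), regs + reg);
}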
*/ +#define QM_REG_OVFERROR 0x16805c +/* [RC 7] the Q where the overflow occurs */ +#define QM_REG_OVFQNUM 0x168058 +/* [R 16] Pause state for physical queues 15-0 */ +#define QM_REG_PAUSESTATE0 0x168410 +/* [R 16] Pause state for physical queues 31-16 */ +#define QM_REG_PAUSESTATE1 0x168414 +/* [R 16] Pause state for physical queues 47-32 */ +#define QM_REG_PAUSESTATE2 0x16e684 +/* [R 16] Pause state for physical queues 63-48 */ +#define QM_REG_PAUSESTATE3 0x16e688 +/* [R 16] Pause state for physical queues 79-64 */ +#define QM_REG_PAUSESTATE4 0x16e68c +/* [R 16] Pause state for physical queues 95-80 */ +#define QM_REG_PAUSESTATE5 0x16e690 +/* [R 16] Pause state for physical queues 111-96 */ +#define QM_REG_PAUSESTATE6 0x16e694 +/* [R 16] Pause state for physical queues 127-112 */ +#define QM_REG_PAUSESTATE7 0x16e698 +/* [RW 2] The PCI attributes field used in the PCI request. */ +#define QM_REG_PCIREQAT 0x168054 +#define QM_REG_PF_EN 0x16e70c +/* [R 24] The number of tasks stored in the QM for the PF. only even + * functions are valid in E2 (odd I registers will be hard wired to 0) */ +#define QM_REG_PF_USG_CNT_0 0x16e040 +/* [R 16] NOT USED */ +#define QM_REG_PORT0BYTECRD 0x168300 +/* [R 16] The byte credit of port 1 */ +#define QM_REG_PORT1BYTECRD 0x168304 +/* [RW 3] pci function number of queues 15-0 */ +#define QM_REG_PQ2PCIFUNC_0 0x16e6bc +#define QM_REG_PQ2PCIFUNC_1 0x16e6c0 +#define QM_REG_PQ2PCIFUNC_2 0x16e6c4 +#define QM_REG_PQ2PCIFUNC_3 0x16e6c8 +#define QM_REG_PQ2PCIFUNC_4 0x16e6cc +#define QM_REG_PQ2PCIFUNC_5 0x16e6d0 +#define QM_REG_PQ2PCIFUNC_6 0x16e6d4 +#define QM_REG_PQ2PCIFUNC_7 0x16e6d8 +/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follow: + ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read + bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ +#define QM_REG_PTRTBL 0x168a00 +/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follow: + ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read + bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ +#define QM_REG_PTRTBL_EXT_A 0x16e200 +/* [RW 2] Interrupt mask register #0 read/write */ +#define QM_REG_QM_INT_MASK 0x168444 +/* [R 2] Interrupt register #0 read */ +#define QM_REG_QM_INT_STS 0x168438 +/* [RW 12] Parity mask register #0 read/write */ +#define QM_REG_QM_PRTY_MASK 0x168454 +/* [R 12] Parity register #0 read */ +#define QM_REG_QM_PRTY_STS 0x168448 +/* [RC 12] Parity register #0 read clear */ +#define QM_REG_QM_PRTY_STS_CLR 0x16844c +/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ +#define QM_REG_QSTATUS_HIGH 0x16802c +/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ +#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408 +/* [R 32] Current queues in pipeline: Queues from 0 to 31 */ +#define QM_REG_QSTATUS_LOW 0x168028 +/* [R 32] Current queues in pipeline: Queues from 64 to 95 */ +#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404 +/* [R 24] The number of tasks queued for each queue; queues 63-0 */ +#define QM_REG_QTASKCTR_0 0x168308 +/* [R 24] The number of tasks queued for each queue; queues 127-64 */ +#define QM_REG_QTASKCTR_EXT_A_0 0x16e584 +/* [RW 4] Queue tied to VOQ */ +#define QM_REG_QVOQIDX_0 0x1680f4 +#define QM_REG_QVOQIDX_10 0x16811c +#define QM_REG_QVOQIDX_100 0x16e49c +#define QM_REG_QVOQIDX_101 0x16e4a0 +#define QM_REG_QVOQIDX_102 0x16e4a4 +#define QM_REG_QVOQIDX_103 0x16e4a8 +#define QM_REG_QVOQIDX_104 0x16e4ac +#define QM_REG_QVOQIDX_105 0x16e4b0 +#define QM_REG_QVOQIDX_106 0x16e4b4 +#define 
QM_REG_QVOQIDX_107 0x16e4b8 +#define QM_REG_QVOQIDX_108 0x16e4bc +#define QM_REG_QVOQIDX_109 0x16e4c0 +#define QM_REG_QVOQIDX_11 0x168120 +#define QM_REG_QVOQIDX_110 0x16e4c4 +#define QM_REG_QVOQIDX_111 0x16e4c8 +#define QM_REG_QVOQIDX_112 0x16e4cc +#define QM_REG_QVOQIDX_113 0x16e4d0 +#define QM_REG_QVOQIDX_114 0x16e4d4 +#define QM_REG_QVOQIDX_115 0x16e4d8 +#define QM_REG_QVOQIDX_116 0x16e4dc +#define QM_REG_QVOQIDX_117 0x16e4e0 +#define QM_REG_QVOQIDX_118 0x16e4e4 +#define QM_REG_QVOQIDX_119 0x16e4e8 +#define QM_REG_QVOQIDX_12 0x168124 +#define QM_REG_QVOQIDX_120 0x16e4ec +#define QM_REG_QVOQIDX_121 0x16e4f0 +#define QM_REG_QVOQIDX_122 0x16e4f4 +#define QM_REG_QVOQIDX_123 0x16e4f8 +#define QM_REG_QVOQIDX_124 0x16e4fc +#define QM_REG_QVOQIDX_125 0x16e500 +#define QM_REG_QVOQIDX_126 0x16e504 +#define QM_REG_QVOQIDX_127 0x16e508 +#define QM_REG_QVOQIDX_13 0x168128 +#define QM_REG_QVOQIDX_14 0x16812c +#define QM_REG_QVOQIDX_15 0x168130 +#define QM_REG_QVOQIDX_16 0x168134 +#define QM_REG_QVOQIDX_17 0x168138 +#define QM_REG_QVOQIDX_21 0x168148 +#define QM_REG_QVOQIDX_22 0x16814c +#define QM_REG_QVOQIDX_23 0x168150 +#define QM_REG_QVOQIDX_24 0x168154 +#define QM_REG_QVOQIDX_25 0x168158 +#define QM_REG_QVOQIDX_26 0x16815c +#define QM_REG_QVOQIDX_27 0x168160 +#define QM_REG_QVOQIDX_28 0x168164 +#define QM_REG_QVOQIDX_29 0x168168 +#define QM_REG_QVOQIDX_30 0x16816c +#define QM_REG_QVOQIDX_31 0x168170 +#define QM_REG_QVOQIDX_32 0x168174 +#define QM_REG_QVOQIDX_33 0x168178 +#define QM_REG_QVOQIDX_34 0x16817c +#define QM_REG_QVOQIDX_35 0x168180 +#define QM_REG_QVOQIDX_36 0x168184 +#define QM_REG_QVOQIDX_37 0x168188 +#define QM_REG_QVOQIDX_38 0x16818c +#define QM_REG_QVOQIDX_39 0x168190 +#define QM_REG_QVOQIDX_40 0x168194 +#define QM_REG_QVOQIDX_41 0x168198 +#define QM_REG_QVOQIDX_42 0x16819c +#define QM_REG_QVOQIDX_43 0x1681a0 +#define QM_REG_QVOQIDX_44 0x1681a4 +#define QM_REG_QVOQIDX_45 0x1681a8 +#define QM_REG_QVOQIDX_46 0x1681ac +#define QM_REG_QVOQIDX_47 0x1681b0 +#define QM_REG_QVOQIDX_48 0x1681b4 +#define QM_REG_QVOQIDX_49 0x1681b8 +#define QM_REG_QVOQIDX_5 0x168108 +#define QM_REG_QVOQIDX_50 0x1681bc +#define QM_REG_QVOQIDX_51 0x1681c0 +#define QM_REG_QVOQIDX_52 0x1681c4 +#define QM_REG_QVOQIDX_53 0x1681c8 +#define QM_REG_QVOQIDX_54 0x1681cc +#define QM_REG_QVOQIDX_55 0x1681d0 +#define QM_REG_QVOQIDX_56 0x1681d4 +#define QM_REG_QVOQIDX_57 0x1681d8 +#define QM_REG_QVOQIDX_58 0x1681dc +#define QM_REG_QVOQIDX_59 0x1681e0 +#define QM_REG_QVOQIDX_6 0x16810c +#define QM_REG_QVOQIDX_60 0x1681e4 +#define QM_REG_QVOQIDX_61 0x1681e8 +#define QM_REG_QVOQIDX_62 0x1681ec +#define QM_REG_QVOQIDX_63 0x1681f0 +#define QM_REG_QVOQIDX_64 0x16e40c +#define QM_REG_QVOQIDX_65 0x16e410 +#define QM_REG_QVOQIDX_69 0x16e420 +#define QM_REG_QVOQIDX_7 0x168110 +#define QM_REG_QVOQIDX_70 0x16e424 +#define QM_REG_QVOQIDX_71 0x16e428 +#define QM_REG_QVOQIDX_72 0x16e42c +#define QM_REG_QVOQIDX_73 0x16e430 +#define QM_REG_QVOQIDX_74 0x16e434 +#define QM_REG_QVOQIDX_75 0x16e438 +#define QM_REG_QVOQIDX_76 0x16e43c +#define QM_REG_QVOQIDX_77 0x16e440 +#define QM_REG_QVOQIDX_78 0x16e444 +#define QM_REG_QVOQIDX_79 0x16e448 +#define QM_REG_QVOQIDX_8 0x168114 +#define QM_REG_QVOQIDX_80 0x16e44c +#define QM_REG_QVOQIDX_81 0x16e450 +#define QM_REG_QVOQIDX_85 0x16e460 +#define QM_REG_QVOQIDX_86 0x16e464 +#define QM_REG_QVOQIDX_87 0x16e468 +#define QM_REG_QVOQIDX_88 0x16e46c +#define QM_REG_QVOQIDX_89 0x16e470 +#define QM_REG_QVOQIDX_9 0x168118 +#define QM_REG_QVOQIDX_90 0x16e474 +#define QM_REG_QVOQIDX_91 0x16e478 +#define 
QM_REG_QVOQIDX_92 0x16e47c +#define QM_REG_QVOQIDX_93 0x16e480 +#define QM_REG_QVOQIDX_94 0x16e484 +#define QM_REG_QVOQIDX_95 0x16e488 +#define QM_REG_QVOQIDX_96 0x16e48c +#define QM_REG_QVOQIDX_97 0x16e490 +#define QM_REG_QVOQIDX_98 0x16e494 +#define QM_REG_QVOQIDX_99 0x16e498 +/* [RW 1] Initialization bit command */ +#define QM_REG_SOFT_RESET 0x168428 +/* [RW 8] The credit cost per every task in the QM. A value per each VOQ */ +#define QM_REG_TASKCRDCOST_0 0x16809c +#define QM_REG_TASKCRDCOST_1 0x1680a0 +#define QM_REG_TASKCRDCOST_2 0x1680a4 +#define QM_REG_TASKCRDCOST_4 0x1680ac +#define QM_REG_TASKCRDCOST_5 0x1680b0 +/* [R 6] Keep the fill level of the fifo from write client 3 */ +#define QM_REG_TQM_WRC_FIFOLVL 0x168010 +/* [R 6] Keep the fill level of the fifo from write client 2 */ +#define QM_REG_UQM_WRC_FIFOLVL 0x168008 +/* [RC 32] Credit update error register */ +#define QM_REG_VOQCRDERRREG 0x168408 +/* [R 16] The credit value for each VOQ */ +#define QM_REG_VOQCREDIT_0 0x1682d0 +#define QM_REG_VOQCREDIT_1 0x1682d4 +#define QM_REG_VOQCREDIT_4 0x1682e0 +/* [RW 16] The credit value that if above the QM is considered almost full */ +#define QM_REG_VOQCREDITAFULLTHR 0x168090 +/* [RW 16] The init and maximum credit for each VoQ */ +#define QM_REG_VOQINITCREDIT_0 0x168060 +#define QM_REG_VOQINITCREDIT_1 0x168064 +#define QM_REG_VOQINITCREDIT_2 0x168068 +#define QM_REG_VOQINITCREDIT_4 0x168070 +#define QM_REG_VOQINITCREDIT_5 0x168074 +/* [RW 1] The port of which VOQ belongs */ +#define QM_REG_VOQPORT_0 0x1682a0 +#define QM_REG_VOQPORT_1 0x1682a4 +#define QM_REG_VOQPORT_2 0x1682a8 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_0_LSB 0x168240 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_0_MSB 0x168244 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_10_LSB 0x168290 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_10_MSB 0x168294 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_11_LSB 0x168298 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_11_MSB 0x16829c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_1_LSB 0x168248 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_1_MSB 0x16824c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define 
QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_2_LSB 0x168250 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_2_MSB 0x168254 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_3_LSB 0x168258 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_4_LSB 0x168260 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_4_MSB 0x168264 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_5_LSB 0x168268 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_5_MSB 0x16826c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_6_LSB 0x168270 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_6_MSB 0x168274 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_7_LSB 0x168278 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_7_MSB 0x16827c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_8_LSB 0x168280 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_8_MSB 0x168284 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_9_LSB 0x168288 +/* [RW 32] The physical queue number associated with each VOQ; queues 
95-64 */ +#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570 +/* [RW 32] Wrr weights */ +#define QM_REG_WRRWEIGHTS_0 0x16880c +#define QM_REG_WRRWEIGHTS_1 0x168810 +#define QM_REG_WRRWEIGHTS_10 0x168814 +#define QM_REG_WRRWEIGHTS_11 0x168818 +#define QM_REG_WRRWEIGHTS_12 0x16881c +#define QM_REG_WRRWEIGHTS_13 0x168820 +#define QM_REG_WRRWEIGHTS_14 0x168824 +#define QM_REG_WRRWEIGHTS_15 0x168828 +#define QM_REG_WRRWEIGHTS_16 0x16e000 +#define QM_REG_WRRWEIGHTS_17 0x16e004 +#define QM_REG_WRRWEIGHTS_18 0x16e008 +#define QM_REG_WRRWEIGHTS_19 0x16e00c +#define QM_REG_WRRWEIGHTS_2 0x16882c +#define QM_REG_WRRWEIGHTS_20 0x16e010 +#define QM_REG_WRRWEIGHTS_21 0x16e014 +#define QM_REG_WRRWEIGHTS_22 0x16e018 +#define QM_REG_WRRWEIGHTS_23 0x16e01c +#define QM_REG_WRRWEIGHTS_24 0x16e020 +#define QM_REG_WRRWEIGHTS_25 0x16e024 +#define QM_REG_WRRWEIGHTS_26 0x16e028 +#define QM_REG_WRRWEIGHTS_27 0x16e02c +#define QM_REG_WRRWEIGHTS_28 0x16e030 +#define QM_REG_WRRWEIGHTS_29 0x16e034 +#define QM_REG_WRRWEIGHTS_3 0x168830 +#define QM_REG_WRRWEIGHTS_30 0x16e038 +#define QM_REG_WRRWEIGHTS_31 0x16e03c +#define QM_REG_WRRWEIGHTS_4 0x168834 +#define QM_REG_WRRWEIGHTS_5 0x168838 +#define QM_REG_WRRWEIGHTS_6 0x16883c +#define QM_REG_WRRWEIGHTS_7 0x168840 +#define QM_REG_WRRWEIGHTS_8 0x168844 +#define QM_REG_WRRWEIGHTS_9 0x168848 +/* [R 6] Keep the fill level of the fifo from write client 1 */ +#define QM_REG_XQM_WRC_FIFOLVL 0x168000 +/* [W 1] reset to parity interrupt */ +#define SEM_FAST_REG_PARITY_RST 0x18840 +#define SRC_REG_COUNTFREE0 0x40500 +/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two + ports. If set the searcher support 8 functions. */ +#define SRC_REG_E1HMF_ENABLE 0x404cc +#define SRC_REG_FIRSTFREE0 0x40510 +#define SRC_REG_KEYRSS0_0 0x40408 +#define SRC_REG_KEYRSS0_7 0x40424 +#define SRC_REG_KEYRSS1_9 0x40454 +#define SRC_REG_KEYSEARCH_0 0x40458 +#define SRC_REG_KEYSEARCH_1 0x4045c +#define SRC_REG_KEYSEARCH_2 0x40460 +#define SRC_REG_KEYSEARCH_3 0x40464 +#define SRC_REG_KEYSEARCH_4 0x40468 +#define SRC_REG_KEYSEARCH_5 0x4046c +#define SRC_REG_KEYSEARCH_6 0x40470 +#define SRC_REG_KEYSEARCH_7 0x40474 +#define SRC_REG_KEYSEARCH_8 0x40478 +#define SRC_REG_KEYSEARCH_9 0x4047c +#define SRC_REG_LASTFREE0 0x40530 +#define SRC_REG_NUMBER_HASH_BITS0 0x40400 +/* [RW 1] Reset internal state machines. */ +#define SRC_REG_SOFT_RST 0x4049c +/* [R 3] Interrupt register #0 read */ +#define SRC_REG_SRC_INT_STS 0x404ac +/* [RW 3] Parity mask register #0 read/write */ +#define SRC_REG_SRC_PRTY_MASK 0x404c8 +/* [R 3] Parity register #0 read */ +#define SRC_REG_SRC_PRTY_STS 0x404bc +/* [RC 3] Parity register #0 read clear */ +#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 +/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ +#define TCM_REG_CAM_OCCUP 0x5017c +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define TCM_REG_CDU_AG_RD_IFEN 0x50034 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define TCM_REG_CDU_AG_WR_IFEN 0x50030 +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. 
*/ +#define TCM_REG_CDU_SM_RD_IFEN 0x5003c +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define TCM_REG_CDU_SM_WR_IFEN 0x50038 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define TCM_REG_CFC_INIT_CRD 0x50204 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_CP_WEIGHT 0x500c0 +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_CSEM_IFEN 0x5002c +/* [RC 1] Message length mismatch (relative to last indication) at the In#9 + interface. */ +#define TCM_REG_CSEM_LENGTH_MIS 0x50174 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_CSEM_WEIGHT 0x500bc +/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */ +#define TCM_REG_ERR_EVNT_ID 0x500a0 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define TCM_REG_ERR_TCM_HDR 0x5009c +/* [RW 8] The Event ID for Timers expiration. */ +#define TCM_REG_EXPR_EVNT_ID 0x500a4 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define TCM_REG_FIC0_INIT_CRD 0x5020c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define TCM_REG_FIC1_INIT_CRD 0x50210 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr; + ~tcm_registers_gr_ld0_pr.gr_ld0_pr and + ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */ +#define TCM_REG_GR_ARB_TYPE 0x50114 +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel is the + compliment of the other 3 groups. */ +#define TCM_REG_GR_LD0_PR 0x5011c +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel is the + compliment of the other 3 groups. */ +#define TCM_REG_GR_LD1_PR 0x50120 +/* [RW 4] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. The double REG-pairs are + used to align to STORM context row size of 128 bits. The offset of these + data in the STORM context is always 0. Index _i stands for the connection + type (one of 16). */ +#define TCM_REG_N_SM_CTX_LD_0 0x50050 +#define TCM_REG_N_SM_CTX_LD_1 0x50054 +#define TCM_REG_N_SM_CTX_LD_2 0x50058 +#define TCM_REG_N_SM_CTX_LD_3 0x5005c +#define TCM_REG_N_SM_CTX_LD_4 0x50060 +#define TCM_REG_N_SM_CTX_LD_5 0x50064 +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
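The *_WEIGHT registers above share one encoding: a 3-bit value N programs an arbitration weight of N, except that 0 programs the maximum weight of 8. A minimal sketch of that mapping; cm_wrr_weight_val() is a hypothetical helper named here only for illustration:

	/* Illustrative only: convert a desired WRR weight (1..8) into the
	 * 3-bit register value, where 0 encodes the maximum weight of 8. */
	static inline u32 cm_wrr_weight_val(unsigned int weight)
	{
		return (weight >= 8) ? 0 : weight;	/* 8 -> 0, 1..7 -> as-is */
	}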
*/ +#define TCM_REG_PBF_IFEN 0x50024 +/* [RC 1] Message length mismatch (relative to last indication) at the In#7 + interface. */ +#define TCM_REG_PBF_LENGTH_MIS 0x5016c +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_PBF_WEIGHT 0x500b4 +#define TCM_REG_PHYS_QNUM0_0 0x500e0 +#define TCM_REG_PHYS_QNUM0_1 0x500e4 +#define TCM_REG_PHYS_QNUM1_0 0x500e8 +#define TCM_REG_PHYS_QNUM1_1 0x500ec +#define TCM_REG_PHYS_QNUM2_0 0x500f0 +#define TCM_REG_PHYS_QNUM2_1 0x500f4 +#define TCM_REG_PHYS_QNUM3_0 0x500f8 +#define TCM_REG_PHYS_QNUM3_1 0x500fc +/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_PRS_IFEN 0x50020 +/* [RC 1] Message length mismatch (relative to last indication) at the In#6 + interface. */ +#define TCM_REG_PRS_LENGTH_MIS 0x50168 +/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_PRS_WEIGHT 0x500b0 +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define TCM_REG_STOP_EVNT_ID 0x500a8 +/* [RC 1] Message length mismatch (relative to last indication) at the STORM + interface. */ +#define TCM_REG_STORM_LENGTH_MIS 0x50160 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_STORM_TCM_IFEN 0x50010 +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_STORM_WEIGHT 0x500ac +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_CFC_IFEN 0x50040 +/* [RW 11] Interrupt mask register #0 read/write */ +#define TCM_REG_TCM_INT_MASK 0x501dc +/* [R 11] Interrupt register #0 read */ +#define TCM_REG_TCM_INT_STS 0x501d0 +/* [RW 27] Parity mask register #0 read/write */ +#define TCM_REG_TCM_PRTY_MASK 0x501ec +/* [R 27] Parity register #0 read */ +#define TCM_REG_TCM_PRTY_STS 0x501e0 +/* [RC 27] Parity register #0 read clear */ +#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 +/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the input message Reg1WbFlg isn't set. */ +#define TCM_REG_TCM_REG0_SZ 0x500d8 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_STORM0_IFEN 0x50004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_STORM1_IFEN 0x50008 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define TCM_REG_TCM_TQM_IFEN 0x5000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define TCM_REG_TCM_TQM_USE_Q 0x500d4 +/* [RW 28] The CM header for Timers expiration command. */ +#define TCM_REG_TM_TCM_HDR 0x50098 +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_TM_TCM_IFEN 0x5001c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TM_WEIGHT 0x500d0 +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define TCM_REG_TQM_INIT_CRD 0x5021c +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TQM_P_WEIGHT 0x500c8 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TQM_S_WEIGHT 0x500cc +/* [RW 28] The CM header value for QM request (primary). */ +#define TCM_REG_TQM_TCM_HDR_P 0x50090 +/* [RW 28] The CM header value for QM request (secondary). */ +#define TCM_REG_TQM_TCM_HDR_S 0x50094 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TQM_TCM_IFEN 0x50014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TSDM_IFEN 0x50018 +/* [RC 1] Message length mismatch (relative to last indication) at the SDM + interface. */ +#define TCM_REG_TSDM_LENGTH_MIS 0x50164 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TSDM_WEIGHT 0x500c4 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_USEM_IFEN 0x50028 +/* [RC 1] Message length mismatch (relative to last indication) at the In#8 + interface. */ +#define TCM_REG_USEM_LENGTH_MIS 0x50170 +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_USEM_WEIGHT 0x500b8 +/* [RW 21] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - length of the message; 15:6] - message + pointer; 20:16] - next pointer. */ +#define TCM_REG_XX_DESCR_TABLE 0x50280 +#define TCM_REG_XX_DESCR_TABLE_SIZE 29 +/* [R 6] Use to read the value of XX protection Free counter. */ +#define TCM_REG_XX_FREE 0x50178 +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. 
Max credit available - 127.Write writes the initial credit + value; read returns the current value of the credit counter. Must be + initialized to 19 at start-up. */ +#define TCM_REG_XX_INIT_CRD 0x50220 +/* [RW 6] Maximum link list size (messages locked) per connection in the XX + protection. */ +#define TCM_REG_XX_MAX_LL_SZ 0x50044 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~tcm_registers_xx_free.xx_free is read on read. */ +#define TCM_REG_XX_MSG_NUM 0x50224 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define TCM_REG_XX_OVFL_EVNT_ID 0x50048 +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are:[4:0] - tail pointer; [10:5] - Link List size; 15:11] - + header pointer. */ +#define TCM_REG_XX_TABLE 0x50240 +/* [RW 4] Load value for cfc ac credit cnt. */ +#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208 +/* [RW 4] Load value for cfc cld credit cnt. */ +#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210 +/* [RW 8] Client0 context region. */ +#define TM_REG_CL0_CONT_REGION 0x164030 +/* [RW 8] Client1 context region. */ +#define TM_REG_CL1_CONT_REGION 0x164034 +/* [RW 8] Client2 context region. */ +#define TM_REG_CL2_CONT_REGION 0x164038 +/* [RW 2] Client in High priority client number. */ +#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024 +/* [RW 4] Load value for clout0 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220 +/* [RW 4] Load value for clout1 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228 +/* [RW 4] Load value for clout2 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230 +/* [RW 1] Enable client0 input. */ +#define TM_REG_EN_CL0_INPUT 0x164008 +/* [RW 1] Enable client1 input. */ +#define TM_REG_EN_CL1_INPUT 0x16400c +/* [RW 1] Enable client2 input. */ +#define TM_REG_EN_CL2_INPUT 0x164010 +#define TM_REG_EN_LINEAR0_TIMER 0x164014 +/* [RW 1] Enable real time counter. */ +#define TM_REG_EN_REAL_TIME_CNT 0x1640d8 +/* [RW 1] Enable for Timers state machines. */ +#define TM_REG_EN_TIMERS 0x164000 +/* [RW 4] Load value for expiration credit cnt. CFC max number of + outstanding load requests for timers (expiration) context loading. */ +#define TM_REG_EXP_CRDCNT_VAL 0x164238 +/* [RW 32] Linear0 logic address. */ +#define TM_REG_LIN0_LOGIC_ADDR 0x164240 +/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ +#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 +/* [ST 16] Linear0 Number of scans counter. */ +#define TM_REG_LIN0_NUM_SCANS 0x1640a0 +/* [WB 64] Linear0 phy address. */ +#define TM_REG_LIN0_PHY_ADDR 0x164270 +/* [RW 1] Linear0 physical address valid. */ +#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248 +#define TM_REG_LIN0_SCAN_ON 0x1640d0 +/* [RW 24] Linear0 array scan timeout. */ +#define TM_REG_LIN0_SCAN_TIME 0x16403c +#define TM_REG_LIN0_VNIC_UC 0x164128 +/* [RW 32] Linear1 logic address. */ +#define TM_REG_LIN1_LOGIC_ADDR 0x164250 +/* [WB 64] Linear1 phy address. */ +#define TM_REG_LIN1_PHY_ADDR 0x164280 +/* [RW 1] Linear1 physical address valid. */ +#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258 +/* [RW 6] Linear timer set_clear fifo threshold. */ +#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070 +/* [RW 2] Load value for pci arbiter credit cnt. */ +#define TM_REG_PCIARB_CRDCNT_VAL 0x164260 +/* [RW 20] The amount of hardware cycles for each timer tick. */ +#define TM_REG_TIMER_TICK_SIZE 0x16401c +/* [RW 8] Timers Context region. 
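Several registers above are initial-credit registers whose comments state a required start-up value (1 for TCM_REG_CFC_INIT_CRD, 64 for TCM_REG_FIC0_INIT_CRD and TCM_REG_FIC1_INIT_CRD, 32 for TCM_REG_TQM_INIT_CRD, 19 for TCM_REG_XX_INIT_CRD). A minimal sketch of the start-up programming this implies, assuming the driver's usual REG_WR(bp, offset, val) accessor and struct bnx2x are in scope; tcm_init_credits() is a hypothetical helper:

	/* Illustrative only: program the TCM initial credits to the
	 * start-up values called out in the register comments above. */
	static void tcm_init_credits(struct bnx2x *bp)
	{
		REG_WR(bp, TCM_REG_CFC_INIT_CRD, 1);	/* CFC output credit */
		REG_WR(bp, TCM_REG_FIC0_INIT_CRD, 64);	/* FIC0 output credit */
		REG_WR(bp, TCM_REG_FIC1_INIT_CRD, 64);	/* FIC1 output credit */
		REG_WR(bp, TCM_REG_TQM_INIT_CRD, 32);	/* QM output credit */
		REG_WR(bp, TCM_REG_XX_INIT_CRD, 19);	/* XX protection credit */
	}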
*/ +#define TM_REG_TM_CONTEXT_REGION 0x164044 +/* [RW 1] Interrupt mask register #0 read/write */ +#define TM_REG_TM_INT_MASK 0x1640fc +/* [R 1] Interrupt register #0 read */ +#define TM_REG_TM_INT_STS 0x1640f0 +/* [RW 7] Parity mask register #0 read/write */ +#define TM_REG_TM_PRTY_MASK 0x16410c +/* [RC 7] Parity register #0 read clear */ +#define TM_REG_TM_PRTY_STS_CLR 0x164104 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define TSDM_REG_AGG_INT_EVENT_0 0x42038 +#define TSDM_REG_AGG_INT_EVENT_1 0x4203c +#define TSDM_REG_AGG_INT_EVENT_2 0x42040 +#define TSDM_REG_AGG_INT_EVENT_3 0x42044 +#define TSDM_REG_AGG_INT_EVENT_4 0x42048 +/* [RW 1] The T bit for aggregated interrupt 0 */ +#define TSDM_REG_AGG_INT_T_0 0x420b8 +#define TSDM_REG_AGG_INT_T_1 0x420bc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define TSDM_REG_CFC_RSP_START_ADDR 0x42008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c +/* [RW 16] The maximum value of the completion counter #1 */ +#define TSDM_REG_CMP_COUNTER_MAX1 0x42020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define TSDM_REG_CMP_COUNTER_MAX2 0x42024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define TSDM_REG_CMP_COUNTER_MAX3 0x42028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c +#define TSDM_REG_ENABLE_IN1 0x42238 +#define TSDM_REG_ENABLE_IN2 0x4223c +#define TSDM_REG_ENABLE_OUT1 0x42240 +#define TSDM_REG_ENABLE_OUT2 0x42244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. */ +#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc +/* [ST 32] The number of ACK after placement messages received */ +#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c +/* [ST 32] The number of packet end messages received from the parser */ +#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274 +/* [ST 32] The number of requests received from the pxp async if */ +#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278 +/* [ST 32] The number of commands received in queue 0 */ +#define TSDM_REG_NUM_OF_Q0_CMD 0x42248 +/* [ST 32] The number of commands received in queue 10 */ +#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c +/* [ST 32] The number of commands received in queue 11 */ +#define TSDM_REG_NUM_OF_Q11_CMD 0x42270 +/* [ST 32] The number of commands received in queue 1 */ +#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c +/* [ST 32] The number of commands received in queue 3 */ +#define TSDM_REG_NUM_OF_Q3_CMD 0x42250 +/* [ST 32] The number of commands received in queue 4 */ +#define TSDM_REG_NUM_OF_Q4_CMD 0x42254 +/* [ST 32] The number of commands received in queue 5 */ +#define TSDM_REG_NUM_OF_Q5_CMD 0x42258 +/* [ST 32] The number of commands received in queue 6 */ +#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c +/* [ST 32] The number of commands received in queue 7 */ +#define TSDM_REG_NUM_OF_Q7_CMD 0x42260 +/* [ST 32] The number of commands received in queue 8 */ +#define TSDM_REG_NUM_OF_Q8_CMD 0x42264 +/* [ST 32] The number of commands received in queue 9 */ +#define TSDM_REG_NUM_OF_Q9_CMD 0x42268 +/* [RW 13] The start address in the internal RAM for the packet end message */ +#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 
0x42548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558 +/* [RW 32] Tick for timer counter. Applicable only when + ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define TSDM_REG_TIMER_TICK 0x42000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSDM_REG_TSDM_INT_MASK_0 0x4229c +#define TSDM_REG_TSDM_INT_MASK_1 0x422ac +/* [R 32] Interrupt register #0 read */ +#define TSDM_REG_TSDM_INT_STS_0 0x42290 +#define TSDM_REG_TSDM_INT_STS_1 0x422a0 +/* [RW 11] Parity mask register #0 read/write */ +#define TSDM_REG_TSDM_PRTY_MASK 0x422bc +/* [R 11] Parity register #0 read */ +#define TSDM_REG_TSDM_PRTY_STS 0x422b0 +/* [RC 11] Parity register #0 read clear */ +#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define TSEM_REG_ARB_ELEMENT0 0x180020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */ +#define TSEM_REG_ARB_ELEMENT1 0x180024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 + and ~tsem_registers_arb_element1.arb_element1 */ +#define TSEM_REG_ARB_ELEMENT2 0x180028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~tsem_registers_arb_element0.arb_element0 and + ~tsem_registers_arb_element1.arb_element1 and + ~tsem_registers_arb_element2.arb_element2 */ +#define TSEM_REG_ARB_ELEMENT3 0x18002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 + and ~tsem_registers_arb_element1.arb_element1 and + ~tsem_registers_arb_element2.arb_element2 and + ~tsem_registers_arb_element3.arb_element3 */ +#define TSEM_REG_ARB_ELEMENT4 0x180030 +#define TSEM_REG_ENABLE_IN 0x1800a4 +#define TSEM_REG_ENABLE_OUT 0x1800a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. 
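The comment above describes how SEM_FAST registers are reached: their offsets are relative to the owning SEM block's fast-memory window, so the ~fast_memory base must be added to each offset. A minimal sketch under that assumption, using SEM_FAST_REG_PARITY_RST (defined earlier in this header) as the example offset and REG_WR()/struct bnx2x as assumed driver accessors; the value written is illustrative:

	/* Illustrative only: a SEM_FAST register is addressed by adding its
	 * offset to the SEM block's fast-memory base, per the comment above. */
	static void tsem_reset_parity(struct bnx2x *bp)
	{
		REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 1);
	}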
*/ +#define TSEM_REG_FAST_MEMORY 0x1a0000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define TSEM_REG_FIC0_DISABLE 0x180224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define TSEM_REG_FIC1_DISABLE 0x180234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define TSEM_REG_INT_TABLE 0x180400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define TSEM_REG_MSG_NUM_FIC0 0x180000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define TSEM_REG_MSG_NUM_FIC1 0x180004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define TSEM_REG_MSG_NUM_FOC0 0x180008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define TSEM_REG_MSG_NUM_FOC1 0x18000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define TSEM_REG_MSG_NUM_FOC2 0x180010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define TSEM_REG_MSG_NUM_FOC3 0x180014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define TSEM_REG_PAS_DISABLE 0x18024c +/* [WB 128] Debug only. Passive buffer memory */ +#define TSEM_REG_PASSIVE_BUFFER 0x181000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define TSEM_REG_PRAM 0x1c0000 +/* [R 8] Valid sleeping threads indication have bit per thread */ +#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 +/* [RW 8] List of free threads . There is a bit per thread. 
*/ +#define TSEM_REG_THREADS_LIST 0x1802e4 +/* [RC 32] Parity register #0 read clear */ +#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 +#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define TSEM_REG_TS_0_AS 0x180038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define TSEM_REG_TS_10_AS 0x180060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define TSEM_REG_TS_11_AS 0x180064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define TSEM_REG_TS_12_AS 0x180068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define TSEM_REG_TS_13_AS 0x18006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define TSEM_REG_TS_14_AS 0x180070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define TSEM_REG_TS_15_AS 0x180074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define TSEM_REG_TS_16_AS 0x180078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define TSEM_REG_TS_17_AS 0x18007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define TSEM_REG_TS_18_AS 0x180080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define TSEM_REG_TS_1_AS 0x18003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define TSEM_REG_TS_2_AS 0x180040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define TSEM_REG_TS_3_AS 0x180044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define TSEM_REG_TS_4_AS 0x180048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define TSEM_REG_TS_5_AS 0x18004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define TSEM_REG_TS_6_AS 0x180050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define TSEM_REG_TS_7_AS 0x180054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define TSEM_REG_TS_8_AS 0x180058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define TSEM_REG_TS_9_AS 0x18005c +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSEM_REG_TSEM_INT_MASK_0 0x180100 +#define TSEM_REG_TSEM_INT_MASK_1 0x180110 +/* [R 32] Interrupt register #0 read */ +#define TSEM_REG_TSEM_INT_STS_0 0x1800f4 +#define TSEM_REG_TSEM_INT_STS_1 0x180104 +/* [RW 32] Parity mask register #0 read/write */ +#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 +#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 +/* [R 32] Parity register #0 read */ +#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 +#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define TSEM_REG_VFPF_ERR_NUM 0x180380 +/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits + * [10:8] of the address should be the offset within the accessed LCID + * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 + * LCID100. The RBC address should be 12'ha64. */ +#define UCM_REG_AG_CTX 0xe2000 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define UCM_REG_CAM_OCCUP 0xe0170 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define UCM_REG_CDU_AG_RD_IFEN 0xe0038 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define UCM_REG_CDU_AG_WR_IFEN 0xe0034 +/* [RW 1] CDU STORM read Interface enable. 
If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define UCM_REG_CDU_SM_RD_IFEN 0xe0040 +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define UCM_REG_CDU_SM_WR_IFEN 0xe003c +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define UCM_REG_CFC_INIT_CRD 0xe0204 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_CP_WEIGHT 0xe00c4 +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_CSEM_IFEN 0xe0028 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the csem interface is detected. */ +#define UCM_REG_CSEM_LENGTH_MIS 0xe0160 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_CSEM_WEIGHT 0xe00b8 +/* [RW 1] Input dorq Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_DORQ_IFEN 0xe0030 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the dorq interface is detected. */ +#define UCM_REG_DORQ_LENGTH_MIS 0xe0168 +/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_DORQ_WEIGHT 0xe00c0 +/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */ +#define UCM_REG_ERR_EVNT_ID 0xe00a4 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define UCM_REG_ERR_UCM_HDR 0xe00a0 +/* [RW 8] The Event ID for Timers expiration. */ +#define UCM_REG_EXPR_EVNT_ID 0xe00a8 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define UCM_REG_FIC0_INIT_CRD 0xe020c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define UCM_REG_FIC1_INIT_CRD 0xe0210 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr; + ~ucm_registers_gr_ld0_pr.gr_ld0_pr and + ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */ +#define UCM_REG_GR_ARB_TYPE 0xe0144 +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel group is + compliment to the others. */ +#define UCM_REG_GR_LD0_PR 0xe014c +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel group is + compliment to the others. 
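The UCM_REG_AG_CTX comment above packs the per-LCID register offset into the high bits of the indirect address and the LCID into bits [7:0]; its example (REG10 of LCID 100) yields 0xa64. A minimal sketch of that packing, taking the comment's example at face value; how the packed value is then combined with the UCM_REG_AG_CTX base is not spelled out here, so the sketch stops at the in-block address, and UCM_AG_CTX_ADDR is a hypothetical macro:

	/* Illustrative only: pack (register offset within the LCID context,
	 * LCID) into the indirect AG-context address described above. */
	#define UCM_AG_CTX_ADDR(reg, lcid)	(((reg) << 8) | ((lcid) & 0xff))

	/* Matches the comment's example: UCM_AG_CTX_ADDR(10, 100) == 0xa64 */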
*/ +#define UCM_REG_GR_LD1_PR 0xe0150 +/* [RW 2] The queue index for invalidate counter flag decision. */ +#define UCM_REG_INV_CFLG_Q 0xe00e4 +/* [RW 5] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. the double REG-pairs are + used in order to align to STORM context row size of 128 bits. The offset + of these data in the STORM context is always 0. Index _i stands for the + connection type (one of 16). */ +#define UCM_REG_N_SM_CTX_LD_0 0xe0054 +#define UCM_REG_N_SM_CTX_LD_1 0xe0058 +#define UCM_REG_N_SM_CTX_LD_2 0xe005c +#define UCM_REG_N_SM_CTX_LD_3 0xe0060 +#define UCM_REG_N_SM_CTX_LD_4 0xe0064 +#define UCM_REG_N_SM_CTX_LD_5 0xe0068 +#define UCM_REG_PHYS_QNUM0_0 0xe0110 +#define UCM_REG_PHYS_QNUM0_1 0xe0114 +#define UCM_REG_PHYS_QNUM1_0 0xe0118 +#define UCM_REG_PHYS_QNUM1_1 0xe011c +#define UCM_REG_PHYS_QNUM2_0 0xe0120 +#define UCM_REG_PHYS_QNUM2_1 0xe0124 +#define UCM_REG_PHYS_QNUM3_0 0xe0128 +#define UCM_REG_PHYS_QNUM3_1 0xe012c +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define UCM_REG_STOP_EVNT_ID 0xe00ac +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the STORM interface is detected. */ +#define UCM_REG_STORM_LENGTH_MIS 0xe0154 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_STORM_UCM_IFEN 0xe0010 +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_STORM_WEIGHT 0xe00b0 +/* [RW 4] Timers output initial credit. Max credit available - 15.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 4 at start-up. */ +#define UCM_REG_TM_INIT_CRD 0xe021c +/* [RW 28] The CM header for Timers expiration command. */ +#define UCM_REG_TM_UCM_HDR 0xe009c +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_TM_UCM_IFEN 0xe001c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_TM_WEIGHT 0xe00d4 +/* [RW 1] Input tsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_TSEM_IFEN 0xe0024 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the tsem interface is detected. */ +#define UCM_REG_TSEM_LENGTH_MIS 0xe015c +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_TSEM_WEIGHT 0xe00b4 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define UCM_REG_UCM_CFC_IFEN 0xe0044 +/* [RW 11] Interrupt mask register #0 read/write */ +#define UCM_REG_UCM_INT_MASK 0xe01d4 +/* [R 11] Interrupt register #0 read */ +#define UCM_REG_UCM_INT_STS 0xe01c8 +/* [RW 27] Parity mask register #0 read/write */ +#define UCM_REG_UCM_PRTY_MASK 0xe01e4 +/* [R 27] Parity register #0 read */ +#define UCM_REG_UCM_PRTY_STS 0xe01d8 +/* [RC 27] Parity register #0 read clear */ +#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc +/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the Reg1WbFlg isn't set. */ +#define UCM_REG_UCM_REG0_SZ 0xe00dc +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_STORM0_IFEN 0xe0004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_STORM1_IFEN 0xe0008 +/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_UCM_TM_IFEN 0xe0020 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_UQM_IFEN 0xe000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define UCM_REG_UCM_UQM_USE_Q 0xe00d8 +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define UCM_REG_UQM_INIT_CRD 0xe0220 +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_UQM_P_WEIGHT 0xe00cc +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_UQM_S_WEIGHT 0xe00d0 +/* [RW 28] The CM header value for QM request (primary). */ +#define UCM_REG_UQM_UCM_HDR_P 0xe0094 +/* [RW 28] The CM header value for QM request (secondary). */ +#define UCM_REG_UQM_UCM_HDR_S 0xe0098 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UQM_UCM_IFEN 0xe0014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_USDM_IFEN 0xe0018 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the SDM interface is detected. */ +#define UCM_REG_USDM_LENGTH_MIS 0xe0158 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_USDM_WEIGHT 0xe00c8 +/* [RW 1] Input xsem Interface enable. 
If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_XSEM_IFEN 0xe002c +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the xsem interface isdetected. */ +#define UCM_REG_XSEM_LENGTH_MIS 0xe0164 +/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_XSEM_WEIGHT 0xe00bc +/* [RW 20] Indirect access to the descriptor table of the XX protection + mechanism. The fields are:[5:0] - message length; 14:6] - message + pointer; 19:15] - next pointer. */ +#define UCM_REG_XX_DESCR_TABLE 0xe0280 +#define UCM_REG_XX_DESCR_TABLE_SIZE 27 +/* [R 6] Use to read the XX protection Free counter. */ +#define UCM_REG_XX_FREE 0xe016c +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. Write writes the initial credit value; read returns the current + value of the credit counter. Must be initialized to 12 at start-up. */ +#define UCM_REG_XX_INIT_CRD 0xe0224 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~ucm_registers_xx_free.xx_free read on read. */ +#define UCM_REG_XX_MSG_NUM 0xe0228 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - + header pointer. */ +#define UCM_REG_XX_TABLE 0xe0300 +#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28) +#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) +#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) +#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) +#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8) +#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) +#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) +#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) +#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) +#define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers + * to bit 17 of the MAC address etc. */ +#define UMAC_REG_MAC_ADDR0 0xc +/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1 + * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */ +#define UMAC_REG_MAC_ADDR1 0x10 +/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive + * logic to check frames. 
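The UMAC_COMMAND_CONFIG_REG_* values above are single-bit flags within the UMAC command/config register at offset UMAC_REG_COMMAND_CONFIG. A minimal sketch of composing such a value; the real bring-up ordering (for example around SW_RESET) is not described by these definitions, so this only shows the bit usage, with umac_base, struct bnx2x and REG_WR() as assumptions:

	/* Illustrative only: OR together the single-bit flags above and
	 * write them through the UMAC command/config register. */
	static void umac_enable_rx_tx(struct bnx2x *bp, u32 umac_base)
	{
		u32 val = UMAC_COMMAND_CONFIG_REG_TX_ENA |
			  UMAC_COMMAND_CONFIG_REG_RX_ENA |
			  UMAC_COMMAND_CONFIG_REG_PAD_EN;

		REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
	}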
*/ +#define UMAC_REG_MAXFR 0x14 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define USDM_REG_AGG_INT_EVENT_0 0xc4038 +#define USDM_REG_AGG_INT_EVENT_1 0xc403c +#define USDM_REG_AGG_INT_EVENT_2 0xc4040 +#define USDM_REG_AGG_INT_EVENT_4 0xc4048 +#define USDM_REG_AGG_INT_EVENT_5 0xc404c +#define USDM_REG_AGG_INT_EVENT_6 0xc4050 +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define USDM_REG_AGG_INT_MODE_0 0xc41b8 +#define USDM_REG_AGG_INT_MODE_1 0xc41bc +#define USDM_REG_AGG_INT_MODE_4 0xc41c8 +#define USDM_REG_AGG_INT_MODE_5 0xc41cc +#define USDM_REG_AGG_INT_MODE_6 0xc41d0 +/* [RW 1] The T bit for aggregated interrupt 5 */ +#define USDM_REG_AGG_INT_T_5 0xc40cc +#define USDM_REG_AGG_INT_T_6 0xc40d0 +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define USDM_REG_CFC_RSP_START_ADDR 0xc4008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define USDM_REG_CMP_COUNTER_MAX0 0xc401c +/* [RW 16] The maximum value of the completion counter #1 */ +#define USDM_REG_CMP_COUNTER_MAX1 0xc4020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define USDM_REG_CMP_COUNTER_MAX2 0xc4024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define USDM_REG_CMP_COUNTER_MAX3 0xc4028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c +#define USDM_REG_ENABLE_IN1 0xc4238 +#define USDM_REG_ENABLE_IN2 0xc423c +#define USDM_REG_ENABLE_OUT1 0xc4240 +#define USDM_REG_ENABLE_OUT2 0xc4244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. */ +#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0 +/* [ST 32] The number of ACK after placement messages received */ +#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280 +/* [ST 32] The number of packet end messages received from the parser */ +#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278 +/* [ST 32] The number of requests received from the pxp async if */ +#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c +/* [ST 32] The number of commands received in queue 0 */ +#define USDM_REG_NUM_OF_Q0_CMD 0xc4248 +/* [ST 32] The number of commands received in queue 10 */ +#define USDM_REG_NUM_OF_Q10_CMD 0xc4270 +/* [ST 32] The number of commands received in queue 11 */ +#define USDM_REG_NUM_OF_Q11_CMD 0xc4274 +/* [ST 32] The number of commands received in queue 1 */ +#define USDM_REG_NUM_OF_Q1_CMD 0xc424c +/* [ST 32] The number of commands received in queue 2 */ +#define USDM_REG_NUM_OF_Q2_CMD 0xc4250 +/* [ST 32] The number of commands received in queue 3 */ +#define USDM_REG_NUM_OF_Q3_CMD 0xc4254 +/* [ST 32] The number of commands received in queue 4 */ +#define USDM_REG_NUM_OF_Q4_CMD 0xc4258 +/* [ST 32] The number of commands received in queue 5 */ +#define USDM_REG_NUM_OF_Q5_CMD 0xc425c +/* [ST 32] The number of commands received in queue 6 */ +#define USDM_REG_NUM_OF_Q6_CMD 0xc4260 +/* [ST 32] The number of commands received in queue 7 */ +#define USDM_REG_NUM_OF_Q7_CMD 0xc4264 +/* [ST 32] The number of commands received in queue 8 */ +#define USDM_REG_NUM_OF_Q8_CMD 0xc4268 +/* [ST 32] The number of commands received in queue 9 */ +#define USDM_REG_NUM_OF_Q9_CMD 0xc426c +/* [RW 13] The start address in the internal RAM for the packet end message */ +#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010 +/* [R 1] 
pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550 +/* [R 1] parser fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560 +/* [RW 32] Tick for timer counter. Applicable only when + ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define USDM_REG_TIMER_TICK 0xc4000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define USDM_REG_USDM_INT_MASK_0 0xc42a0 +#define USDM_REG_USDM_INT_MASK_1 0xc42b0 +/* [R 32] Interrupt register #0 read */ +#define USDM_REG_USDM_INT_STS_0 0xc4294 +#define USDM_REG_USDM_INT_STS_1 0xc42a4 +/* [RW 11] Parity mask register #0 read/write */ +#define USDM_REG_USDM_PRTY_MASK 0xc42c0 +/* [R 11] Parity register #0 read */ +#define USDM_REG_USDM_PRTY_STS 0xc42b4 +/* [RC 11] Parity register #0 read clear */ +#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define USEM_REG_ARB_CYCLE_SIZE 0x300034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define USEM_REG_ARB_ELEMENT0 0x300020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 */ +#define USEM_REG_ARB_ELEMENT1 0x300024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 + and ~usem_registers_arb_element1.arb_element1 */ +#define USEM_REG_ARB_ELEMENT2 0x300028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~usem_registers_arb_element0.arb_element0 and + ~usem_registers_arb_element1.arb_element1 and + ~usem_registers_arb_element2.arb_element2 */ +#define USEM_REG_ARB_ELEMENT3 0x30002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 + and ~usem_registers_arb_element1.arb_element1 and + ~usem_registers_arb_element2.arb_element2 and + ~usem_registers_arb_element3.arb_element3 */ +#define USEM_REG_ARB_ELEMENT4 0x300030 +#define USEM_REG_ENABLE_IN 0x3000a4 +#define USEM_REG_ENABLE_OUT 0x3000a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. 
*/ +#define USEM_REG_FAST_MEMORY 0x320000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define USEM_REG_FIC0_DISABLE 0x300224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define USEM_REG_FIC1_DISABLE 0x300234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define USEM_REG_INT_TABLE 0x300400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define USEM_REG_MSG_NUM_FIC0 0x300000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define USEM_REG_MSG_NUM_FIC1 0x300004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define USEM_REG_MSG_NUM_FOC0 0x300008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define USEM_REG_MSG_NUM_FOC1 0x30000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define USEM_REG_MSG_NUM_FOC2 0x300010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define USEM_REG_MSG_NUM_FOC3 0x300014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define USEM_REG_PAS_DISABLE 0x30024c +/* [WB 128] Debug only. Passive buffer memory */ +#define USEM_REG_PASSIVE_BUFFER 0x302000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define USEM_REG_PRAM 0x340000 +/* [R 16] Valid sleeping threads indication have bit per thread */ +#define USEM_REG_SLEEP_THREADS_VALID 0x30026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0 +/* [RW 16] List of free threads . There is a bit per thread. 
*/ +#define USEM_REG_THREADS_LIST 0x3002e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define USEM_REG_TS_0_AS 0x300038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define USEM_REG_TS_10_AS 0x300060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define USEM_REG_TS_11_AS 0x300064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define USEM_REG_TS_12_AS 0x300068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define USEM_REG_TS_13_AS 0x30006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define USEM_REG_TS_14_AS 0x300070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define USEM_REG_TS_15_AS 0x300074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define USEM_REG_TS_16_AS 0x300078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define USEM_REG_TS_17_AS 0x30007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define USEM_REG_TS_18_AS 0x300080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define USEM_REG_TS_1_AS 0x30003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define USEM_REG_TS_2_AS 0x300040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define USEM_REG_TS_3_AS 0x300044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define USEM_REG_TS_4_AS 0x300048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define USEM_REG_TS_5_AS 0x30004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define USEM_REG_TS_6_AS 0x300050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define USEM_REG_TS_7_AS 0x300054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define USEM_REG_TS_8_AS 0x300058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define USEM_REG_TS_9_AS 0x30005c +/* [RW 32] Interrupt mask register #0 read/write */ +#define USEM_REG_USEM_INT_MASK_0 0x300110 +#define USEM_REG_USEM_INT_MASK_1 0x300120 +/* [R 32] Interrupt register #0 read */ +#define USEM_REG_USEM_INT_STS_0 0x300104 +#define USEM_REG_USEM_INT_STS_1 0x300114 +/* [RW 32] Parity mask register #0 read/write */ +#define USEM_REG_USEM_PRTY_MASK_0 0x300130 +#define USEM_REG_USEM_PRTY_MASK_1 0x300140 +/* [R 32] Parity register #0 read */ +#define USEM_REG_USEM_PRTY_STS_0 0x300124 +#define USEM_REG_USEM_PRTY_STS_1 0x300134 +/* [RC 32] Parity register #0 read clear */ +#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 +#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define USEM_REG_VFPF_ERR_NUM 0x300380 +#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0) +#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1) +#define VFC_REG_MEMORIES_RST 0x1943c +/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits + * [12:8] of the address should be the offset within the accessed LCID + * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 + * LCID100. The RBC address should be 13'ha64. */ +#define XCM_REG_AG_CTX 0x28000 +/* [RW 2] The queue index for registration on Aux1 counter flag. */ +#define XCM_REG_AUX1_Q 0x20134 +/* [RW 2] Per each decision rule the queue index to register to. */ +#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define XCM_REG_CAM_OCCUP 0x20244 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. 
*/ +#define XCM_REG_CDU_AG_RD_IFEN 0x20044 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define XCM_REG_CDU_AG_WR_IFEN 0x20040 +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define XCM_REG_CDU_SM_RD_IFEN 0x2004c +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define XCM_REG_CDU_SM_WR_IFEN 0x20048 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define XCM_REG_CFC_INIT_CRD 0x20404 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_CP_WEIGHT 0x200dc +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_CSEM_IFEN 0x20028 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the csem interface. */ +#define XCM_REG_CSEM_LENGTH_MIS 0x20228 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_CSEM_WEIGHT 0x200c4 +/* [RW 1] Input dorq Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_DORQ_IFEN 0x20030 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the dorq interface. */ +#define XCM_REG_DORQ_LENGTH_MIS 0x20230 +/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_DORQ_WEIGHT 0x200cc +/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */ +#define XCM_REG_ERR_EVNT_ID 0x200b0 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define XCM_REG_ERR_XCM_HDR 0x200ac +/* [RW 8] The Event ID for Timers expiration. */ +#define XCM_REG_EXPR_EVNT_ID 0x200b4 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define XCM_REG_FIC0_INIT_CRD 0x2040c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define XCM_REG_FIC1_INIT_CRD 0x20410 +#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118 +#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c +#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108 +#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c +/* [RW 1] Arbitratiojn between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr; + ~xcm_registers_gr_ld0_pr.gr_ld0_pr and + ~xcm_registers_gr_ld1_pr.gr_ld1_pr. 
*/ +#define XCM_REG_GR_ARB_TYPE 0x2020c +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Channel group is the + compliment of the other 3 groups. */ +#define XCM_REG_GR_LD0_PR 0x20214 +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Channel group is the + compliment of the other 3 groups. */ +#define XCM_REG_GR_LD1_PR 0x20218 +/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_NIG0_IFEN 0x20038 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the nig0 interface. */ +#define XCM_REG_NIG0_LENGTH_MIS 0x20238 +/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_NIG0_WEIGHT 0x200d4 +/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_NIG1_IFEN 0x2003c +/* [RC 1] Set at message length mismatch (relative to last indication) at + the nig1 interface. */ +#define XCM_REG_NIG1_LENGTH_MIS 0x2023c +/* [RW 5] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. The double REG-pairs are + used in order to align to STORM context row size of 128 bits. The offset + of these data in the STORM context is always 0. Index _i stands for the + connection type (one of 16). */ +#define XCM_REG_N_SM_CTX_LD_0 0x20060 +#define XCM_REG_N_SM_CTX_LD_1 0x20064 +#define XCM_REG_N_SM_CTX_LD_2 0x20068 +#define XCM_REG_N_SM_CTX_LD_3 0x2006c +#define XCM_REG_N_SM_CTX_LD_4 0x20070 +#define XCM_REG_N_SM_CTX_LD_5 0x20074 +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_PBF_IFEN 0x20034 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the pbf interface. */ +#define XCM_REG_PBF_LENGTH_MIS 0x20234 +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_PBF_WEIGHT 0x200d0 +#define XCM_REG_PHYS_QNUM3_0 0x20100 +#define XCM_REG_PHYS_QNUM3_1 0x20104 +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define XCM_REG_STOP_EVNT_ID 0x200b8 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the STORM interface. */ +#define XCM_REG_STORM_LENGTH_MIS 0x2021c +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_STORM_WEIGHT 0x200bc +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_STORM_XCM_IFEN 0x20010 +/* [RW 4] Timers output initial credit. Max credit available - 15.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 4 at start-up. 
*/ +#define XCM_REG_TM_INIT_CRD 0x2041c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_TM_WEIGHT 0x200ec +/* [RW 28] The CM header for Timers expiration command. */ +#define XCM_REG_TM_XCM_HDR 0x200a8 +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_TM_XCM_IFEN 0x2001c +/* [RW 1] Input tsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_TSEM_IFEN 0x20024 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the tsem interface. */ +#define XCM_REG_TSEM_LENGTH_MIS 0x20224 +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_TSEM_WEIGHT 0x200c0 +/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */ +#define XCM_REG_UNA_GT_NXT_Q 0x20120 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_USEM_IFEN 0x2002c +/* [RC 1] Message length mismatch (relative to last indication) at the usem + interface. */ +#define XCM_REG_USEM_LENGTH_MIS 0x2022c +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_USEM_WEIGHT 0x200c8 +#define XCM_REG_WU_DA_CNT_CMD00 0x201d4 +#define XCM_REG_WU_DA_CNT_CMD01 0x201d8 +#define XCM_REG_WU_DA_CNT_CMD10 0x201dc +#define XCM_REG_WU_DA_CNT_CMD11 0x201e0 +#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4 +#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8 +#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec +#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_CFC_IFEN 0x20050 +/* [RW 14] Interrupt mask register #0 read/write */ +#define XCM_REG_XCM_INT_MASK 0x202b4 +/* [R 14] Interrupt register #0 read */ +#define XCM_REG_XCM_INT_STS 0x202a8 +/* [RW 30] Parity mask register #0 read/write */ +#define XCM_REG_XCM_PRTY_MASK 0x202c4 +/* [R 30] Parity register #0 read */ +#define XCM_REG_XCM_PRTY_STS 0x202b8 +/* [RC 30] Parity register #0 read clear */ +#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc + +/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the Reg1WbFlg isn't set. */ +#define XCM_REG_XCM_REG0_SZ 0x200f4 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define XCM_REG_XCM_STORM0_IFEN 0x20004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_STORM1_IFEN 0x20008 +/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_XCM_TM_IFEN 0x20020 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_XQM_IFEN 0x2000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define XCM_REG_XCM_XQM_USE_Q 0x200f0 +/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */ +#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define XCM_REG_XQM_INIT_CRD 0x20420 +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_XQM_P_WEIGHT 0x200e4 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_XQM_S_WEIGHT 0x200e8 +/* [RW 28] The CM header value for QM request (primary). */ +#define XCM_REG_XQM_XCM_HDR_P 0x200a0 +/* [RW 28] The CM header value for QM request (secondary). */ +#define XCM_REG_XQM_XCM_HDR_S 0x200a4 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XQM_XCM_IFEN 0x20014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XSDM_IFEN 0x20018 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the SDM interface. */ +#define XCM_REG_XSDM_LENGTH_MIS 0x20220 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_XSDM_WEIGHT 0x200e0 +/* [RW 17] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - message length; 11:6] - message + pointer; 16:12] - next pointer. */ +#define XCM_REG_XX_DESCR_TABLE 0x20480 +#define XCM_REG_XX_DESCR_TABLE_SIZE 32 +/* [R 6] Used to read the XX protection Free counter. */ +#define XCM_REG_XX_FREE 0x20240 +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. Max credit available - 3.Write writes the initial credit value; + read returns the current value of the credit counter. Must be initialized + to 2 at start-up. */ +#define XCM_REG_XX_INIT_CRD 0x20424 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~xcm_registers_xx_free.xx_free read on read. 
*/ +#define XCM_REG_XX_MSG_NUM 0x20428 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define XCM_REG_XX_OVFL_EVNT_ID 0x20058 +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) +#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) +#define XMAC_CTRL_REG_RX_EN (0x1<<1) +#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) +#define XMAC_CTRL_REG_TX_EN (0x1<<0) +#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) +#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) +#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) +#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) +#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) +#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5) +#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60 +#define XMAC_REG_CTRL 0 +/* [RW 16] Upper 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC + * packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_HI 0x2c +/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC + * packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_LO 0x28 +#define XMAC_REG_PAUSE_CTRL 0x68 +#define XMAC_REG_PFC_CTRL 0x70 +#define XMAC_REG_PFC_CTRL_HI 0x74 +#define XMAC_REG_RX_LSS_STATUS 0x58 +/* [RW 14] Maximum packet size in receive direction; exclusive of preamble & + * CRC in strip mode */ +#define XMAC_REG_RX_MAX_SIZE 0x40 +#define XMAC_REG_TX_CTRL 0x20 +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] - + header pointer. */ +#define XCM_REG_XX_TABLE 0x20500 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define XSDM_REG_AGG_INT_EVENT_0 0x166038 +#define XSDM_REG_AGG_INT_EVENT_1 0x16603c +#define XSDM_REG_AGG_INT_EVENT_10 0x166060 +#define XSDM_REG_AGG_INT_EVENT_11 0x166064 +#define XSDM_REG_AGG_INT_EVENT_12 0x166068 +#define XSDM_REG_AGG_INT_EVENT_13 0x16606c +#define XSDM_REG_AGG_INT_EVENT_14 0x166070 +#define XSDM_REG_AGG_INT_EVENT_2 0x166040 +#define XSDM_REG_AGG_INT_EVENT_3 0x166044 +#define XSDM_REG_AGG_INT_EVENT_4 0x166048 +#define XSDM_REG_AGG_INT_EVENT_5 0x16604c +#define XSDM_REG_AGG_INT_EVENT_6 0x166050 +#define XSDM_REG_AGG_INT_EVENT_7 0x166054 +#define XSDM_REG_AGG_INT_EVENT_8 0x166058 +#define XSDM_REG_AGG_INT_EVENT_9 0x16605c +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define XSDM_REG_AGG_INT_MODE_0 0x1661b8 +#define XSDM_REG_AGG_INT_MODE_1 0x1661bc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define XSDM_REG_CFC_RSP_START_ADDR 0x166008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c +/* [RW 16] The maximum value of the completion counter #1 */ +#define XSDM_REG_CMP_COUNTER_MAX1 0x166020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define XSDM_REG_CMP_COUNTER_MAX2 0x166024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define XSDM_REG_CMP_COUNTER_MAX3 0x166028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c +#define XSDM_REG_ENABLE_IN1 0x166238 +#define XSDM_REG_ENABLE_IN2 0x16623c +#define XSDM_REG_ENABLE_OUT1 0x166240 +#define XSDM_REG_ENABLE_OUT2 0x166244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. 
*/ +#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc +/* [ST 32] The number of ACK after placement messages received */ +#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c +/* [ST 32] The number of packet end messages received from the parser */ +#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274 +/* [ST 32] The number of requests received from the pxp async if */ +#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278 +/* [ST 32] The number of commands received in queue 0 */ +#define XSDM_REG_NUM_OF_Q0_CMD 0x166248 +/* [ST 32] The number of commands received in queue 10 */ +#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c +/* [ST 32] The number of commands received in queue 11 */ +#define XSDM_REG_NUM_OF_Q11_CMD 0x166270 +/* [ST 32] The number of commands received in queue 1 */ +#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c +/* [ST 32] The number of commands received in queue 3 */ +#define XSDM_REG_NUM_OF_Q3_CMD 0x166250 +/* [ST 32] The number of commands received in queue 4 */ +#define XSDM_REG_NUM_OF_Q4_CMD 0x166254 +/* [ST 32] The number of commands received in queue 5 */ +#define XSDM_REG_NUM_OF_Q5_CMD 0x166258 +/* [ST 32] The number of commands received in queue 6 */ +#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c +/* [ST 32] The number of commands received in queue 7 */ +#define XSDM_REG_NUM_OF_Q7_CMD 0x166260 +/* [ST 32] The number of commands received in queue 8 */ +#define XSDM_REG_NUM_OF_Q8_CMD 0x166264 +/* [ST 32] The number of commands received in queue 9 */ +#define XSDM_REG_NUM_OF_Q9_CMD 0x166268 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 +/* [W 17] Generate an operation after completion; bit-16 is + * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and + * bits 4:0 are the T124Param[4:0] */ +#define XSDM_REG_OPERATION_GEN 0x1664c4 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558 +/* [RW 32] Tick for timer counter. Applicable only when + ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define XSDM_REG_TIMER_TICK 0x166000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSDM_REG_XSDM_INT_MASK_0 0x16629c +#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac +/* [R 32] Interrupt register #0 read */ +#define XSDM_REG_XSDM_INT_STS_0 0x166290 +#define XSDM_REG_XSDM_INT_STS_1 0x1662a0 +/* [RW 11] Parity mask register #0 read/write */ +#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc +/* [R 11] Parity register #0 read */ +#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 +/* [RC 11] Parity register #0 read clear */ +#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define XSEM_REG_ARB_ELEMENT0 0x280020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. 
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0 */ +#define XSEM_REG_ARB_ELEMENT1 0x280024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~xsem_registers_arb_element0.arb_element0 + and ~xsem_registers_arb_element1.arb_element1 */ +#define XSEM_REG_ARB_ELEMENT2 0x280028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~xsem_registers_arb_element0.arb_element0 and + ~xsem_registers_arb_element1.arb_element1 and + ~xsem_registers_arb_element2.arb_element2 */ +#define XSEM_REG_ARB_ELEMENT3 0x28002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~xsem_registers_arb_element0.arb_element0 + and ~xsem_registers_arb_element1.arb_element1 and + ~xsem_registers_arb_element2.arb_element2 and + ~xsem_registers_arb_element3.arb_element3 */ +#define XSEM_REG_ARB_ELEMENT4 0x280030 +#define XSEM_REG_ENABLE_IN 0x2800a4 +#define XSEM_REG_ENABLE_OUT 0x2800a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ +#define XSEM_REG_FAST_MEMORY 0x2a0000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define XSEM_REG_FIC0_DISABLE 0x280224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define XSEM_REG_FIC1_DISABLE 0x280234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define XSEM_REG_INT_TABLE 0x280400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define XSEM_REG_MSG_NUM_FIC0 0x280000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define XSEM_REG_MSG_NUM_FIC1 0x280004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define XSEM_REG_MSG_NUM_FOC0 0x280008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define XSEM_REG_MSG_NUM_FOC1 0x28000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define XSEM_REG_MSG_NUM_FOC2 0x280010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define XSEM_REG_MSG_NUM_FOC3 0x280014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define XSEM_REG_PAS_DISABLE 0x28024c +/* [WB 128] Debug only. Passive buffer memory */ +#define XSEM_REG_PASSIVE_BUFFER 0x282000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
*/ +#define XSEM_REG_PRAM 0x2c0000 +/* [R 16] Valid sleeping threads indication have bit per thread */ +#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0 +/* [RW 16] List of free threads . There is a bit per thread. */ +#define XSEM_REG_THREADS_LIST 0x2802e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define XSEM_REG_TS_0_AS 0x280038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define XSEM_REG_TS_10_AS 0x280060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define XSEM_REG_TS_11_AS 0x280064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define XSEM_REG_TS_12_AS 0x280068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define XSEM_REG_TS_13_AS 0x28006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define XSEM_REG_TS_14_AS 0x280070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define XSEM_REG_TS_15_AS 0x280074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define XSEM_REG_TS_16_AS 0x280078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define XSEM_REG_TS_17_AS 0x28007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define XSEM_REG_TS_18_AS 0x280080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define XSEM_REG_TS_1_AS 0x28003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define XSEM_REG_TS_2_AS 0x280040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define XSEM_REG_TS_3_AS 0x280044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define XSEM_REG_TS_4_AS 0x280048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define XSEM_REG_TS_5_AS 0x28004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define XSEM_REG_TS_6_AS 0x280050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define XSEM_REG_TS_7_AS 0x280054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define XSEM_REG_TS_8_AS 0x280058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define XSEM_REG_TS_9_AS 0x28005c +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. 
*/ +#define XSEM_REG_VFPF_ERR_NUM 0x280380 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSEM_REG_XSEM_INT_MASK_0 0x280110 +#define XSEM_REG_XSEM_INT_MASK_1 0x280120 +/* [R 32] Interrupt register #0 read */ +#define XSEM_REG_XSEM_INT_STS_0 0x280104 +#define XSEM_REG_XSEM_INT_STS_1 0x280114 +/* [RW 32] Parity mask register #0 read/write */ +#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 +#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 +/* [R 32] Parity register #0 read */ +#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 +#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 +/* [RC 32] Parity register #0 read clear */ +#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 +#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 +#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) +#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) +#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) +#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0) +#define MCPR_NVM_COMMAND_DOIT (1L<<4) +#define MCPR_NVM_COMMAND_DONE (1L<<3) +#define MCPR_NVM_COMMAND_FIRST (1L<<7) +#define MCPR_NVM_COMMAND_LAST (1L<<8) +#define MCPR_NVM_COMMAND_WR (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9) +#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1) +#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) +#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) +#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3) +#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) +#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) +#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) +#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3) +#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3) +#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3) +#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) +#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) +#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) +#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) +#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) +#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) +#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3) +#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) +#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) +#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) +#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3) +#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3) +#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3) +#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3) +#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3) +#define EMAC_LED_1000MB_OVERRIDE (1L<<1) +#define EMAC_LED_100MB_OVERRIDE (1L<<2) +#define EMAC_LED_10MB_OVERRIDE (1L<<3) +#define EMAC_LED_2500MB_OVERRIDE (1L<<12) +#define EMAC_LED_OVERRIDE (1L<<0) +#define EMAC_LED_TRAFFIC (1L<<6) +#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) +#define EMAC_MDIO_COMM_DATA (0xffffL<<0) +#define EMAC_MDIO_COMM_START_BUSY (1L<<29) +#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) +#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) +#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16) 
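/*
 * Editor's aside, not part of the patch above: a minimal sketch of how a
 * clause-22 MDIO write command word could be assembled from the
 * EMAC_MDIO_COMM_* fields defined just above.  The PHY-address and
 * register shift positions (21 and 16) and the helper name are assumptions
 * for illustration only; they are not taken from this excerpt.
 */
static inline u32 emac_mdio_wr22_cmd(u32 phy_addr, u32 reg, u16 val)
{
	/* the write data sits in the low 16 bits covered by EMAC_MDIO_COMM_DATA */
	return (phy_addr << 21) | (reg << 16) |
	       ((u32)val & EMAC_MDIO_COMM_DATA) |
	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
	       EMAC_MDIO_COMM_START_BUSY;	/* typically polled until it clears */
}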
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 +#define EMAC_MDIO_STATUS_10MB (1L<<1) +#define EMAC_MODE_25G_MODE (1L<<5) +#define EMAC_MODE_HALF_DUPLEX (1L<<1) +#define EMAC_MODE_PORT_GMII (2L<<2) +#define EMAC_MODE_PORT_MII (1L<<2) +#define EMAC_MODE_PORT_MII_10M (3L<<2) +#define EMAC_MODE_RESET (1L<<0) +#define EMAC_REG_EMAC_LED 0xc +#define EMAC_REG_EMAC_MAC_MATCH 0x10 +#define EMAC_REG_EMAC_MDIO_COMM 0xac +#define EMAC_REG_EMAC_MDIO_MODE 0xb4 +#define EMAC_REG_EMAC_MDIO_STATUS 0xb0 +#define EMAC_REG_EMAC_MODE 0x0 +#define EMAC_REG_EMAC_RX_MODE 0xc8 +#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c +#define EMAC_REG_EMAC_RX_STAT_AC 0x180 +#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4 +#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23 +#define EMAC_REG_EMAC_TX_MODE 0xbc +#define EMAC_REG_EMAC_TX_STAT_AC 0x280 +#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 +#define EMAC_REG_RX_PFC_MODE 0x320 +#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2) +#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1) +#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0) +#define EMAC_REG_RX_PFC_PARAM 0x324 +#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0 +#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330 +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c +#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334 +#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0) +#define EMAC_RX_MODE_FLOW_EN (1L<<2) +#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3) +#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) +#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) +#define EMAC_RX_MODE_RESET (1L<<0) +#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) +#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) +#define EMAC_TX_MODE_FLOW_EN (1L<<4) +#define EMAC_TX_MODE_RESET (1L<<0) +#define MISC_REGISTERS_GPIO_0 0 +#define MISC_REGISTERS_GPIO_1 1 +#define MISC_REGISTERS_GPIO_2 2 +#define MISC_REGISTERS_GPIO_3 3 +#define MISC_REGISTERS_GPIO_CLR_POS 16 +#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24) +#define MISC_REGISTERS_GPIO_FLOAT_POS 24 +#define MISC_REGISTERS_GPIO_HIGH 1 +#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_GPIO_INT_CLR_POS 24 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1 +#define MISC_REGISTERS_GPIO_INT_SET_POS 16 +#define MISC_REGISTERS_GPIO_LOW 0 +#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 +#define MISC_REGISTERS_GPIO_SET_POS 8 +#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 +#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) +#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) +#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26) +#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) +#define MISC_REGISTERS_RESET_REG_1_SET 0x584 +#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 +#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) +#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25) +#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19) +#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3) +#define 
MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) +#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) +#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) +#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) +#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13) +#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) +#define MISC_REGISTERS_RESET_REG_2_SET 0x594 +#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20) +#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21) +#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22) +#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23) +#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) +#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 +#define MISC_REGISTERS_SPIO_4 4 +#define MISC_REGISTERS_SPIO_5 5 +#define MISC_REGISTERS_SPIO_7 7 +#define MISC_REGISTERS_SPIO_CLR_POS 16 +#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24) +#define MISC_REGISTERS_SPIO_FLOAT_POS 24 +#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16 +#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_SPIO_SET_POS 8 +#define HW_LOCK_DRV_FLAGS 10 +#define HW_LOCK_MAX_RESOURCE_VALUE 31 +#define HW_LOCK_RESOURCE_GPIO 1 +#define HW_LOCK_RESOURCE_MDIO 0 +#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 +#define HW_LOCK_RESOURCE_SPIO 2 +#define HW_LOCK_RESOURCE_UNDI 5 +#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1) +#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR 
(0x1<<12) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23) +#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21) +#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16) +#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10) + +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9) + +#define RESERVED_GENERAL_ATTENTION_BIT_0 0 + +#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 +#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 + +#define RESERVED_GENERAL_ATTENTION_BIT_6 6 +#define RESERVED_GENERAL_ATTENTION_BIT_7 7 +#define RESERVED_GENERAL_ATTENTION_BIT_8 8 +#define RESERVED_GENERAL_ATTENTION_BIT_9 9 +#define RESERVED_GENERAL_ATTENTION_BIT_10 10 +#define RESERVED_GENERAL_ATTENTION_BIT_11 11 +#define RESERVED_GENERAL_ATTENTION_BIT_12 12 +#define RESERVED_GENERAL_ATTENTION_BIT_13 13 +#define RESERVED_GENERAL_ATTENTION_BIT_14 14 +#define RESERVED_GENERAL_ATTENTION_BIT_15 15 +#define RESERVED_GENERAL_ATTENTION_BIT_16 16 +#define RESERVED_GENERAL_ATTENTION_BIT_17 17 +#define RESERVED_GENERAL_ATTENTION_BIT_18 18 +#define RESERVED_GENERAL_ATTENTION_BIT_19 19 +#define RESERVED_GENERAL_ATTENTION_BIT_20 20 
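/*
 * Editor's aside, not part of the patch above: the AEU_INPUTS_ATTN_BITS_*
 * values defined above are single-bit masks inside 32-bit AEU attention
 * words, so a handler would typically test them as in this minimal sketch.
 * The function name and the 'asserted' parameter are hypothetical and only
 * illustrate how the masks are meant to be combined with a status word.
 */
static inline int aeu_attn_is_pxp_hw(u32 asserted)
{
	/* non-zero when the PXP hardware-interrupt attention bit is set */
	return !!(asserted & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT);
}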
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21 + +/* storm asserts attention bits */ +#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7 +#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8 +#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9 +#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10 + +/* mcp error attention bit */ +#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11 + +/*E1H NIG status sync attention mapped to group 4-7*/ +#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12 +#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13 +#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14 +#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15 +#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16 +#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17 +#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18 +#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19 + + +#define LATCHED_ATTN_RBCR 23 +#define LATCHED_ATTN_RBCT 24 +#define LATCHED_ATTN_RBCN 25 +#define LATCHED_ATTN_RBCU 26 +#define LATCHED_ATTN_RBCP 27 +#define LATCHED_ATTN_TIMEOUT_GRC 28 +#define LATCHED_ATTN_RSVD_GRC 29 +#define LATCHED_ATTN_ROM_PARITY_MCP 30 +#define LATCHED_ATTN_UM_RX_PARITY_MCP 31 +#define LATCHED_ATTN_UM_TX_PARITY_MCP 32 +#define LATCHED_ATTN_SCPAD_PARITY_MCP 33 + +#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32) +#define GENERAL_ATTEN_OFFSET(atten_name)\ + (1UL << ((94 + atten_name) % 32)) +/* + * This file defines GRC base address for every block. + * This file is included by chipsim, asm microcode and cpp microcode. + * These values are used in Design.xml on regBase attribute + * Use the base with the generated offsets of specific registers. 
+ */ + +#define GRCBASE_PXPCS 0x000000 +#define GRCBASE_PCICONFIG 0x002000 +#define GRCBASE_PCIREG 0x002400 +#define GRCBASE_EMAC0 0x008000 +#define GRCBASE_EMAC1 0x008400 +#define GRCBASE_DBU 0x008800 +#define GRCBASE_MISC 0x00A000 +#define GRCBASE_DBG 0x00C000 +#define GRCBASE_NIG 0x010000 +#define GRCBASE_XCM 0x020000 +#define GRCBASE_PRS 0x040000 +#define GRCBASE_SRCH 0x040400 +#define GRCBASE_TSDM 0x042000 +#define GRCBASE_TCM 0x050000 +#define GRCBASE_BRB1 0x060000 +#define GRCBASE_MCP 0x080000 +#define GRCBASE_UPB 0x0C1000 +#define GRCBASE_CSDM 0x0C2000 +#define GRCBASE_USDM 0x0C4000 +#define GRCBASE_CCM 0x0D0000 +#define GRCBASE_UCM 0x0E0000 +#define GRCBASE_CDU 0x101000 +#define GRCBASE_DMAE 0x102000 +#define GRCBASE_PXP 0x103000 +#define GRCBASE_CFC 0x104000 +#define GRCBASE_HC 0x108000 +#define GRCBASE_PXP2 0x120000 +#define GRCBASE_PBF 0x140000 +#define GRCBASE_UMAC0 0x160000 +#define GRCBASE_UMAC1 0x160400 +#define GRCBASE_XPB 0x161000 +#define GRCBASE_MSTAT0 0x162000 +#define GRCBASE_MSTAT1 0x162800 +#define GRCBASE_XMAC0 0x163000 +#define GRCBASE_XMAC1 0x163800 +#define GRCBASE_TIMERS 0x164000 +#define GRCBASE_XSDM 0x166000 +#define GRCBASE_QM 0x168000 +#define GRCBASE_DQ 0x170000 +#define GRCBASE_TSEM 0x180000 +#define GRCBASE_CSEM 0x200000 +#define GRCBASE_XSEM 0x280000 +#define GRCBASE_USEM 0x300000 +#define GRCBASE_MISC_AEU GRCBASE_MISC + + +/* offset of configuration space in the pci core register */ +#define PCICFG_OFFSET 0x2000 +#define PCICFG_VENDOR_ID_OFFSET 0x00 +#define PCICFG_DEVICE_ID_OFFSET 0x02 +#define PCICFG_COMMAND_OFFSET 0x04 +#define PCICFG_COMMAND_IO_SPACE (1<<0) +#define PCICFG_COMMAND_MEM_SPACE (1<<1) +#define PCICFG_COMMAND_BUS_MASTER (1<<2) +#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) +#define PCICFG_COMMAND_MWI_CYCLES (1<<4) +#define PCICFG_COMMAND_VGA_SNOOP (1<<5) +#define PCICFG_COMMAND_PERR_ENA (1<<6) +#define PCICFG_COMMAND_STEPPING (1<<7) +#define PCICFG_COMMAND_SERR_ENA (1<<8) +#define PCICFG_COMMAND_FAST_B2B (1<<9) +#define PCICFG_COMMAND_INT_DISABLE (1<<10) +#define PCICFG_COMMAND_RESERVED (0x1f<<11) +#define PCICFG_STATUS_OFFSET 0x06 +#define PCICFG_REVESION_ID_OFFSET 0x08 +#define PCICFG_CACHE_LINE_SIZE 0x0c +#define PCICFG_LATENCY_TIMER 0x0d +#define PCICFG_BAR_1_LOW 0x10 +#define PCICFG_BAR_1_HIGH 0x14 +#define PCICFG_BAR_2_LOW 0x18 +#define PCICFG_BAR_2_HIGH 0x1c +#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c +#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e +#define PCICFG_INT_LINE 0x3c +#define PCICFG_INT_PIN 0x3d +#define PCICFG_PM_CAPABILITY 0x48 +#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) +#define PCICFG_PM_CAPABILITY_CLOCK (1<<19) +#define PCICFG_PM_CAPABILITY_RESERVED (1<<20) +#define PCICFG_PM_CAPABILITY_DSI (1<<21) +#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) +#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) +#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) +#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27) +#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) +#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) +#define PCICFG_PM_CSR_OFFSET 0x4c +#define PCICFG_PM_CSR_STATE (0x3<<0) +#define PCICFG_PM_CSR_PME_ENABLE (1<<8) +#define PCICFG_PM_CSR_PME_STATUS (1<<15) +#define PCICFG_MSI_CAP_ID_OFFSET 0x58 +#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16) +#define PCICFG_MSI_CONTROL_MCAP (0x7<<17) +#define PCICFG_MSI_CONTROL_MENA (0x7<<20) +#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) +#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE 
(0x1<<24) +#define PCICFG_GRC_ADDRESS 0x78 +#define PCICFG_GRC_DATA 0x80 +#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 +#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) +#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) +#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30) +#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31) + +#define PCICFG_DEVICE_CONTROL 0xb4 +#define PCICFG_DEVICE_STATUS 0xb6 +#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0) +#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1) +#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2) +#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3) +#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4) +#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5) +#define PCICFG_LINK_CONTROL 0xbc + + +#define BAR_USTRORM_INTMEM 0x400000 +#define BAR_CSTRORM_INTMEM 0x410000 +#define BAR_XSTRORM_INTMEM 0x420000 +#define BAR_TSTRORM_INTMEM 0x430000 + +/* for accessing the IGU in case of status block ACK */ +#define BAR_IGU_INTMEM 0x440000 + +#define BAR_DOORBELL_OFFSET 0x800000 + +#define BAR_ME_REGISTER 0x450000 + +/* config_2 offset */ +#define GRC_CONFIG_2_SIZE_REG 0x408 +#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) +#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) +#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) + +/* config_3 offset */ +#define GRC_CONFIG_3_SIZE_REG 0x40c +#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define PCI_CONFIG_3_FORCE_PME (1L<<24) +#define PCI_CONFIG_3_PME_STATUS (1L<<25) +#define PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define PCI_CONFIG_3_PCI_POWER (1L<<31) + +#define GRC_BAR2_CONFIG 0x4e0 +#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) 
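/*
 * Editor's aside, not part of the patch above: a minimal sketch assuming
 * PCICFG_GRC_ADDRESS (0x78) and PCICFG_GRC_DATA (0x80), defined just above,
 * form a PCI configuration-space window into GRC register space: write the
 * GRC address through the first register, then read the value back through
 * the second.  The helper name and the 'pdev' parameter are hypothetical,
 * and <linux/pci.h> is assumed for pci_{read,write}_config_dword().
 */
static u32 grc_window_read(struct pci_dev *pdev, u32 grc_addr)
{
	u32 val;

	pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, grc_addr);
	pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
	return val;
}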
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) + +#define PCI_PM_DATA_A 0x410 +#define PCI_PM_DATA_B 0x414 +#define PCI_ID_VAL1 0x434 +#define PCI_ID_VAL2 0x438 + +#define PXPCS_TL_CONTROL_5 0x814 +#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ +#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/ +#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/ +#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/ +#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/ +#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/ +#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/ +#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/ + + +#define PXPCS_TL_FUNC345_STAT 0x854 +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\ + (1 << 28) /* Unsupported Request Error Status in function4, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\ + (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\ + (1 << 26) /* Malformed TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\ + (1 << 25) /* Receiver Overflow Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\ + (1 << 24) /* Unexpected Completion Status Status in function 4, \ + if set, generate pcie_err_attn output when this error is seen. 
WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\ + (1 << 23) /* Receive UR Statusin function 4. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\ + (1 << 22) /* Completer Timeout Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 4, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\ + (1 << 20) /* Poisoned Error Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\ + (1 << 18) /* Unsupported Request Error Status in function3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\ + (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\ + (1 << 16) /* Malformed TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\ + (1 << 15) /* Receiver Overflow Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\ + (1 << 14) /* Unexpected Completion Status Status in function 3, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\ + (1 << 13) /* Receive UR Statusin function 3. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\ + (1 << 12) /* Completer Timeout Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 3, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\ + (1 << 10) /* Poisoned Error Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\ + (1 << 8) /* Unsupported Request Error Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\ + (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\ + (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\ + (1 << 5) /* Receiver Overflow Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\ + (1 << 4) /* Unexpected Completion Status Status for Function 2, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\ + (1 << 3) /* Receive UR Statusfor Function 2. 
If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\ + (1 << 2) /* Completer Timeout Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 2, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\ + (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ + + +#define PXPCS_TL_FUNC678_STAT 0x85C +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\ + (1 << 28) /* Unsupported Request Error Status in function7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\ + (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\ + (1 << 26) /* Malformed TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\ + (1 << 25) /* Receiver Overflow Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\ + (1 << 24) /* Unexpected Completion Status Status in function 7, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\ + (1 << 23) /* Receive UR Statusin function 7. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\ + (1 << 22) /* Completer Timeout Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 7, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\ + (1 << 20) /* Poisoned Error Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\ + (1 << 18) /* Unsupported Request Error Status in function6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\ + (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\ + (1 << 16) /* Malformed TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\ + (1 << 15) /* Receiver Overflow Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\ + (1 << 14) /* Unexpected Completion Status Status in function 6, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\ + (1 << 13) /* Receive UR Statusin function 6. If set, generate \ + pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\ + (1 << 12) /* Completer Timeout Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 6, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\ + (1 << 10) /* Poisoned Error Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\ + (1 << 8) /* Unsupported Request Error Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\ + (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\ + (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\ + (1 << 5) /* Receiver Overflow Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\ + (1 << 4) /* Unexpected Completion Status Status for Function 5, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\ + (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\ + (1 << 2) /* Completer Timeout Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 5, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\ + (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. 
WC */ + + +#define BAR_USTRORM_INTMEM 0x400000 +#define BAR_CSTRORM_INTMEM 0x410000 +#define BAR_XSTRORM_INTMEM 0x420000 +#define BAR_TSTRORM_INTMEM 0x430000 + +/* for accessing the IGU in case of status block ACK */ +#define BAR_IGU_INTMEM 0x440000 + +#define BAR_DOORBELL_OFFSET 0x800000 + +#define BAR_ME_REGISTER 0x450000 +#define ME_REG_PF_NUM_SHIFT 0 +#define ME_REG_PF_NUM\ + (7L<> 1; + } + + /* split the crc into 8 bits */ + for (i = 0; i < 8; i++) { + C[i] = crc & 1; + crc = crc >> 1; + } + + NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^ + D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^ + C[6] ^ C[7]; + NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^ + D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ + D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ + C[6]; + NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^ + D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^ + C[0] ^ C[1] ^ C[4] ^ C[5]; + NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^ + D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^ + C[1] ^ C[2] ^ C[5] ^ C[6]; + NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^ + D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^ + C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7]; + NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^ + D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^ + C[3] ^ C[4] ^ C[7]; + NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^ + D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ + C[5]; + NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^ + D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ + C[6]; + + crc_res = 0; + for (i = 0; i < 8; i++) + crc_res |= (NewCRC[i] << i); + + return crc_res; +} + + +#endif /* BNX2X_REG_H */ diff --cc drivers/net/ethernet/freescale/gianfar.c index 29dff1ec7f2d,000000000000..81d409d08c97 mode 100644,000000..100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@@ -1,3291 -1,0 +1,3296 @@@ +/* + * drivers/net/gianfar.c + * + * Gianfar Ethernet Driver + * This driver is designed for the non-CPM ethernet controllers + * on the 85xx and 83xx family of integrated processors + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala + * Modifier: Sandeep Gopalpet + * + * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. + * Copyright 2007 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Gianfar: AKA Lambda Draconis, "Dragon" + * RA 11 31 24.2 + * Dec +69 19 52 + * V 3.84 + * B-V +1.62 + * + * Theory of operation + * + * The driver is initialized through of_device. Configuration information + * is therefore conveyed through an OF-style device tree. + * + * The Gianfar Ethernet Controller uses a ring of buffer + * descriptors. The beginning is indicated by a register + * pointing to the physical address of the start of the ring. + * The end is determined by a "wrap" bit being set in the + * last descriptor of the ring. 
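To make the ring layout just described concrete (a sketch under assumptions, not code from this patch): walking such a ring means advancing descriptor by descriptor until the wrap bit marks the last one, then returning to the ring base. Using the flag and field names this driver uses further down (the helper name and the 16-bit status access are assumptions), that is roughly:

	/* Sketch only: RXBD_WRAP, rx_bd_base and struct rxbd8 mirror the
	 * gianfar code below; next_rx_bd() itself is illustrative. */
	static inline struct rxbd8 *next_rx_bd(struct gfar_priv_rx_q *rx_queue,
						struct rxbd8 *bdp)
	{
		if (bdp->status & RXBD_WRAP)	/* last descriptor in the ring */
			return rx_queue->rx_bd_base;
		return bdp + 1;
	}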
+ * + * When a packet is received, the RXF bit in the + * IEVENT register is set, triggering an interrupt when the + * corresponding bit in the IMASK register is also set (if + * interrupt coalescing is active, then the interrupt may not + * happen immediately, but will wait until either a set number + * of frames or amount of time have passed). In NAPI, the + * interrupt handler will signal there is work to be done, and + * exit. This method will start at the last known empty + * descriptor, and process every subsequent descriptor until there + * are none left with data (NAPI will stop after a set number of + * packets to give time to other tasks, but will eventually + * process all the packets). The data arrives inside a + * pre-allocated skb, and so after the skb is passed up to the + * stack, a new skb must be allocated, and the address field in + * the buffer descriptor must be updated to indicate this new + * skb. + * + * When the kernel requests that a packet be transmitted, the + * driver starts where it left off last time, and points the + * descriptor at the buffer which was passed in. The driver + * then informs the DMA engine that there are packets ready to + * be transmitted. Once the controller is finished transmitting + * the packet, an interrupt may be triggered (under the same + * conditions as for reception, but depending on the TXF bit). + * The driver then cleans up the buffer. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" +#include "fsl_pq_mdio.h" + +#define TX_TIMEOUT (1*HZ) +#undef BRIEF_GFAR_ERRORS +#undef VERBOSE_GFAR_ERRORS + +const char gfar_driver_name[] = "Gianfar Ethernet"; +const char gfar_driver_version[] = "1.3"; + +static int gfar_enet_open(struct net_device *dev); +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static void gfar_reset_task(struct work_struct *work); +static void gfar_timeout(struct net_device *dev); +static int gfar_close(struct net_device *dev); +struct sk_buff *gfar_new_skb(struct net_device *dev); +static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, + struct sk_buff *skb); +static int gfar_set_mac_address(struct net_device *dev); +static int gfar_change_mtu(struct net_device *dev, int new_mtu); +static irqreturn_t gfar_error(int irq, void *dev_id); +static irqreturn_t gfar_transmit(int irq, void *dev_id); +static irqreturn_t gfar_interrupt(int irq, void *dev_id); +static void adjust_link(struct net_device *dev); +static void init_registers(struct net_device *dev); +static int init_phy(struct net_device *dev); +static int gfar_probe(struct platform_device *ofdev); +static int gfar_remove(struct platform_device *ofdev); +static void free_skb_resources(struct gfar_private *priv); +static void gfar_set_multi(struct net_device *dev); +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); +static void gfar_configure_serdes(struct net_device *dev); +static int gfar_poll(struct napi_struct *napi, int budget); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void gfar_netpoll(struct net_device *dev); +#endif +int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); +static int 
gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); +static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull); +void gfar_halt(struct net_device *dev); +static void gfar_halt_nodisable(struct net_device *dev); +void gfar_start(struct net_device *dev); +static void gfar_clear_exact_match(struct net_device *dev); +static void gfar_set_mac_for_addr(struct net_device *dev, int num, + const u8 *addr); +static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Gianfar Ethernet Driver"); +MODULE_LICENSE("GPL"); + +static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, + dma_addr_t buf) +{ + u32 lstatus; + + bdp->bufPtr = buf; + + lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); + if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) + lstatus |= BD_LFLAG(RXBD_WRAP); + + eieio(); + + bdp->lstatus = lstatus; +} + +static int gfar_init_bds(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + struct txbd8 *txbdp; + struct rxbd8 *rxbdp; + int i, j; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + /* Initialize some variables in our dev structure */ + tx_queue->num_txbdfree = tx_queue->tx_ring_size; + tx_queue->dirty_tx = tx_queue->tx_bd_base; + tx_queue->cur_tx = tx_queue->tx_bd_base; + tx_queue->skb_curtx = 0; + tx_queue->skb_dirtytx = 0; + + /* Initialize Transmit Descriptor Ring */ + txbdp = tx_queue->tx_bd_base; + for (j = 0; j < tx_queue->tx_ring_size; j++) { + txbdp->lstatus = 0; + txbdp->bufPtr = 0; + txbdp++; + } + + /* Set the last descriptor in the ring to indicate wrap */ + txbdp--; + txbdp->status |= TXBD_WRAP; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->cur_rx = rx_queue->rx_bd_base; + rx_queue->skb_currx = 0; + rxbdp = rx_queue->rx_bd_base; + + for (j = 0; j < rx_queue->rx_ring_size; j++) { + struct sk_buff *skb = rx_queue->rx_skbuff[j]; + + if (skb) { + gfar_init_rxbdp(rx_queue, rxbdp, + rxbdp->bufPtr); + } else { + skb = gfar_new_skb(ndev); + if (!skb) { + netdev_err(ndev, "Can't allocate RX buffers\n"); + goto err_rxalloc_fail; + } + rx_queue->rx_skbuff[j] = skb; + + gfar_new_rxbdp(rx_queue, rxbdp, skb); + } + + rxbdp++; + } + + } + + return 0; + +err_rxalloc_fail: + free_skb_resources(priv); + return -ENOMEM; +} + +static int gfar_alloc_skb_resources(struct net_device *ndev) +{ + void *vaddr; + dma_addr_t addr; + int i, j, k; + struct gfar_private *priv = netdev_priv(ndev); + struct device *dev = &priv->ofdev->dev; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + + priv->total_tx_ring_size = 0; + for (i = 0; i < priv->num_tx_queues; i++) + priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; + + priv->total_rx_ring_size = 0; + for (i = 0; i < priv->num_rx_queues; i++) + priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; + + /* Allocate memory for the buffer descriptors */ + vaddr = dma_alloc_coherent(dev, + sizeof(struct txbd8) * priv->total_tx_ring_size + + sizeof(struct rxbd8) * priv->total_rx_ring_size, + &addr, GFP_KERNEL); + if (!vaddr) { + netif_err(priv, ifup, ndev, + "Could not allocate buffer descriptors!\n"); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_bd_base = vaddr; + tx_queue->tx_bd_dma_base = 
addr; + tx_queue->dev = ndev; + /* enet DMA only understands physical addresses */ + addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; + vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; + } + + /* Start the rx descriptor ring where the tx ring leaves off */ + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_bd_base = vaddr; + rx_queue->rx_bd_dma_base = addr; + rx_queue->dev = ndev; + addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; + vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; + } + + /* Setup the skbuff rings */ + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * + tx_queue->tx_ring_size, GFP_KERNEL); + if (!tx_queue->tx_skbuff) { + netif_err(priv, ifup, ndev, + "Could not allocate tx_skbuff\n"); + goto cleanup; + } + + for (k = 0; k < tx_queue->tx_ring_size; k++) + tx_queue->tx_skbuff[k] = NULL; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * + rx_queue->rx_ring_size, GFP_KERNEL); + + if (!rx_queue->rx_skbuff) { + netif_err(priv, ifup, ndev, + "Could not allocate rx_skbuff\n"); + goto cleanup; + } + + for (j = 0; j < rx_queue->rx_ring_size; j++) + rx_queue->rx_skbuff[j] = NULL; + } + + if (gfar_init_bds(ndev)) + goto cleanup; + + return 0; + +cleanup: + free_skb_resources(priv); + return -ENOMEM; +} + +static void gfar_init_tx_rx_base(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i; + + baddr = ®s->tbase0; + for(i = 0; i < priv->num_tx_queues; i++) { + gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); + baddr += 2; + } + + baddr = ®s->rbase0; + for(i = 0; i < priv->num_rx_queues; i++) { + gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); + baddr += 2; + } +} + +static void gfar_init_mac(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 rctrl = 0; + u32 tctrl = 0; + u32 attrs = 0; + + /* write the tx/rx base registers */ + gfar_init_tx_rx_base(priv); + + /* Configure the coalescing support */ + gfar_configure_coalescing(priv, 0xFF, 0xFF); + + if (priv->rx_filer_enable) { + rctrl |= RCTRL_FILREN; + /* Program the RIR0 reg with the required distribution */ + gfar_write(®s->rir0, DEFAULT_RIR0); + } + + if (ndev->features & NETIF_F_RXCSUM) + rctrl |= RCTRL_CHECKSUMMING; + + if (priv->extended_hash) { + rctrl |= RCTRL_EXTHASH; + + gfar_clear_exact_match(ndev); + rctrl |= RCTRL_EMEN; + } + + if (priv->padding) { + rctrl &= ~RCTRL_PAL_MASK; + rctrl |= RCTRL_PADDING(priv->padding); + } + + /* Insert receive time stamps into padding alignment bytes */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { + rctrl &= ~RCTRL_PAL_MASK; + rctrl |= RCTRL_PADDING(8); + priv->padding = 8; + } + + /* Enable HW time stamping if requested from user space */ + if (priv->hwts_rx_en) + rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; + + if (ndev->features & NETIF_F_HW_VLAN_RX) + rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + + /* Init rctrl based on our settings */ + gfar_write(®s->rctrl, rctrl); + + if (ndev->features & NETIF_F_IP_CSUM) + tctrl |= TCTRL_INIT_CSUM; + + tctrl |= TCTRL_TXSCHED_PRIO; + + gfar_write(®s->tctrl, tctrl); + + /* Set the extraction length and index */ + attrs = ATTRELI_EL(priv->rx_stash_size) | + ATTRELI_EI(priv->rx_stash_index); + + gfar_write(®s->attreli, attrs); + + /* Start 
with defaults, and add stashing or locking + * depending on the approprate variables */ + attrs = ATTR_INIT_SETTINGS; + + if (priv->bd_stash_en) + attrs |= ATTR_BDSTASH; + + if (priv->rx_stash_size != 0) + attrs |= ATTR_BUFSTASH; + + gfar_write(®s->attr, attrs); + + gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); + gfar_write(®s->fifo_tx_starve, priv->fifo_starve); + gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); +} + +static struct net_device_stats *gfar_get_stats(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; + unsigned long tx_packets = 0, tx_bytes = 0; + int i = 0; + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_packets += priv->rx_queue[i]->stats.rx_packets; + rx_bytes += priv->rx_queue[i]->stats.rx_bytes; + rx_dropped += priv->rx_queue[i]->stats.rx_dropped; + } + + dev->stats.rx_packets = rx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_dropped = rx_dropped; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_bytes += priv->tx_queue[i]->stats.tx_bytes; + tx_packets += priv->tx_queue[i]->stats.tx_packets; + } + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + + return &dev->stats; +} + +static const struct net_device_ops gfar_netdev_ops = { + .ndo_open = gfar_enet_open, + .ndo_start_xmit = gfar_start_xmit, + .ndo_stop = gfar_close, + .ndo_change_mtu = gfar_change_mtu, + .ndo_set_features = gfar_set_features, + .ndo_set_rx_mode = gfar_set_multi, + .ndo_tx_timeout = gfar_timeout, + .ndo_do_ioctl = gfar_ioctl, + .ndo_get_stats = gfar_get_stats, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gfar_netpoll, +#endif +}; + +void lock_rx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_rx_queues; i++) + spin_lock(&priv->rx_queue[i]->rxlock); +} + +void lock_tx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_tx_queues; i++) + spin_lock(&priv->tx_queue[i]->txlock); +} + +void unlock_rx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_rx_queues; i++) + spin_unlock(&priv->rx_queue[i]->rxlock); +} + +void unlock_tx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_tx_queues; i++) + spin_unlock(&priv->tx_queue[i]->txlock); +} + +static bool gfar_is_vlan_on(struct gfar_private *priv) +{ + return (priv->ndev->features & NETIF_F_HW_VLAN_RX) || + (priv->ndev->features & NETIF_F_HW_VLAN_TX); +} + +/* Returns 1 if incoming frames use an FCB */ +static inline int gfar_uses_fcb(struct gfar_private *priv) +{ + return gfar_is_vlan_on(priv) || + (priv->ndev->features & NETIF_F_RXCSUM) || + (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); +} + +static void free_tx_pointers(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < priv->num_tx_queues; i++) + kfree(priv->tx_queue[i]); +} + +static void free_rx_pointers(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < priv->num_rx_queues; i++) + kfree(priv->rx_queue[i]); +} + +static void unmap_group_regs(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < MAXGROUPS; i++) + if (priv->gfargrp[i].regs) + iounmap(priv->gfargrp[i].regs); +} + +static void disable_napi(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < priv->num_grps; i++) + napi_disable(&priv->gfargrp[i].napi); +} + +static void enable_napi(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < 
priv->num_grps; i++) + napi_enable(&priv->gfargrp[i].napi); +} + +static int gfar_parse_group(struct device_node *np, + struct gfar_private *priv, const char *model) +{ + u32 *queue_mask; + + priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); + if (!priv->gfargrp[priv->num_grps].regs) + return -ENOMEM; + + priv->gfargrp[priv->num_grps].interruptTransmit = + irq_of_parse_and_map(np, 0); + + /* If we aren't the FEC we have multiple interrupts */ + if (model && strcasecmp(model, "FEC")) { + priv->gfargrp[priv->num_grps].interruptReceive = + irq_of_parse_and_map(np, 1); + priv->gfargrp[priv->num_grps].interruptError = + irq_of_parse_and_map(np,2); + if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || + priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || + priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) + return -EINVAL; + } + + priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; + priv->gfargrp[priv->num_grps].priv = priv; + spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); + if(priv->mode == MQ_MG_MODE) { + queue_mask = (u32 *)of_get_property(np, + "fsl,rx-bit-map", NULL); + priv->gfargrp[priv->num_grps].rx_bit_map = + queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); + queue_mask = (u32 *)of_get_property(np, + "fsl,tx-bit-map", NULL); + priv->gfargrp[priv->num_grps].tx_bit_map = + queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); + } else { + priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; + priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; + } + priv->num_grps++; + + return 0; +} + +static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) +{ + const char *model; + const char *ctype; + const void *mac_addr; + int err = 0, i; + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + struct device_node *np = ofdev->dev.of_node; + struct device_node *child = NULL; + const u32 *stash; + const u32 *stash_len; + const u32 *stash_idx; + unsigned int num_tx_qs, num_rx_qs; + u32 *tx_queues, *rx_queues; + + if (!np || !of_device_is_available(np)) + return -ENODEV; + + /* parse the num of tx and rx queues */ + tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); + num_tx_qs = tx_queues ? *tx_queues : 1; + + if (num_tx_qs > MAX_TX_QS) { + pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", + num_tx_qs, MAX_TX_QS); + pr_err("Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); + num_rx_qs = rx_queues ? 
*rx_queues : 1; + + if (num_rx_qs > MAX_RX_QS) { + pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", + num_rx_qs, MAX_RX_QS); + pr_err("Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); + dev = *pdev; + if (NULL == dev) + return -ENOMEM; + + priv = netdev_priv(dev); + priv->node = ofdev->dev.of_node; + priv->ndev = dev; + + priv->num_tx_queues = num_tx_qs; + netif_set_real_num_rx_queues(dev, num_rx_qs); + priv->num_rx_queues = num_rx_qs; + priv->num_grps = 0x0; + + /* Init Rx queue filer rule set linked list*/ + INIT_LIST_HEAD(&priv->rx_list.list); + priv->rx_list.count = 0; + mutex_init(&priv->rx_queue_access); + + model = of_get_property(np, "model", NULL); + + for (i = 0; i < MAXGROUPS; i++) + priv->gfargrp[i].regs = NULL; + + /* Parse and initialize group specific information */ + if (of_device_is_compatible(np, "fsl,etsec2")) { + priv->mode = MQ_MG_MODE; + for_each_child_of_node(np, child) { + err = gfar_parse_group(child, priv, model); + if (err) + goto err_grp_init; + } + } else { + priv->mode = SQ_SG_MODE; + err = gfar_parse_group(np, priv, model); + if(err) + goto err_grp_init; + } + + for (i = 0; i < priv->num_tx_queues; i++) + priv->tx_queue[i] = NULL; + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i] = NULL; + + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), + GFP_KERNEL); + if (!priv->tx_queue[i]) { + err = -ENOMEM; + goto tx_alloc_failed; + } + priv->tx_queue[i]->tx_skbuff = NULL; + priv->tx_queue[i]->qindex = i; + priv->tx_queue[i]->dev = dev; + spin_lock_init(&(priv->tx_queue[i]->txlock)); + } + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), + GFP_KERNEL); + if (!priv->rx_queue[i]) { + err = -ENOMEM; + goto rx_alloc_failed; + } + priv->rx_queue[i]->rx_skbuff = NULL; + priv->rx_queue[i]->qindex = i; + priv->rx_queue[i]->dev = dev; + spin_lock_init(&(priv->rx_queue[i]->rxlock)); + } + + + stash = of_get_property(np, "bd-stash", NULL); + + if (stash) { + priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; + priv->bd_stash_en = 1; + } + + stash_len = of_get_property(np, "rx-stash-len", NULL); + + if (stash_len) + priv->rx_stash_size = *stash_len; + + stash_idx = of_get_property(np, "rx-stash-idx", NULL); + + if (stash_idx) + priv->rx_stash_index = *stash_idx; + + if (stash_len || stash_idx) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); + + if (model && !strcasecmp(model, "TSEC")) + priv->device_flags = + FSL_GIANFAR_DEV_HAS_GIGABIT | + FSL_GIANFAR_DEV_HAS_COALESCE | + FSL_GIANFAR_DEV_HAS_RMON | + FSL_GIANFAR_DEV_HAS_MULTI_INTR; + if (model && !strcasecmp(model, "eTSEC")) + priv->device_flags = + FSL_GIANFAR_DEV_HAS_GIGABIT | + FSL_GIANFAR_DEV_HAS_COALESCE | + FSL_GIANFAR_DEV_HAS_RMON | + FSL_GIANFAR_DEV_HAS_MULTI_INTR | + FSL_GIANFAR_DEV_HAS_PADDING | + FSL_GIANFAR_DEV_HAS_CSUM | + FSL_GIANFAR_DEV_HAS_VLAN | + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | + FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | + FSL_GIANFAR_DEV_HAS_TIMER; + + ctype = of_get_property(np, "phy-connection-type", NULL); + + /* We only care about rgmii-id. 
The rest are autodetected */ + if (ctype && !strcmp(ctype, "rgmii-id")) + priv->interface = PHY_INTERFACE_MODE_RGMII_ID; + else + priv->interface = PHY_INTERFACE_MODE_MII; + + if (of_get_property(np, "fsl,magic-packet", NULL)) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; + + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); + + /* Find the TBI PHY. If it's not there, we don't support SGMII */ + priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); + + return 0; + +rx_alloc_failed: + free_rx_pointers(priv); +tx_alloc_failed: + free_tx_pointers(priv); +err_grp_init: + unmap_group_regs(priv); + free_netdev(dev); + return err; +} + +static int gfar_hwtstamp_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + priv->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + if (priv->hwts_rx_en) { + stop_gfar(netdev); + priv->hwts_rx_en = 0; + startup_gfar(netdev); + } + break; + default: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + if (!priv->hwts_rx_en) { + stop_gfar(netdev); + priv->hwts_rx_en = 1; + startup_gfar(netdev); + } + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/* Ioctl MII Interface */ +static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd == SIOCSHWTSTAMP) + return gfar_hwtstamp_ioctl(dev, rq, cmd); + + if (!priv->phydev) + return -ENODEV; + + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) +{ + unsigned int new_bit_map = 0x0; + int mask = 0x1 << (max_qs - 1), i; + for (i = 0; i < max_qs; i++) { + if (bit_map & mask) + new_bit_map = new_bit_map + (1 << i); + mask = mask >> 0x1; + } + return new_bit_map; +} + +static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, + u32 class) +{ + u32 rqfpr = FPR_FILER_MASK; + u32 rqfcr = 0x0; + + rqfar--; + rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; + priv->ftp_rqfpr[rqfar] = rqfpr; + priv->ftp_rqfcr[rqfar] = rqfcr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_NOMATCH; + priv->ftp_rqfpr[rqfar] = rqfpr; + priv->ftp_rqfcr[rqfar] = rqfcr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; + rqfpr = class; + priv->ftp_rqfcr[rqfar] = rqfcr; + priv->ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; + rqfpr = class; + priv->ftp_rqfcr[rqfar] = rqfcr; + priv->ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + return rqfar; +} + +static void gfar_init_filer_table(struct gfar_private *priv) +{ + int i = 0x0; + u32 rqfar = MAX_FILER_IDX; + u32 rqfcr = 0x0; + u32 rqfpr = FPR_FILER_MASK; + + /* Default rule */ + rqfcr = RQFCR_CMP_MATCH; + 
priv->ftp_rqfcr[rqfar] = rqfcr; + priv->ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); + + /* cur_filer_idx indicated the first non-masked rule */ + priv->cur_filer_idx = rqfar; + + /* Rest are masked rules */ + rqfcr = RQFCR_CMP_NOMATCH; + for (i = 0; i < rqfar; i++) { + priv->ftp_rqfcr[i] = rqfcr; + priv->ftp_rqfpr[i] = rqfpr; + gfar_write_filer(priv, i, rqfcr, rqfpr); + } +} + +static void gfar_detect_errata(struct gfar_private *priv) +{ + struct device *dev = &priv->ofdev->dev; + unsigned int pvr = mfspr(SPRN_PVR); + unsigned int svr = mfspr(SPRN_SVR); + unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ + unsigned int rev = svr & 0xffff; + + /* MPC8313 Rev 2.0 and higher; All MPC837x */ + if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_74; + + /* MPC8313 and MPC837x all rev */ + if ((pvr == 0x80850010 && mod == 0x80b0) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_76; + + /* MPC8313 and MPC837x all rev */ + if ((pvr == 0x80850010 && mod == 0x80b0) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_A002; + + /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ + if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || + (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) + priv->errata |= GFAR_ERRATA_12; + + if (priv->errata) + dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", + priv->errata); +} + +/* Set up the ethernet device structure, private data, + * and anything else we need before we start */ +static int gfar_probe(struct platform_device *ofdev) +{ + u32 tempval; + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + struct gfar __iomem *regs = NULL; + int err = 0, i, grp_idx = 0; + int len_devname; + u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; + u32 isrg = 0; + u32 __iomem *baddr; + + err = gfar_of_init(ofdev, &dev); + + if (err) + return err; + + priv = netdev_priv(dev); + priv->ndev = dev; + priv->ofdev = ofdev; + priv->node = ofdev->dev.of_node; + SET_NETDEV_DEV(dev, &ofdev->dev); + + spin_lock_init(&priv->bflock); + INIT_WORK(&priv->reset_task, gfar_reset_task); + + dev_set_drvdata(&ofdev->dev, priv); + regs = priv->gfargrp[0].regs; + + gfar_detect_errata(priv); + + /* Stop the DMA engine now, in case it was running before */ + /* (The firmware could have used it, and left it running). */ + gfar_halt(dev); + + /* Reset MAC layer */ + gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); + + /* We need to delay at least 3 TX clocks */ + udelay(2); + + tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + gfar_write(®s->maccfg1, tempval); + + /* Initialize MACCFG2. 
*/ + tempval = MACCFG2_INIT_SETTINGS; + if (gfar_has_errata(priv, GFAR_ERRATA_74)) + tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; + gfar_write(®s->maccfg2, tempval); + + /* Initialize ECNTRL */ + gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long) regs; + + SET_NETDEV_DEV(dev, &ofdev->dev); + + /* Fill in the dev structure */ + dev->watchdog_timeo = TX_TIMEOUT; + dev->mtu = 1500; + dev->netdev_ops = &gfar_netdev_ops; + dev->ethtool_ops = &gfar_ethtool_ops; + + /* Register for napi ...We are registering NAPI for each grp */ + for (i = 0; i < priv->num_grps; i++) + netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HIGHDMA; + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { + dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { + priv->extended_hash = 1; + priv->hash_width = 9; + + priv->hash_regs[0] = ®s->igaddr0; + priv->hash_regs[1] = ®s->igaddr1; + priv->hash_regs[2] = ®s->igaddr2; + priv->hash_regs[3] = ®s->igaddr3; + priv->hash_regs[4] = ®s->igaddr4; + priv->hash_regs[5] = ®s->igaddr5; + priv->hash_regs[6] = ®s->igaddr6; + priv->hash_regs[7] = ®s->igaddr7; + priv->hash_regs[8] = ®s->gaddr0; + priv->hash_regs[9] = ®s->gaddr1; + priv->hash_regs[10] = ®s->gaddr2; + priv->hash_regs[11] = ®s->gaddr3; + priv->hash_regs[12] = ®s->gaddr4; + priv->hash_regs[13] = ®s->gaddr5; + priv->hash_regs[14] = ®s->gaddr6; + priv->hash_regs[15] = ®s->gaddr7; + + } else { + priv->extended_hash = 0; + priv->hash_width = 8; + + priv->hash_regs[0] = ®s->gaddr0; + priv->hash_regs[1] = ®s->gaddr1; + priv->hash_regs[2] = ®s->gaddr2; + priv->hash_regs[3] = ®s->gaddr3; + priv->hash_regs[4] = ®s->gaddr4; + priv->hash_regs[5] = ®s->gaddr5; + priv->hash_regs[6] = ®s->gaddr6; + priv->hash_regs[7] = ®s->gaddr7; + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) + priv->padding = DEFAULT_PADDING; + else + priv->padding = 0; + + if (dev->features & NETIF_F_IP_CSUM || + priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) + dev->hard_header_len += GMAC_FCB_LEN; + + /* Program the isrg regs only if number of grps > 1 */ + if (priv->num_grps > 1) { + baddr = ®s->isrg0; + for (i = 0; i < priv->num_grps; i++) { + isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); + isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); + gfar_write(baddr, isrg); + baddr++; + isrg = 0x0; + } + } + + /* Need to reverse the bit maps as bit_map's MSB is q0 + * but, for_each_set_bit parses from right to left, which + * basically reverses the queue numbers */ + for (i = 0; i< priv->num_grps; i++) { + priv->gfargrp[i].tx_bit_map = reverse_bitmap( + priv->gfargrp[i].tx_bit_map, MAX_TX_QS); + priv->gfargrp[i].rx_bit_map = reverse_bitmap( + priv->gfargrp[i].rx_bit_map, MAX_RX_QS); + } + + /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, + * also assign queues to groups */ + for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { + priv->gfargrp[grp_idx].num_rx_queues = 0x0; + for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, + priv->num_rx_queues) { + priv->gfargrp[grp_idx].num_rx_queues++; + priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; + rstat = rstat | 
(RSTAT_CLEAR_RHALT >> i); + rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); + } + priv->gfargrp[grp_idx].num_tx_queues = 0x0; + for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, + priv->num_tx_queues) { + priv->gfargrp[grp_idx].num_tx_queues++; + priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; + tstat = tstat | (TSTAT_CLEAR_THALT >> i); + tqueue = tqueue | (TQUEUE_EN0 >> i); + } + priv->gfargrp[grp_idx].rstat = rstat; + priv->gfargrp[grp_idx].tstat = tstat; + rstat = tstat =0; + } + + gfar_write(®s->rqueue, rqueue); + gfar_write(®s->tqueue, tqueue); + + priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; + + /* Initializing some of the rx/tx queue level parameters */ + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; + priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; + priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; + priv->tx_queue[i]->txic = DEFAULT_TXIC; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; + priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; + priv->rx_queue[i]->rxic = DEFAULT_RXIC; + } + + /* always enable rx filer*/ + priv->rx_filer_enable = 1; + /* Enable most messages by default */ + priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(dev); + + err = register_netdev(dev); + + if (err) { + pr_err("%s: Cannot register net device, aborting\n", dev->name); + goto register_fail; + } + + device_init_wakeup(&dev->dev, + priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + + /* fill out IRQ number and name fields */ + len_devname = strlen(dev->name); + for (i = 0; i < priv->num_grps; i++) { + strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, + len_devname); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + strncpy(&priv->gfargrp[i].int_name_tx[len_devname], + "_g", sizeof("_g")); + priv->gfargrp[i].int_name_tx[ + strlen(priv->gfargrp[i].int_name_tx)] = i+48; + strncpy(&priv->gfargrp[i].int_name_tx[strlen( + priv->gfargrp[i].int_name_tx)], + "_tx", sizeof("_tx") + 1); + + strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, + len_devname); + strncpy(&priv->gfargrp[i].int_name_rx[len_devname], + "_g", sizeof("_g")); + priv->gfargrp[i].int_name_rx[ + strlen(priv->gfargrp[i].int_name_rx)] = i+48; + strncpy(&priv->gfargrp[i].int_name_rx[strlen( + priv->gfargrp[i].int_name_rx)], + "_rx", sizeof("_rx") + 1); + + strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, + len_devname); + strncpy(&priv->gfargrp[i].int_name_er[len_devname], + "_g", sizeof("_g")); + priv->gfargrp[i].int_name_er[strlen( + priv->gfargrp[i].int_name_er)] = i+48; + strncpy(&priv->gfargrp[i].int_name_er[strlen(\ + priv->gfargrp[i].int_name_er)], + "_er", sizeof("_er") + 1); + } else + priv->gfargrp[i].int_name_tx[len_devname] = '\0'; + } + + /* Initialize the filer table */ + gfar_init_filer_table(priv); + + /* Create all the sysfs files */ + gfar_init_sysfs(dev); + + /* Print out the device info */ + netdev_info(dev, "mac: %pM\n", dev->dev_addr); + + /* Even more device info helps when determining which kernel */ + /* provided which set of benchmarks. 
*/ + netdev_info(dev, "Running with NAPI enabled\n"); + for (i = 0; i < priv->num_rx_queues; i++) + netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", + i, priv->rx_queue[i]->rx_ring_size); + for(i = 0; i < priv->num_tx_queues; i++) + netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", + i, priv->tx_queue[i]->tx_ring_size); + + return 0; + +register_fail: + unmap_group_regs(priv); + free_tx_pointers(priv); + free_rx_pointers(priv); + if (priv->phy_node) + of_node_put(priv->phy_node); + if (priv->tbi_node) + of_node_put(priv->tbi_node); + free_netdev(dev); + return err; +} + +static int gfar_remove(struct platform_device *ofdev) +{ + struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); + + if (priv->phy_node) + of_node_put(priv->phy_node); + if (priv->tbi_node) + of_node_put(priv->tbi_node); + + dev_set_drvdata(&ofdev->dev, NULL); + + unregister_netdev(priv->ndev); + unmap_group_regs(priv); + free_netdev(priv->ndev); + + return 0; +} + +#ifdef CONFIG_PM + +static int gfar_suspend(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + unsigned long flags; + u32 tempval; + + int magic_packet = priv->wol_en && + (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + + netif_device_detach(ndev); + + if (netif_running(ndev)) { + + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + + gfar_halt_nodisable(ndev); + + /* Disable Tx, and Rx if wake-on-LAN is disabled. */ + tempval = gfar_read(®s->maccfg1); + + tempval &= ~MACCFG1_TX_EN; + + if (!magic_packet) + tempval &= ~MACCFG1_RX_EN; + + gfar_write(®s->maccfg1, tempval); + + unlock_rx_qs(priv); + unlock_tx_qs(priv); + local_irq_restore(flags); + + disable_napi(priv); + + if (magic_packet) { + /* Enable interrupt on Magic Packet */ + gfar_write(®s->imask, IMASK_MAG); + + /* Enable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval |= MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); + } else { + phy_stop(priv->phydev); + } + } + + return 0; +} + +static int gfar_resume(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + unsigned long flags; + u32 tempval; + int magic_packet = priv->wol_en && + (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + + if (!netif_running(ndev)) { + netif_device_attach(ndev); + return 0; + } + + if (!magic_packet && priv->phydev) + phy_start(priv->phydev); + + /* Disable Magic Packet mode, in case something + * else woke us up. 
+ */ + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + + tempval = gfar_read(®s->maccfg2); + tempval &= ~MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); + + gfar_start(ndev); + + unlock_rx_qs(priv); + unlock_tx_qs(priv); + local_irq_restore(flags); + + netif_device_attach(ndev); + + enable_napi(priv); + + return 0; +} + +static int gfar_restore(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + + if (!netif_running(ndev)) + return 0; + + gfar_init_bds(ndev); + init_registers(ndev); + gfar_set_mac_address(ndev); + gfar_init_mac(ndev); + gfar_start(ndev); + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + if (priv->phydev) + phy_start(priv->phydev); + + netif_device_attach(ndev); + enable_napi(priv); + + return 0; +} + +static struct dev_pm_ops gfar_pm_ops = { + .suspend = gfar_suspend, + .resume = gfar_resume, + .freeze = gfar_suspend, + .thaw = gfar_resume, + .restore = gfar_restore, +}; + +#define GFAR_PM_OPS (&gfar_pm_ops) + +#else + +#define GFAR_PM_OPS NULL + +#endif + +/* Reads the controller's registers to determine what interface + * connects it to the PHY. + */ +static phy_interface_t gfar_get_interface(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 ecntrl; + + ecntrl = gfar_read(®s->ecntrl); + + if (ecntrl & ECNTRL_SGMII_MODE) + return PHY_INTERFACE_MODE_SGMII; + + if (ecntrl & ECNTRL_TBI_MODE) { + if (ecntrl & ECNTRL_REDUCED_MODE) + return PHY_INTERFACE_MODE_RTBI; + else + return PHY_INTERFACE_MODE_TBI; + } + + if (ecntrl & ECNTRL_REDUCED_MODE) { + if (ecntrl & ECNTRL_REDUCED_MII_MODE) + return PHY_INTERFACE_MODE_RMII; + else { + phy_interface_t interface = priv->interface; + + /* + * This isn't autodetected right now, so it must + * be set by the device tree or platform code. + */ + if (interface == PHY_INTERFACE_MODE_RGMII_ID) + return PHY_INTERFACE_MODE_RGMII_ID; + + return PHY_INTERFACE_MODE_RGMII; + } + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + return PHY_INTERFACE_MODE_GMII; + + return PHY_INTERFACE_MODE_MII; +} + + +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. + */ +static int init_phy(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + uint gigabit_support = + priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? + SUPPORTED_1000baseT_Full : 0; + phy_interface_t interface; + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + interface = gfar_get_interface(dev); + + priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!priv->phydev) + priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, + interface); + if (!priv->phydev) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return -ENODEV; + } + + if (interface == PHY_INTERFACE_MODE_SGMII) + gfar_configure_serdes(dev); + + /* Remove any features not supported by the controller */ + priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); + priv->phydev->advertising = priv->phydev->supported; + + return 0; +} + +/* + * Initialize TBI PHY interface for communicating with the + * SERDES lynx PHY on the chip. We communicate with this PHY + * through the MDIO bus on each controller, treating it as a + * "normal" PHY at the address found in the TBIPA register. We assume + * that the TBIPA register is valid. 
Either the MDIO bus code will set + * it to a value that doesn't conflict with other PHYs on the bus, or the + * value doesn't matter, as there are no other PHYs on the bus. + */ +static void gfar_configure_serdes(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *tbiphy; + + if (!priv->tbi_node) { + dev_warn(&dev->dev, "error: SGMII mode requires that the " + "device tree specify a tbi-handle\n"); + return; + } + + tbiphy = of_phy_find_device(priv->tbi_node); + if (!tbiphy) { + dev_err(&dev->dev, "error: Could not get TBI device\n"); + return; + } + + /* + * If the link is already up, we must already be ok, and don't need to + * configure and reset the TBI<->SerDes link. Maybe U-Boot configured + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) + return; + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); + + phy_write(tbiphy, MII_ADVERTISE, + ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM); + + phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | + BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); +} + +static void init_registers(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = NULL; + int i = 0; + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear IEVENT */ + gfar_write(®s->ievent, IEVENT_INIT_CLEAR); + + /* Initialize IMASK */ + gfar_write(®s->imask, IMASK_INIT_CLEAR); + } + + regs = priv->gfargrp[0].regs; + /* Init hash registers to zero */ + gfar_write(®s->igaddr0, 0); + gfar_write(®s->igaddr1, 0); + gfar_write(®s->igaddr2, 0); + gfar_write(®s->igaddr3, 0); + gfar_write(®s->igaddr4, 0); + gfar_write(®s->igaddr5, 0); + gfar_write(®s->igaddr6, 0); + gfar_write(®s->igaddr7, 0); + + gfar_write(®s->gaddr0, 0); + gfar_write(®s->gaddr1, 0); + gfar_write(®s->gaddr2, 0); + gfar_write(®s->gaddr3, 0); + gfar_write(®s->gaddr4, 0); + gfar_write(®s->gaddr5, 0); + gfar_write(®s->gaddr6, 0); + gfar_write(®s->gaddr7, 0); + + /* Zero out the rmon mib registers if it has them */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { + memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); + + /* Mask off the CAM interrupts */ + gfar_write(®s->rmon.cam1, 0xffffffff); + gfar_write(®s->rmon.cam2, 0xffffffff); + } + + /* Initialize the max receive buffer length */ + gfar_write(®s->mrblr, priv->rx_buffer_size); + + /* Initialize the Minimum Frame Length Register */ + gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); +} + +static int __gfar_is_rx_idle(struct gfar_private *priv) +{ + u32 res; + + /* + * Normaly TSEC should not hang on GRS commands, so we should + * actually wait for IEVENT_GRSC flag. + */ + if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) + return 0; + + /* + * Read the eTSEC register at offset 0xD1C. If bits 7-14 are + * the same as bits 23-30, the eTSEC Rx is assumed to be idle + * and the Rx can be safely reset. 
+ */ + res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); + res &= 0x7f807f80; + if ((res & 0xffff) == (res >> 16)) + return 1; + + return 0; +} + +/* Halt the receive and transmit queues */ +static void gfar_halt_nodisable(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = NULL; + u32 tempval; + int i = 0; + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Mask all interrupts */ + gfar_write(®s->imask, IMASK_INIT_CLEAR); + + /* Clear all interrupts */ + gfar_write(®s->ievent, IEVENT_INIT_CLEAR); + } + + regs = priv->gfargrp[0].regs; + /* Stop the DMA, and wait for it to stop */ + tempval = gfar_read(®s->dmactrl); + if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) + != (DMACTRL_GRS | DMACTRL_GTS)) { + int ret; + + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(®s->dmactrl, tempval); + + do { + ret = spin_event_timeout(((gfar_read(®s->ievent) & + (IEVENT_GRSC | IEVENT_GTSC)) == + (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); + if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) + ret = __gfar_is_rx_idle(priv); + } while (!ret); + } +} + +/* Halt the receive and transmit queues */ +void gfar_halt(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + + gfar_halt_nodisable(dev); + + /* Disable Rx and Tx */ + tempval = gfar_read(®s->maccfg1); + tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(®s->maccfg1, tempval); +} + +static void free_grp_irqs(struct gfar_priv_grp *grp) +{ + free_irq(grp->interruptError, grp); + free_irq(grp->interruptTransmit, grp); + free_irq(grp->interruptReceive, grp); +} + +void stop_gfar(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + unsigned long flags; + int i; + + phy_stop(priv->phydev); + + + /* Lock it down */ + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + + gfar_halt(dev); + + unlock_rx_qs(priv); + unlock_tx_qs(priv); + local_irq_restore(flags); + + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + for (i = 0; i < priv->num_grps; i++) + free_grp_irqs(&priv->gfargrp[i]); + } else { + for (i = 0; i < priv->num_grps; i++) + free_irq(priv->gfargrp[i].interruptTransmit, + &priv->gfargrp[i]); + } + + free_skb_resources(priv); +} + +static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) +{ + struct txbd8 *txbdp; + struct gfar_private *priv = netdev_priv(tx_queue->dev); + int i, j; + + txbdp = tx_queue->tx_bd_base; + + for (i = 0; i < tx_queue->tx_ring_size; i++) { + if (!tx_queue->tx_skbuff[i]) + continue; + + dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, + txbdp->length, DMA_TO_DEVICE); + txbdp->lstatus = 0; + for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; + j++) { + txbdp++; + dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, + txbdp->length, DMA_TO_DEVICE); + } + txbdp++; + dev_kfree_skb_any(tx_queue->tx_skbuff[i]); + tx_queue->tx_skbuff[i] = NULL; + } + kfree(tx_queue->tx_skbuff); +} + +static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) +{ + struct rxbd8 *rxbdp; + struct gfar_private *priv = netdev_priv(rx_queue->dev); + int i; + + rxbdp = rx_queue->rx_bd_base; + + for (i = 0; i < rx_queue->rx_ring_size; i++) { + if (rx_queue->rx_skbuff[i]) { + dma_unmap_single(&priv->ofdev->dev, + rxbdp->bufPtr, priv->rx_buffer_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_queue->rx_skbuff[i]); + rx_queue->rx_skbuff[i] = NULL; + } + rxbdp->lstatus = 0; + rxbdp->bufPtr 
= 0; + rxbdp++; + } + kfree(rx_queue->rx_skbuff); +} + +/* If there are any tx skbs or rx skbs still around, free them. + * Then free tx_skbuff and rx_skbuff */ +static void free_skb_resources(struct gfar_private *priv) +{ + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + + /* Go through all the buffer descriptors and free their data buffers */ + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + if(tx_queue->tx_skbuff) + free_skb_tx_queue(tx_queue); + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + if(rx_queue->rx_skbuff) + free_skb_rx_queue(rx_queue); + } + + dma_free_coherent(&priv->ofdev->dev, + sizeof(struct txbd8) * priv->total_tx_ring_size + + sizeof(struct rxbd8) * priv->total_rx_ring_size, + priv->tx_queue[0]->tx_bd_base, + priv->tx_queue[0]->tx_bd_dma_base); + skb_queue_purge(&priv->rx_recycle); +} + +void gfar_start(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + int i = 0; + + /* Enable Rx and Tx in MACCFG1 */ + tempval = gfar_read(®s->maccfg1); + tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(®s->maccfg1, tempval); + + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(®s->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(®s->dmactrl, tempval); + + /* Make sure we aren't stopped */ + tempval = gfar_read(®s->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(®s->dmactrl, tempval); + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear THLT/RHLT, so that the DMA starts polling now */ + gfar_write(®s->tstat, priv->gfargrp[i].tstat); + gfar_write(®s->rstat, priv->gfargrp[i].rstat); + /* Unmask the interrupts we look for */ + gfar_write(®s->imask, IMASK_DEFAULT); + } + + dev->trans_start = jiffies; /* prevent tx timeout */ +} + +void gfar_configure_coalescing(struct gfar_private *priv, + unsigned long tx_mask, unsigned long rx_mask) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i = 0; + + /* Backward compatible case ---- even if we enable + * multiple queues, there's only single reg to program + */ + gfar_write(®s->txic, 0); + if(likely(priv->tx_queue[0]->txcoalescing)) + gfar_write(®s->txic, priv->tx_queue[0]->txic); + + gfar_write(®s->rxic, 0); + if(unlikely(priv->rx_queue[0]->rxcoalescing)) + gfar_write(®s->rxic, priv->rx_queue[0]->rxic); + + if (priv->mode == MQ_MG_MODE) { + baddr = ®s->txic0; + for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { + if (likely(priv->tx_queue[i]->txcoalescing)) { + gfar_write(baddr + i, 0); + gfar_write(baddr + i, priv->tx_queue[i]->txic); + } + } + + baddr = ®s->rxic0; + for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { + if (likely(priv->rx_queue[i]->rxcoalescing)) { + gfar_write(baddr + i, 0); + gfar_write(baddr + i, priv->rx_queue[i]->rxic); + } + } + } +} + +static int register_grp_irqs(struct gfar_priv_grp *grp) +{ + struct gfar_private *priv = grp->priv; + struct net_device *dev = priv->ndev; + int err; + + /* If the device has multiple interrupts, register for + * them. 
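+ * (When FSL_GIANFAR_DEV_HAS_MULTI_INTR is set, each group exposes
+ * separate error, transmit and receive lines, hence the three
+ * request_irq() calls below.)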
Otherwise, only register for the one */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + /* Install our interrupt handlers for Error, + * Transmit, and Receive */ + if ((err = request_irq(grp->interruptError, gfar_error, 0, + grp->int_name_er,grp)) < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + grp->interruptError); + + goto err_irq_fail; + } + + if ((err = request_irq(grp->interruptTransmit, gfar_transmit, + 0, grp->int_name_tx, grp)) < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + grp->interruptTransmit); + goto tx_irq_fail; + } + + if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, + grp->int_name_rx, grp)) < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + grp->interruptReceive); + goto rx_irq_fail; + } + } else { + if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, + grp->int_name_tx, grp)) < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + grp->interruptTransmit); + goto err_irq_fail; + } + } + + return 0; + +rx_irq_fail: + free_irq(grp->interruptTransmit, grp); +tx_irq_fail: + free_irq(grp->interruptError, grp); +err_irq_fail: + return err; + +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = NULL; + int err, i, j; + + for (i = 0; i < priv->num_grps; i++) { + regs= priv->gfargrp[i].regs; + gfar_write(®s->imask, IMASK_INIT_CLEAR); + } + + regs= priv->gfargrp[0].regs; + err = gfar_alloc_skb_resources(ndev); + if (err) + return err; + + gfar_init_mac(ndev); + + for (i = 0; i < priv->num_grps; i++) { + err = register_grp_irqs(&priv->gfargrp[i]); + if (err) { + for (j = 0; j < i; j++) + free_grp_irqs(&priv->gfargrp[j]); + goto irq_fail; + } + } + + /* Start the controller */ + gfar_start(ndev); + + phy_start(priv->phydev); + + gfar_configure_coalescing(priv, 0xFF, 0xFF); + + return 0; + +irq_fail: + free_skb_resources(priv); + return err; +} + +/* Called when something needs to use the ethernet device */ +/* Returns 0 for success. */ +static int gfar_enet_open(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + int err; + + enable_napi(priv); + + skb_queue_head_init(&priv->rx_recycle); + + /* Initialize a bunch of registers */ + init_registers(dev); + + gfar_set_mac_address(dev); + + err = init_phy(dev); + + if (err) { + disable_napi(priv); + return err; + } + + err = startup_gfar(dev); + if (err) { + disable_napi(priv); + return err; + } + + netif_tx_start_all_queues(dev); + + device_set_wakeup_enable(&dev->dev, priv->wol_en); + + return err; +} + +static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) +{ + struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); + + memset(fcb, 0, GMAC_FCB_LEN); + + return fcb; +} + +static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) +{ + u8 flags = 0; + + /* If we're here, it's a IP packet with a TCP or UDP + * payload. We set it to checksum, using a pseudo-header + * we provide + */ + flags = TXFCB_DEFAULT; + + /* Tell the controller what the protocol is */ + /* And provide the already calculated phcs */ + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { + flags |= TXFCB_UDP; + fcb->phcs = udp_hdr(skb)->check; + } else + fcb->phcs = tcp_hdr(skb)->check; + + /* l3os is the distance between the start of the + * frame (skb->data) and the start of the IP hdr. 
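+ * (For an untagged IPv4 frame the FCB sits in front of the 14-byte
+ * Ethernet header, so l3os normally works out to ETH_HLEN.)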
+ * l4os is the distance between the start of the + * l3 hdr and the l4 hdr */ + fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); + fcb->l4os = skb_network_header_len(skb); + + fcb->flags = flags; +} + +void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) +{ + fcb->flags |= TXFCB_VLN; + fcb->vlctl = vlan_tx_tag_get(skb); +} + +static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, + struct txbd8 *base, int ring_size) +{ + struct txbd8 *new_bd = bdp + stride; + + return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; +} + +static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, + int ring_size) +{ + return skip_txbd(bdp, 1, base, ring_size); +} + +/* This is called by the kernel when a frame is ready for transmission. */ +/* It is pointed to by the dev->hard_start_xmit function pointer */ +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct netdev_queue *txq; + struct gfar __iomem *regs = NULL; + struct txfcb *fcb = NULL; + struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; + u32 lstatus; + int i, rq = 0, do_tstamp = 0; + u32 bufaddr; + unsigned long flags; + unsigned int nr_frags, nr_txbds, length; + + /* + * TOE=1 frames larger than 2500 bytes may see excess delays + * before start of transmission. + */ + if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && + skb->ip_summed == CHECKSUM_PARTIAL && + skb->len > 2500)) { + int ret; + + ret = skb_checksum_help(skb); + if (ret) + return ret; + } + + rq = skb->queue_mapping; + tx_queue = priv->tx_queue[rq]; + txq = netdev_get_tx_queue(dev, rq); + base = tx_queue->tx_bd_base; + regs = tx_queue->grp->regs; + + /* check if time stamp should be generated */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + priv->hwts_tx_en)) + do_tstamp = 1; + + /* make space for additional header when fcb is needed */ + if (((skb->ip_summed == CHECKSUM_PARTIAL) || + vlan_tx_tag_present(skb) || + unlikely(do_tstamp)) && + (skb_headroom(skb) < GMAC_FCB_LEN)) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); + if (!skb_new) { + dev->stats.tx_errors++; + kfree_skb(skb); + return NETDEV_TX_OK; + } + kfree_skb(skb); + skb = skb_new; + } + + /* total number of fragments in the SKB */ + nr_frags = skb_shinfo(skb)->nr_frags; + + /* calculate the required number of TxBDs for this skb */ + if (unlikely(do_tstamp)) + nr_txbds = nr_frags + 2; + else + nr_txbds = nr_frags + 1; + + /* check if there is space to queue this packet */ + if (nr_txbds > tx_queue->num_txbdfree) { + /* no space, stop the queue */ + netif_tx_stop_queue(txq); + dev->stats.tx_fifo_errors++; + return NETDEV_TX_BUSY; + } + + /* Update transmit stats */ + tx_queue->stats.tx_bytes += skb->len; + tx_queue->stats.tx_packets++; + + txbdp = txbdp_start = tx_queue->cur_tx; + lstatus = txbdp->lstatus; + + /* Time stamp insertion requires one additional TxBD */ + if (unlikely(do_tstamp)) + txbdp_tstamp = txbdp = next_txbd(txbdp, base, + tx_queue->tx_ring_size); + + if (nr_frags == 0) { + if (unlikely(do_tstamp)) + txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | + TXBD_INTERRUPT); + else + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + } else { + /* Place the fragment addresses and lengths into the TxBDs */ + for (i = 0; i < nr_frags; i++) { + /* Point at the next BD, wrapping as needed */ + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + + length = 
skb_shinfo(skb)->frags[i].size; + + lstatus = txbdp->lstatus | length | + BD_LFLAG(TXBD_READY); + + /* Handle the last BD specially */ + if (i == nr_frags - 1) + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + + bufaddr = dma_map_page(&priv->ofdev->dev, + skb_shinfo(skb)->frags[i].page, + skb_shinfo(skb)->frags[i].page_offset, + length, + DMA_TO_DEVICE); + + /* set the TxBD length and buffer pointer */ + txbdp->bufPtr = bufaddr; + txbdp->lstatus = lstatus; + } + + lstatus = txbdp_start->lstatus; + } + + /* Set up checksumming */ + if (CHECKSUM_PARTIAL == skb->ip_summed) { + fcb = gfar_add_fcb(skb); + /* as specified by errata */ + if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) + && ((unsigned long)fcb % 0x20) > 0x18)) { + __skb_pull(skb, GMAC_FCB_LEN); + skb_checksum_help(skb); + } else { + lstatus |= BD_LFLAG(TXBD_TOE); + gfar_tx_checksum(skb, fcb); + } + } + + if (vlan_tx_tag_present(skb)) { + if (unlikely(NULL == fcb)) { + fcb = gfar_add_fcb(skb); + lstatus |= BD_LFLAG(TXBD_TOE); + } + + gfar_tx_vlan(skb, fcb); + } + + /* Setup tx hardware time stamping if requested */ + if (unlikely(do_tstamp)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + if (fcb == NULL) + fcb = gfar_add_fcb(skb); + fcb->ptp = 1; + lstatus |= BD_LFLAG(TXBD_TOE); + } + + txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + + /* + * If time stamping is requested one additional TxBD must be set up. The + * first TxBD points to the FCB and must have a data length of + * GMAC_FCB_LEN. The second TxBD points to the actual frame data with + * the full frame length. + */ + if (unlikely(do_tstamp)) { + txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN; + txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | + (skb_headlen(skb) - GMAC_FCB_LEN); + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; + } else { + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); + } + + /* + * We can work in parallel with gfar_clean_tx_ring(), except + * when modifying num_txbdfree. Note that we didn't grab the lock + * when we were reading the num_txbdfree and checking for available + * space, that's because outside of this function it can only grow, + * and once we've got needed space, it cannot suddenly disappear. + * + * The lock also protects us from gfar_error(), which can modify + * regs->tstat and thus retrigger the transfers, which is why we + * also must grab the lock before setting ready bit for the first + * to be transmitted BD. + */ + spin_lock_irqsave(&tx_queue->txlock, flags); + + /* + * The powerpc-specific eieio() is used, as wmb() has too strong + * semantics (it requires synchronization between cacheable and + * uncacheable mappings, which eieio doesn't provide and which we + * don't need), thus requiring a more expensive sync instruction. At + * some point, the set of architecture-independent barrier functions + * should be expanded to include weaker barriers. + */ + eieio(); + + txbdp_start->lstatus = lstatus; + + eieio(); /* force lstatus write before tx_skbuff */ + + tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; + + /* Update the current skb pointer to the next entry we will use + * (wrapping if necessary) */ + tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & + TX_RING_MOD_MASK(tx_queue->tx_ring_size); + + tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); + + /* reduce TxBD free count */ + tx_queue->num_txbdfree -= (nr_txbds); + + /* If the next BD still needs to be cleaned up, then the bds + are full. 
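+ * (The matching wake-up happens in gfar_clean_tx_ring() once enough
+ * descriptors have been reclaimed.)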
We need to tell the kernel to stop sending us stuff. */ + if (!tx_queue->num_txbdfree) { + netif_tx_stop_queue(txq); + + dev->stats.tx_fifo_errors++; + } + + /* Tell the DMA to go go go */ + gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); + + /* Unlock priv */ + spin_unlock_irqrestore(&tx_queue->txlock, flags); + + return NETDEV_TX_OK; +} + +/* Stops the kernel queue, and halts the controller */ +static int gfar_close(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + disable_napi(priv); + + cancel_work_sync(&priv->reset_task); + stop_gfar(dev); + + /* Disconnect from the PHY */ + phy_disconnect(priv->phydev); + priv->phydev = NULL; + + netif_tx_stop_all_queues(dev); + + return 0; +} + +/* Changes the mac address if the controller is not running. */ +static int gfar_set_mac_address(struct net_device *dev) +{ + gfar_set_mac_for_addr(dev, 0, dev->dev_addr); + + return 0; +} + +/* Check if rx parser should be activated */ +void gfar_check_rx_parser_mode(struct gfar_private *priv) +{ + struct gfar __iomem *regs; + u32 tempval; + + regs = priv->gfargrp[0].regs; + + tempval = gfar_read(®s->rctrl); + /* If parse is no longer required, then disable parser */ + if (tempval & RCTRL_REQ_PARSER) + tempval |= RCTRL_PRSDEP_INIT; + else + tempval &= ~RCTRL_PRSDEP_INIT; + gfar_write(®s->rctrl, tempval); +} + +/* Enables and disables VLAN insertion/extraction */ +void gfar_vlan_mode(struct net_device *dev, u32 features) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = NULL; + unsigned long flags; + u32 tempval; + + regs = priv->gfargrp[0].regs; + local_irq_save(flags); + lock_rx_qs(priv); + + if (features & NETIF_F_HW_VLAN_TX) { + /* Enable VLAN tag insertion */ + tempval = gfar_read(®s->tctrl); + tempval |= TCTRL_VLINS; + gfar_write(®s->tctrl, tempval); + } else { + /* Disable VLAN tag insertion */ + tempval = gfar_read(®s->tctrl); + tempval &= ~TCTRL_VLINS; + gfar_write(®s->tctrl, tempval); + } + + if (features & NETIF_F_HW_VLAN_RX) { + /* Enable VLAN tag extraction */ + tempval = gfar_read(®s->rctrl); + tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); + gfar_write(®s->rctrl, tempval); + } else { + /* Disable VLAN tag extraction */ + tempval = gfar_read(®s->rctrl); + tempval &= ~RCTRL_VLEX; + gfar_write(®s->rctrl, tempval); + + gfar_check_rx_parser_mode(priv); + } + + gfar_change_mtu(dev, dev->mtu); + + unlock_rx_qs(priv); + local_irq_restore(flags); +} + +static int gfar_change_mtu(struct net_device *dev, int new_mtu) +{ + int tempsize, tempval; + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + int oldsize = priv->rx_buffer_size; + int frame_size = new_mtu + ETH_HLEN; + + if (gfar_is_vlan_on(priv)) + frame_size += VLAN_HLEN; + + if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + netif_err(priv, drv, dev, "Invalid MTU setting\n"); + return -EINVAL; + } + + if (gfar_uses_fcb(priv)) + frame_size += GMAC_FCB_LEN; + + frame_size += priv->padding; + + tempsize = + (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + + INCREMENTAL_BUFFER_SIZE; + + /* Only stop and start the controller if it isn't already + * stopped, and we changed something */ + if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + stop_gfar(dev); + + priv->rx_buffer_size = tempsize; + + dev->mtu = new_mtu; + + gfar_write(®s->mrblr, priv->rx_buffer_size); + gfar_write(®s->maxfrm, priv->rx_buffer_size); + + /* If the mtu is larger than the max size for standard + * ethernet frames (ie, a jumbo frame), then set 
maccfg2 + * to allow huge frames, and to check the length */ + tempval = gfar_read(®s->maccfg2); + + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || + gfar_has_errata(priv, GFAR_ERRATA_74)) + tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + else + tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + + gfar_write(®s->maccfg2, tempval); + + if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + startup_gfar(dev); + + return 0; +} + +/* gfar_reset_task gets scheduled when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. + */ +static void gfar_reset_task(struct work_struct *work) +{ + struct gfar_private *priv = container_of(work, struct gfar_private, + reset_task); + struct net_device *dev = priv->ndev; + + if (dev->flags & IFF_UP) { + netif_tx_stop_all_queues(dev); + stop_gfar(dev); + startup_gfar(dev); + netif_tx_start_all_queues(dev); + } + + netif_tx_schedule_all(dev); +} + +static void gfar_timeout(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + dev->stats.tx_errors++; + schedule_work(&priv->reset_task); +} + +static void gfar_align_skb(struct sk_buff *skb) +{ + /* We need the data buffer to be aligned properly. We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, RXBUF_ALIGNMENT - + (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); +} + +/* Interrupt Handler for Transmit complete */ +static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) +{ + struct net_device *dev = tx_queue->dev; + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_rx_q *rx_queue = NULL; + struct txbd8 *bdp, *next = NULL; + struct txbd8 *lbdp = NULL; + struct txbd8 *base = tx_queue->tx_bd_base; + struct sk_buff *skb; + int skb_dirtytx; + int tx_ring_size = tx_queue->tx_ring_size; + int frags = 0, nr_txbds = 0; + int i; + int howmany = 0; + u32 lstatus; + size_t buflen; + + rx_queue = priv->rx_queue[tx_queue->qindex]; + bdp = tx_queue->dirty_tx; + skb_dirtytx = tx_queue->skb_dirtytx; + + while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { + unsigned long flags; + + frags = skb_shinfo(skb)->nr_frags; + + /* + * When time stamping, one additional TxBD must be freed. + * Also, we need to dma_unmap_single() the TxPAL. 
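+ * (In that case the first BD only covers the FCB and the real frame
+ * length is taken from the following BD -- see the 'next' handling
+ * below.)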
+ */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + nr_txbds = frags + 2; + else + nr_txbds = frags + 1; + + lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); + + lstatus = lbdp->lstatus; + + /* Only clean completed frames */ + if ((lstatus & BD_LFLAG(TXBD_READY)) && + (lstatus & BD_LENGTH_MASK)) + break; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + next = next_txbd(bdp, base, tx_ring_size); + buflen = next->length + GMAC_FCB_LEN; + } else + buflen = bdp->length; + + dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, + buflen, DMA_TO_DEVICE); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + struct skb_shared_hwtstamps shhwtstamps; + u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(*ns); + skb_tstamp_tx(skb, &shhwtstamps); + bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + bdp = next; + } + + bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + bdp = next_txbd(bdp, base, tx_ring_size); + + for (i = 0; i < frags; i++) { + dma_unmap_page(&priv->ofdev->dev, + bdp->bufPtr, + bdp->length, + DMA_TO_DEVICE); + bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + bdp = next_txbd(bdp, base, tx_ring_size); + } + + /* + * If there's room in the queue (limit it to rx_buffer_size) + * we add this skb back into the pool, if it's the right size + */ + if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && + skb_recycle_check(skb, priv->rx_buffer_size + + RXBUF_ALIGNMENT)) { + gfar_align_skb(skb); + skb_queue_head(&priv->rx_recycle, skb); + } else + dev_kfree_skb_any(skb); + + tx_queue->tx_skbuff[skb_dirtytx] = NULL; + + skb_dirtytx = (skb_dirtytx + 1) & + TX_RING_MOD_MASK(tx_ring_size); + + howmany++; + spin_lock_irqsave(&tx_queue->txlock, flags); + tx_queue->num_txbdfree += nr_txbds; + spin_unlock_irqrestore(&tx_queue->txlock, flags); + } + + /* If we freed a buffer, we can restart transmission, if necessary */ + if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) + netif_wake_subqueue(dev, tx_queue->qindex); + + /* Update dirty indicators */ + tx_queue->skb_dirtytx = skb_dirtytx; + tx_queue->dirty_tx = bdp; + + return howmany; +} + +static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) +{ + unsigned long flags; + + spin_lock_irqsave(&gfargrp->grplock, flags); + if (napi_schedule_prep(&gfargrp->napi)) { + gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); + __napi_schedule(&gfargrp->napi); + } else { + /* + * Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. 
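+ * (This branch is taken when napi_schedule_prep() fails, i.e. NAPI
+ * is already scheduled and will service the new packets anyway.)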
+ */ + gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); + } + spin_unlock_irqrestore(&gfargrp->grplock, flags); + +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *grp_id) +{ + gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); + return IRQ_HANDLED; +} + +static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, + struct sk_buff *skb) +{ + struct net_device *dev = rx_queue->dev; + struct gfar_private *priv = netdev_priv(dev); + dma_addr_t buf; + + buf = dma_map_single(&priv->ofdev->dev, skb->data, + priv->rx_buffer_size, DMA_FROM_DEVICE); + gfar_init_rxbdp(rx_queue, bdp, buf); +} + +static struct sk_buff * gfar_alloc_skb(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct sk_buff *skb = NULL; + + skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); + if (!skb) + return NULL; + + gfar_align_skb(skb); + + return skb; +} + +struct sk_buff * gfar_new_skb(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct sk_buff *skb = NULL; + + skb = skb_dequeue(&priv->rx_recycle); + if (!skb) + skb = gfar_alloc_skb(dev); + + return skb; +} + +static inline void count_errors(unsigned short status, struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct gfar_extra_stats *estats = &priv->extra_stats; + + /* If the packet was truncated, none of the other errors + * matter */ + if (status & RXBD_TRUNCATED) { + stats->rx_length_errors++; + + estats->rx_trunc++; + + return; + } + /* Count the errors, if there were any */ + if (status & (RXBD_LARGE | RXBD_SHORT)) { + stats->rx_length_errors++; + + if (status & RXBD_LARGE) + estats->rx_large++; + else + estats->rx_short++; + } + if (status & RXBD_NONOCTET) { + stats->rx_frame_errors++; + estats->rx_nonoctet++; + } + if (status & RXBD_CRCERR) { + estats->rx_crcerr++; + stats->rx_crc_errors++; + } + if (status & RXBD_OVERRUN) { + estats->rx_overrun++; + stats->rx_crc_errors++; + } +} + +irqreturn_t gfar_receive(int irq, void *grp_id) +{ + gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); + return IRQ_HANDLED; +} + +static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) +{ + /* If valid headers were found, and valid sums + * were verified, then we tell the kernel that no + * checksumming is necessary. Otherwise, it is */ + if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + + +/* gfar_process_frame() -- handle one incoming packet if skb + * isn't NULL. 
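+ * amount_pull is GMAC_FCB_LEN when the hardware prepends a receive
+ * FCB to each frame (see gfar_uses_fcb()), and 0 otherwise.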
*/ +static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull) +{ + struct gfar_private *priv = netdev_priv(dev); + struct rxfcb *fcb = NULL; + + int ret; + + /* fcb is at the beginning if exists */ + fcb = (struct rxfcb *)skb->data; + + /* Remove the FCB from the skb */ + /* Remove the padded bytes, if there are any */ + if (amount_pull) { + skb_record_rx_queue(skb, fcb->rq); + skb_pull(skb, amount_pull); + } + + /* Get receive timestamp from the skb */ + if (priv->hwts_rx_en) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + u64 *ns = (u64 *) skb->data; + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(*ns); + } + + if (priv->padding) + skb_pull(skb, priv->padding); + + if (dev->features & NETIF_F_RXCSUM) + gfar_rx_checksum(skb, fcb); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, dev); + - /* Set vlan tag */ - if (fcb->flags & RXFCB_VLN) ++ /* ++ * There's need to check for NETIF_F_HW_VLAN_RX here. ++ * Even if vlan rx accel is disabled, on some chips ++ * RXFCB_VLN is pseudo randomly set. ++ */ ++ if (dev->features & NETIF_F_HW_VLAN_RX && ++ fcb->flags & RXFCB_VLN) + __vlan_hwaccel_put_tag(skb, fcb->vlctl); + + /* Send the packet up the stack */ + ret = netif_receive_skb(skb); + + if (NET_RX_DROP == ret) + priv->extra_stats.kernel_dropped++; + + return 0; +} + +/* gfar_clean_rx_ring() -- Processes each frame in the rx ring + * until the budget/quota has been reached. Returns the number + * of frames handled + */ +int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) +{ + struct net_device *dev = rx_queue->dev; + struct rxbd8 *bdp, *base; + struct sk_buff *skb; + int pkt_len; + int amount_pull; + int howmany = 0; + struct gfar_private *priv = netdev_priv(dev); + + /* Get the first full descriptor */ + bdp = rx_queue->cur_rx; + base = rx_queue->rx_bd_base; + + amount_pull = (gfar_uses_fcb(priv) ? 
GMAC_FCB_LEN : 0); + + while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { + struct sk_buff *newskb; + rmb(); + + /* Add another skb for the future */ + newskb = gfar_new_skb(dev); + + skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; + + dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, + priv->rx_buffer_size, DMA_FROM_DEVICE); + + if (unlikely(!(bdp->status & RXBD_ERR) && + bdp->length > priv->rx_buffer_size)) + bdp->status = RXBD_LARGE; + + /* We drop the frame if we failed to allocate a new buffer */ + if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || + bdp->status & RXBD_ERR)) { + count_errors(bdp->status, dev); + + if (unlikely(!newskb)) + newskb = skb; + else if (skb) + skb_queue_head(&priv->rx_recycle, skb); + } else { + /* Increment the number of packets */ + rx_queue->stats.rx_packets++; + howmany++; + + if (likely(skb)) { + pkt_len = bdp->length - ETH_FCS_LEN; + /* Remove the FCS from the packet length */ + skb_put(skb, pkt_len); + rx_queue->stats.rx_bytes += pkt_len; + skb_record_rx_queue(skb, rx_queue->qindex); + gfar_process_frame(dev, skb, amount_pull); + + } else { + netif_warn(priv, rx_err, dev, "Missing skb!\n"); + rx_queue->stats.rx_dropped++; + priv->extra_stats.rx_skbmissing++; + } + + } + + rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; + + /* Setup the new bdp */ + gfar_new_rxbdp(rx_queue, bdp, newskb); + + /* Update to the next pointer */ + bdp = next_bd(bdp, base, rx_queue->rx_ring_size); + + /* update to point at the next skb */ + rx_queue->skb_currx = + (rx_queue->skb_currx + 1) & + RX_RING_MOD_MASK(rx_queue->rx_ring_size); + } + + /* Update the current rxbd pointer to be the next one */ + rx_queue->cur_rx = bdp; + + return howmany; +} + +static int gfar_poll(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = container_of(napi, + struct gfar_priv_grp, napi); + struct gfar_private *priv = gfargrp->priv; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; + int tx_cleaned = 0, i, left_over_budget = budget; + unsigned long serviced_queues = 0; + int num_queues = 0; + + num_queues = gfargrp->num_rx_queues; + budget_per_queue = budget/num_queues; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived */ + gfar_write(®s->ievent, IEVENT_RTX_MASK); + + while (num_queues && left_over_budget) { + + budget_per_queue = left_over_budget/num_queues; + left_over_budget = 0; + + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { + if (test_bit(i, &serviced_queues)) + continue; + rx_queue = priv->rx_queue[i]; + tx_queue = priv->tx_queue[rx_queue->qindex]; + + tx_cleaned += gfar_clean_tx_ring(tx_queue); + rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, + budget_per_queue); + rx_cleaned += rx_cleaned_per_queue; + if(rx_cleaned_per_queue < budget_per_queue) { + left_over_budget = left_over_budget + + (budget_per_queue - rx_cleaned_per_queue); + set_bit(i, &serviced_queues); + num_queues--; + } + } + } + + if (tx_cleaned) + return budget; + + if (rx_cleaned < budget) { + napi_complete(napi); + + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); + + gfar_write(®s->imask, IMASK_DEFAULT); + + /* If we are coalescing interrupts, update the timer */ + /* Otherwise, clear it */ + gfar_configure_coalescing(priv, + gfargrp->rx_bit_map, gfargrp->tx_bit_map); + } + + return rx_cleaned; +} + +#ifdef 
CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void gfar_netpoll(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + int i = 0; + + /* If the device has multiple interrupts, run tx/rx */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + for (i = 0; i < priv->num_grps; i++) { + disable_irq(priv->gfargrp[i].interruptTransmit); + disable_irq(priv->gfargrp[i].interruptReceive); + disable_irq(priv->gfargrp[i].interruptError); + gfar_interrupt(priv->gfargrp[i].interruptTransmit, + &priv->gfargrp[i]); + enable_irq(priv->gfargrp[i].interruptError); + enable_irq(priv->gfargrp[i].interruptReceive); + enable_irq(priv->gfargrp[i].interruptTransmit); + } + } else { + for (i = 0; i < priv->num_grps; i++) { + disable_irq(priv->gfargrp[i].interruptTransmit); + gfar_interrupt(priv->gfargrp[i].interruptTransmit, + &priv->gfargrp[i]); + enable_irq(priv->gfargrp[i].interruptTransmit); + } + } +} +#endif + +/* The interrupt handler for devices with one interrupt */ +static irqreturn_t gfar_interrupt(int irq, void *grp_id) +{ + struct gfar_priv_grp *gfargrp = grp_id; + + /* Save ievent for future reference */ + u32 events = gfar_read(&gfargrp->regs->ievent); + + /* Check for reception */ + if (events & IEVENT_RX_MASK) + gfar_receive(irq, grp_id); + + /* Check for transmit completion */ + if (events & IEVENT_TX_MASK) + gfar_transmit(irq, grp_id); + + /* Check for errors */ + if (events & IEVENT_ERR_MASK) + gfar_error(irq, grp_id); + + return IRQ_HANDLED; +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the phydev structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + unsigned long flags; + struct phy_device *phydev = priv->phydev; + int new_state = 0; + + local_irq_save(flags); + lock_tx_qs(priv); + + if (phydev->link) { + u32 tempval = gfar_read(®s->maccfg2); + u32 ecntrl = gfar_read(®s->ecntrl); + + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != priv->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) + tempval &= ~(MACCFG2_FULL_DUPLEX); + else + tempval |= MACCFG2_FULL_DUPLEX; + + priv->oldduplex = phydev->duplex; + } + + if (phydev->speed != priv->oldspeed) { + new_state = 1; + switch (phydev->speed) { + case 1000: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + + ecntrl &= ~(ECNTRL_R100); + break; + case 100: + case 10: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + + /* Reduced mode distinguishes + * between 10 and 100 */ + if (phydev->speed == SPEED_100) + ecntrl |= ECNTRL_R100; + else + ecntrl &= ~(ECNTRL_R100); + break; + default: + netif_warn(priv, link, dev, + "Ack! 
Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + + priv->oldspeed = phydev->speed; + } + + gfar_write(®s->maccfg2, tempval); + gfar_write(®s->ecntrl, ecntrl); + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + unlock_tx_qs(priv); + local_irq_restore(flags); +} + +/* Update the hash table based on the current list of multicast + * addresses we subscribe to. Also, change the promiscuity of + * the device based on the flags (this function is called + * whenever dev->flags is changed */ +static void gfar_set_multi(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + + if (dev->flags & IFF_PROMISC) { + /* Set RCTRL to PROM */ + tempval = gfar_read(®s->rctrl); + tempval |= RCTRL_PROM; + gfar_write(®s->rctrl, tempval); + } else { + /* Set RCTRL to not PROM */ + tempval = gfar_read(®s->rctrl); + tempval &= ~(RCTRL_PROM); + gfar_write(®s->rctrl, tempval); + } + + if (dev->flags & IFF_ALLMULTI) { + /* Set the hash to rx all multicast frames */ + gfar_write(®s->igaddr0, 0xffffffff); + gfar_write(®s->igaddr1, 0xffffffff); + gfar_write(®s->igaddr2, 0xffffffff); + gfar_write(®s->igaddr3, 0xffffffff); + gfar_write(®s->igaddr4, 0xffffffff); + gfar_write(®s->igaddr5, 0xffffffff); + gfar_write(®s->igaddr6, 0xffffffff); + gfar_write(®s->igaddr7, 0xffffffff); + gfar_write(®s->gaddr0, 0xffffffff); + gfar_write(®s->gaddr1, 0xffffffff); + gfar_write(®s->gaddr2, 0xffffffff); + gfar_write(®s->gaddr3, 0xffffffff); + gfar_write(®s->gaddr4, 0xffffffff); + gfar_write(®s->gaddr5, 0xffffffff); + gfar_write(®s->gaddr6, 0xffffffff); + gfar_write(®s->gaddr7, 0xffffffff); + } else { + int em_num; + int idx; + + /* zero out the hash */ + gfar_write(®s->igaddr0, 0x0); + gfar_write(®s->igaddr1, 0x0); + gfar_write(®s->igaddr2, 0x0); + gfar_write(®s->igaddr3, 0x0); + gfar_write(®s->igaddr4, 0x0); + gfar_write(®s->igaddr5, 0x0); + gfar_write(®s->igaddr6, 0x0); + gfar_write(®s->igaddr7, 0x0); + gfar_write(®s->gaddr0, 0x0); + gfar_write(®s->gaddr1, 0x0); + gfar_write(®s->gaddr2, 0x0); + gfar_write(®s->gaddr3, 0x0); + gfar_write(®s->gaddr4, 0x0); + gfar_write(®s->gaddr5, 0x0); + gfar_write(®s->gaddr6, 0x0); + gfar_write(®s->gaddr7, 0x0); + + /* If we have extended hash tables, we need to + * clear the exact match registers to prepare for + * setting them */ + if (priv->extended_hash) { + em_num = GFAR_EM_NUM + 1; + gfar_clear_exact_match(dev); + idx = 1; + } else { + idx = 0; + em_num = 0; + } + + if (netdev_mc_empty(dev)) + return; + + /* Parse the list, and set the appropriate bits */ + netdev_for_each_mc_addr(ha, dev) { + if (idx < em_num) { + gfar_set_mac_for_addr(dev, idx, ha->addr); + idx++; + } else + gfar_set_hash_for_addr(dev, ha->addr); + } + } +} + + +/* Clears each of the exact match registers to zero, so they + * don't interfere with normal reception */ +static void gfar_clear_exact_match(struct net_device *dev) +{ + int idx; + static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; + + for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) + gfar_set_mac_for_addr(dev, idx, zero_arr); +} + +/* Set the appropriate hash bit for the given addr */ +/* The algorithm works like so: + * 1) Take the Destination Address (ie the multicast address), and + * do a CRC on it (little endian), and 
reverse the bits of the + * result. + * 2) Use the 8 most significant bits as a hash into a 256-entry + * table. The table is controlled through 8 32-bit registers: + * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is + * gaddr7. This means that the 3 most significant bits in the + * hash index which gaddr register to use, and the 5 other bits + * indicate which bit (assuming an IBM numbering scheme, which + * for PowerPC (tm) is usually the case) in the register holds + * the entry. */ +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) +{ + u32 tempval; + struct gfar_private *priv = netdev_priv(dev); + u32 result = ether_crc(MAC_ADDR_LEN, addr); + int width = priv->hash_width; + u8 whichbit = (result >> (32 - width)) & 0x1f; + u8 whichreg = result >> (32 - width + 5); + u32 value = (1 << (31-whichbit)); + + tempval = gfar_read(priv->hash_regs[whichreg]); + tempval |= value; + gfar_write(priv->hash_regs[whichreg], tempval); +} + + +/* There are multiple MAC Address register pairs on some controllers + * This function sets the numth pair to a given address + */ +static void gfar_set_mac_for_addr(struct net_device *dev, int num, + const u8 *addr) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + int idx; + char tmpbuf[MAC_ADDR_LEN]; + u32 tempval; + u32 __iomem *macptr = ®s->macstnaddr1; + + macptr += num*2; + + /* Now copy it into the mac registers backwards, cuz */ + /* little endian is silly */ + for (idx = 0; idx < MAC_ADDR_LEN; idx++) + tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; + + gfar_write(macptr, *((u32 *) (tmpbuf))); + + tempval = *((u32 *) (tmpbuf + 4)); + + gfar_write(macptr+1, tempval); +} + +/* GFAR error interrupt handler */ +static irqreturn_t gfar_error(int irq, void *grp_id) +{ + struct gfar_priv_grp *gfargrp = grp_id; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_private *priv= gfargrp->priv; + struct net_device *dev = priv->ndev; + + /* Save ievent for future reference */ + u32 events = gfar_read(®s->ievent); + + /* Clear IEVENT */ + gfar_write(®s->ievent, events & IEVENT_ERR_MASK); + + /* Magic Packet is not an error. */ + if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && + (events & IEVENT_MAG)) + events &= ~IEVENT_MAG; + + /* Hmm... 
*/ + if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) + netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", + events, gfar_read(®s->imask)); + + /* Update the error counters */ + if (events & IEVENT_TXE) { + dev->stats.tx_errors++; + + if (events & IEVENT_LC) + dev->stats.tx_window_errors++; + if (events & IEVENT_CRL) + dev->stats.tx_aborted_errors++; + if (events & IEVENT_XFUN) { + unsigned long flags; + + netif_dbg(priv, tx_err, dev, + "TX FIFO underrun, packet dropped\n"); + dev->stats.tx_dropped++; + priv->extra_stats.tx_underrun++; + + local_irq_save(flags); + lock_tx_qs(priv); + + /* Reactivate the Tx Queues */ + gfar_write(®s->tstat, gfargrp->tstat); + + unlock_tx_qs(priv); + local_irq_restore(flags); + } + netif_dbg(priv, tx_err, dev, "Transmit Error\n"); + } + if (events & IEVENT_BSY) { + dev->stats.rx_errors++; + priv->extra_stats.rx_bsy++; + + gfar_receive(irq, grp_id); + + netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", + gfar_read(®s->rstat)); + } + if (events & IEVENT_BABR) { + dev->stats.rx_errors++; + priv->extra_stats.rx_babr++; + + netif_dbg(priv, rx_err, dev, "babbling RX error\n"); + } + if (events & IEVENT_EBERR) { + priv->extra_stats.eberr++; + netif_dbg(priv, rx_err, dev, "bus error\n"); + } + if (events & IEVENT_RXC) + netif_dbg(priv, rx_status, dev, "control frame\n"); + + if (events & IEVENT_BABT) { + priv->extra_stats.tx_babt++; + netif_dbg(priv, tx_err, dev, "babbling TX error\n"); + } + return IRQ_HANDLED; +} + +static struct of_device_id gfar_match[] = +{ + { + .type = "network", + .compatible = "gianfar", + }, + { + .compatible = "fsl,etsec2", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, gfar_match); + +/* Structure for a device driver */ +static struct platform_driver gfar_driver = { + .driver = { + .name = "fsl-gianfar", + .owner = THIS_MODULE, + .pm = GFAR_PM_OPS, + .of_match_table = gfar_match, + }, + .probe = gfar_probe, + .remove = gfar_remove, +}; + +static int __init gfar_init(void) +{ + return platform_driver_register(&gfar_driver); +} + +static void __exit gfar_exit(void) +{ + platform_driver_unregister(&gfar_driver); +} + +module_init(gfar_init); +module_exit(gfar_exit); + diff --cc drivers/net/ethernet/freescale/gianfar_ethtool.c index 6e350692d118,000000000000..25a8c2adb001 mode 100644,000000..100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@@ -1,1747 -1,0 +1,1763 @@@ +/* + * drivers/net/gianfar_ethtool.c + * + * Gianfar Ethernet Driver + * Ethtool support for Gianfar Enet + * Based on e1000 ethtool support + * + * Author: Andy Fleming + * Maintainer: Kumar Gala + * Modifier: Sandeep Gopalpet + * + * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc. + * + * This software may be used and distributed according to + * the terms of the GNU Public License, Version 2, incorporated herein + * by reference. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" + +extern void gfar_start(struct net_device *dev); +extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); + +#define GFAR_MAX_COAL_USECS 0xffff +#define GFAR_MAX_COAL_FRAMES 0xff +static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 * buf); +static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); +static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); +static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); +static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); +static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); +static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); + +static char stat_gstrings[][ETH_GSTRING_LEN] = { + "rx-dropped-by-kernel", + "rx-large-frame-errors", + "rx-short-frame-errors", + "rx-non-octet-errors", + "rx-crc-errors", + "rx-overrun-errors", + "rx-busy-errors", + "rx-babbling-errors", + "rx-truncated-frames", + "ethernet-bus-error", + "tx-babbling-errors", + "tx-underrun-errors", + "rx-skb-missing-errors", + "tx-timeout-errors", + "tx-rx-64-frames", + "tx-rx-65-127-frames", + "tx-rx-128-255-frames", + "tx-rx-256-511-frames", + "tx-rx-512-1023-frames", + "tx-rx-1024-1518-frames", + "tx-rx-1519-1522-good-vlan", + "rx-bytes", + "rx-packets", + "rx-fcs-errors", + "receive-multicast-packet", + "receive-broadcast-packet", + "rx-control-frame-packets", + "rx-pause-frame-packets", + "rx-unknown-op-code", + "rx-alignment-error", + "rx-frame-length-error", + "rx-code-error", + "rx-carrier-sense-error", + "rx-undersize-packets", + "rx-oversize-packets", + "rx-fragmented-frames", + "rx-jabber-frames", + "rx-dropped-frames", + "tx-byte-counter", + "tx-packets", + "tx-multicast-packets", + "tx-broadcast-packets", + "tx-pause-control-frames", + "tx-deferral-packets", + "tx-excessive-deferral-packets", + "tx-single-collision-packets", + "tx-multiple-collision-packets", + "tx-late-collision-packets", + "tx-excessive-collision-packets", + "tx-total-collision", + "reserved", + "tx-dropped-frames", + "tx-jabber-frames", + "tx-fcs-errors", + "tx-control-frames", + "tx-oversize-frames", + "tx-undersize-frames", + "tx-fragmented-frames", +}; + +/* Fill in a buffer with the strings which correspond to the + * stats */ +static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) + memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); + else + memcpy(buf, stat_gstrings, + GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); +} + +/* Fill in an array of 64-bit statistics from various sources. 
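+ * When the controller has an RMON block the hardware counters are
+ * copied out first, followed by the software-maintained extra_stats;
+ * otherwise only the extra_stats are reported.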
+ * This array will be appended to the end of the ethtool_stats + * structure, and returned to user space + */ +static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) +{ + int i; + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u64 *extra = (u64 *) & priv->extra_stats; + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { + u32 __iomem *rmon = (u32 __iomem *) ®s->rmon; + struct gfar_stats *stats = (struct gfar_stats *) buf; + + for (i = 0; i < GFAR_RMON_LEN; i++) + stats->rmon[i] = (u64) gfar_read(&rmon[i]); + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) + stats->extra[i] = extra[i]; + } else + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) + buf[i] = extra[i]; +} + +static int gfar_sset_count(struct net_device *dev, int sset) +{ + struct gfar_private *priv = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) + return GFAR_STATS_LEN; + else + return GFAR_EXTRA_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +/* Fills in the drvinfo structure with some basic info */ +static void gfar_gdrvinfo(struct net_device *dev, struct + ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN); + strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); + strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); + strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + + +static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (NULL == phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + + +/* Return the current settings in the ethtool_cmd structure */ +static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + struct gfar_priv_rx_q *rx_queue = NULL; + struct gfar_priv_tx_q *tx_queue = NULL; + + if (NULL == phydev) + return -ENODEV; + tx_queue = priv->tx_queue[0]; + rx_queue = priv->rx_queue[0]; + + /* etsec-1.7 and older versions have only one txic + * and rxic regs although they support multiple queues */ + cmd->maxtxpkt = get_icft_value(tx_queue->txic); + cmd->maxrxpkt = get_icft_value(rx_queue->rxic); + + return phy_ethtool_gset(phydev, cmd); +} + +/* Return the length of the register structure */ +static int gfar_reglen(struct net_device *dev) +{ + return sizeof (struct gfar); +} + +/* Return a dump of the GFAR register space */ +static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) +{ + int i; + struct gfar_private *priv = netdev_priv(dev); + u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs; + u32 *buf = (u32 *) regbuf; + + for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) + buf[i] = gfar_read(&theregs[i]); +} + +/* Convert microseconds to ethernet clock ticks, which changes + * depending on what speed the controller is running at */ +static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->phydev->speed) { + case SPEED_1000: + count = GFAR_GBIT_TIME; + break; + case SPEED_100: + count = GFAR_100_TIME; + break; + case SPEED_10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater 
than 0 + * if usecs > 0 */ + return (usecs * 1000 + count - 1) / count; +} + +/* Convert ethernet clock ticks to microseconds */ +static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->phydev->speed) { + case SPEED_1000: + count = GFAR_GBIT_TIME; + break; + case SPEED_100: + count = GFAR_100_TIME; + break; + case SPEED_10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater than 0 */ + /* if ticks is > 0 */ + return (ticks * count) / 1000; +} + +/* Get the coalescing parameters, and put them in the cvals + * structure. */ +static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_rx_q *rx_queue = NULL; + struct gfar_priv_tx_q *tx_queue = NULL; + unsigned long rxtime; + unsigned long rxcount; + unsigned long txtime; + unsigned long txcount; + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) + return -EOPNOTSUPP; + + if (NULL == priv->phydev) + return -ENODEV; + + rx_queue = priv->rx_queue[0]; + tx_queue = priv->tx_queue[0]; + + rxtime = get_ictt_value(rx_queue->rxic); + rxcount = get_icft_value(rx_queue->rxic); + txtime = get_ictt_value(tx_queue->txic); + txcount = get_icft_value(tx_queue->txic); + cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime); + cvals->rx_max_coalesced_frames = rxcount; + + cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime); + cvals->tx_max_coalesced_frames = txcount; + + cvals->use_adaptive_rx_coalesce = 0; + cvals->use_adaptive_tx_coalesce = 0; + + cvals->pkt_rate_low = 0; + cvals->rx_coalesce_usecs_low = 0; + cvals->rx_max_coalesced_frames_low = 0; + cvals->tx_coalesce_usecs_low = 0; + cvals->tx_max_coalesced_frames_low = 0; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + cvals->pkt_rate_high = 0; + cvals->rx_coalesce_usecs_high = 0; + cvals->rx_max_coalesced_frames_high = 0; + cvals->tx_coalesce_usecs_high = 0; + cvals->tx_max_coalesced_frames_high = 0; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + cvals->rate_sample_interval = 0; + + return 0; +} + +/* Change the coalescing values. 
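+ * The microsecond values are converted to controller ticks with
+ * gfar_usecs2ticks() above and packed together with the frame count,
+ * e.g. mk_ic_value(frames, gfar_usecs2ticks(priv, usecs)).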
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0 + * in order for coalescing to be active + */ +static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = netdev_priv(dev); + int i = 0; + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) + return -EOPNOTSUPP; + + /* Set up rx coalescing */ + /* As of now, we will enable/disable coalescing for all + * queues together in case of eTSEC2, this will be modified + * along with the ethtool interface */ + if ((cvals->rx_coalesce_usecs == 0) || + (cvals->rx_max_coalesced_frames == 0)) { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 0; + } else { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 1; + } + + if (NULL == priv->phydev) + return -ENODEV; + + /* Check the bounds of the values */ + if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + pr_info("Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); + return -EINVAL; + } + + if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + pr_info("Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i]->rxic = mk_ic_value( + cvals->rx_max_coalesced_frames, + gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); + } + + /* Set up tx coalescing */ + if ((cvals->tx_coalesce_usecs == 0) || + (cvals->tx_max_coalesced_frames == 0)) { + for (i = 0; i < priv->num_tx_queues; i++) + priv->tx_queue[i]->txcoalescing = 0; + } else { + for (i = 0; i < priv->num_tx_queues; i++) + priv->tx_queue[i]->txcoalescing = 1; + } + + /* Check the bounds of the values */ + if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + pr_info("Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); + return -EINVAL; + } + + if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + pr_info("Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i]->txic = mk_ic_value( + cvals->tx_max_coalesced_frames, + gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); + } + + gfar_configure_coalescing(priv, 0xFF, 0xFF); + + return 0; +} + +/* Fills in rvals with the current ring parameters. Currently, + * rx, rx_mini, and rx_jumbo rings are the same size, as mini and + * jumbo are ignored by the driver */ +static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + + tx_queue = priv->tx_queue[0]; + rx_queue = priv->rx_queue[0]; + + rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + rvals->rx_pending = rx_queue->rx_ring_size; + rvals->rx_mini_pending = rx_queue->rx_ring_size; + rvals->rx_jumbo_pending = rx_queue->rx_ring_size; + rvals->tx_pending = tx_queue->tx_ring_size; +} + +/* Change the current ring parameters, stopping the controller if + * necessary so that we don't mess things up while we're in + * motion. We wait for the ring to be clean before reallocating + * the rings. 
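+ * (Ring sizes are restricted to powers of two so that ring indices
+ * can be wrapped with the *_RING_MOD_MASK() masks.)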
*/ +static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) +{ + struct gfar_private *priv = netdev_priv(dev); + int err = 0, i = 0; + + if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->rx_pending)) { + netdev_err(dev, "Ring sizes must be a power of 2\n"); + return -EINVAL; + } + + if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->tx_pending)) { + netdev_err(dev, "Ring sizes must be a power of 2\n"); + return -EINVAL; + } + + + if (dev->flags & IFF_UP) { + unsigned long flags; + + /* Halt TX and RX, and process the frames which + * have already been received */ + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + + gfar_halt(dev); + + unlock_rx_qs(priv); + unlock_tx_qs(priv); + local_irq_restore(flags); + + for (i = 0; i < priv->num_rx_queues; i++) + gfar_clean_rx_ring(priv->rx_queue[i], + priv->rx_queue[i]->rx_ring_size); + + /* Now we take down the rings to rebuild them */ + stop_gfar(dev); + } + + /* Change the size */ + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; + priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; + priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size; + } + + /* Rebuild the rings with the new size */ + if (dev->flags & IFF_UP) { + err = startup_gfar(dev); + netif_tx_wake_all_queues(dev); + } + return err; +} + +int gfar_set_features(struct net_device *dev, u32 features) +{ + struct gfar_private *priv = netdev_priv(dev); + unsigned long flags; + int err = 0, i = 0; + u32 changed = dev->features ^ features; + + if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) + gfar_vlan_mode(dev, features); + + if (!(changed & NETIF_F_RXCSUM)) + return 0; + + if (dev->flags & IFF_UP) { + /* Halt TX and RX, and process the frames which + * have already been received */ + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + + gfar_halt(dev); + + unlock_tx_qs(priv); + unlock_rx_qs(priv); + local_irq_restore(flags); + + for (i = 0; i < priv->num_rx_queues; i++) + gfar_clean_rx_ring(priv->rx_queue[i], + priv->rx_queue[i]->rx_ring_size); + + /* Now we take down the rings to rebuild them */ + stop_gfar(dev); + + dev->features = features; + + err = startup_gfar(dev); + netif_tx_wake_all_queues(dev); + } + return err; +} + +static uint32_t gfar_get_msglevel(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void gfar_set_msglevel(struct net_device *dev, uint32_t data) +{ + struct gfar_private *priv = netdev_priv(dev); + priv->msg_enable = data; +} + +#ifdef CONFIG_PM +static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_en ? 
WAKE_MAGIC : 0; + } else { + wol->supported = wol->wolopts = 0; + } +} + +static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct gfar_private *priv = netdev_priv(dev); + unsigned long flags; + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && + wol->wolopts != 0) + return -EINVAL; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); + + spin_lock_irqsave(&priv->bflock, flags); + priv->wol_en = !!device_may_wakeup(&dev->dev); + spin_unlock_irqrestore(&priv->bflock, flags); + + return 0; +} +#endif + +static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) +{ + u32 fcr = 0x0, fpr = FPR_FILER_MASK; + + if (ethflow & RXH_L2DA) { + fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | + RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + + fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | + RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_VLAN) { + fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_IP_SRC) { + fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & (RXH_IP_DST)) { + fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_L3_PROTO) { + fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_L4_B_0_1) { + fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_L4_B_2_3) { + fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } +} + +static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) +{ + unsigned int last_rule_idx = priv->cur_filer_idx; + unsigned int cmp_rqfpr; - unsigned int local_rqfpr[MAX_FILER_IDX + 1]; - unsigned int 
local_rqfcr[MAX_FILER_IDX + 1]; ++ unsigned int *local_rqfpr; ++ unsigned int *local_rqfcr; + int i = 0x0, k = 0x0; + int j = MAX_FILER_IDX, l = 0x0; ++ int ret = 1; ++ ++ local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), ++ GFP_KERNEL); ++ local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), ++ GFP_KERNEL); ++ if (!local_rqfpr || !local_rqfcr) { ++ pr_err("Out of memory\n"); ++ ret = 0; ++ goto err; ++ } + + switch (class) { + case TCP_V4_FLOW: + cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP; + break; + case UDP_V4_FLOW: + cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP; + break; + case TCP_V6_FLOW: + cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP; + break; + case UDP_V6_FLOW: + cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP; + break; + default: + pr_err("Right now this class is not supported\n"); - return 0; ++ ret = 0; ++ goto err; + } + + for (i = 0; i < MAX_FILER_IDX + 1; i++) { + local_rqfpr[j] = priv->ftp_rqfpr[i]; + local_rqfcr[j] = priv->ftp_rqfcr[i]; + j--; + if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE | + RQFCR_CLE |RQFCR_AND)) && + (priv->ftp_rqfpr[i] == cmp_rqfpr)) + break; + } + + if (i == MAX_FILER_IDX + 1) { + pr_err("No parse rule found, can't create hash rules\n"); - return 0; ++ ret = 0; ++ goto err; + } + + /* If a match was found, then it begins the starting of a cluster rule + * if it was already programmed, we need to overwrite these rules + */ + for (l = i+1; l < MAX_FILER_IDX; l++) { + if ((priv->ftp_rqfcr[l] & RQFCR_CLE) && + !(priv->ftp_rqfcr[l] & RQFCR_AND)) { + priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | + RQFCR_HASHTBL_0 | RQFCR_PID_MASK; + priv->ftp_rqfpr[l] = FPR_FILER_MASK; + gfar_write_filer(priv, l, priv->ftp_rqfcr[l], + priv->ftp_rqfpr[l]); + break; + } + + if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) && + (priv->ftp_rqfcr[l] & RQFCR_AND)) + continue; + else { + local_rqfpr[j] = priv->ftp_rqfpr[l]; + local_rqfcr[j] = priv->ftp_rqfcr[l]; + j--; + } + } + + priv->cur_filer_idx = l - 1; + last_rule_idx = l; + + /* hash rules */ + ethflow_to_filer_rules(priv, ethflow); + + /* Write back the popped out rules again */ + for (k = j+1; k < MAX_FILER_IDX; k++) { + priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; + priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; + gfar_write_filer(priv, priv->cur_filer_idx, + local_rqfcr[k], local_rqfpr[k]); + if (!priv->cur_filer_idx) + break; + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + - return 1; ++err: ++ kfree(local_rqfcr); ++ kfree(local_rqfpr); ++ return ret; +} + +static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) +{ + /* write the filer rules here */ + if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) + return -EINVAL; + + return 0; +} + +static int gfar_check_filer_hardware(struct gfar_private *priv) +{ + struct gfar __iomem *regs = NULL; + u32 i; + + regs = priv->gfargrp[0].regs; + + /* Check if we are in FIFO mode */ + i = gfar_read(®s->ecntrl); + i &= ECNTRL_FIFM; + if (i == ECNTRL_FIFM) { + netdev_notice(priv->ndev, "Interface in FIFO mode\n"); + i = gfar_read(®s->rctrl); + i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM; + if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) { + netdev_info(priv->ndev, + "Receive Queue Filtering enabled\n"); + } else { + netdev_warn(priv->ndev, + "Receive Queue Filtering disabled\n"); + return -EOPNOTSUPP; + } + } + /* Or in standard mode */ + else { + i = gfar_read(®s->rctrl); + i &= RCTRL_PRSDEP_MASK; + if (i == RCTRL_PRSDEP_MASK) { + netdev_info(priv->ndev, + "Receive Queue Filtering enabled\n"); + } else { + netdev_warn(priv->ndev, + "Receive Queue 
Filtering disabled\n"); + return -EOPNOTSUPP; + } + } + + /* Sets the properties for arbitrary filer rule + * to the first 4 Layer 4 Bytes */ + regs->rbifx = 0xC0C1C2C3; + return 0; +} + +static int gfar_comp_asc(const void *a, const void *b) +{ + return memcmp(a, b, 4); +} + +static int gfar_comp_desc(const void *a, const void *b) +{ + return -memcmp(a, b, 4); +} + +static void gfar_swap(void *a, void *b, int size) +{ + u32 *_a = a; + u32 *_b = b; + + swap(_a[0], _b[0]); + swap(_a[1], _b[1]); + swap(_a[2], _b[2]); + swap(_a[3], _b[3]); +} + +/* Write a mask to filer cache */ +static void gfar_set_mask(u32 mask, struct filer_table *tab) +{ + tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT; + tab->fe[tab->index].prop = mask; + tab->index++; +} + +/* Sets parse bits (e.g. IP or TCP) */ +static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab) +{ + gfar_set_mask(mask, tab); + tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE + | RQFCR_AND; + tab->fe[tab->index].prop = value; + tab->index++; +} + +static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, + struct filer_table *tab) +{ + gfar_set_mask(mask, tab); + tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag; + tab->fe[tab->index].prop = value; + tab->index++; +} + +/* + * For setting a tuple of value and mask of type flag + * Example: + * IP-Src = 10.0.0.0/255.0.0.0 + * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4 + * + * Ethtool gives us a value=0 and mask=~0 for don't care a tuple + * For a don't care mask it gives us a 0 + * + * The check if don't care and the mask adjustment if mask=0 is done for VLAN + * and MAC stuff on an upper level (due to missing information on this level). + * For these guys we can discard them if they are value=0 and mask=0. + * + * Further the all masks are one-padded for better hardware efficiency. 
+ */ +static void gfar_set_attribute(u32 value, u32 mask, u32 flag, + struct filer_table *tab) +{ + switch (flag) { + /* 3bit */ + case RQFCR_PID_PRI: + if (!(value | mask)) + return; + mask |= RQFCR_PID_PRI_MASK; + break; + /* 8bit */ + case RQFCR_PID_L4P: + case RQFCR_PID_TOS: + if (!~(mask | RQFCR_PID_L4P_MASK)) + return; + if (!mask) + mask = ~0; + else + mask |= RQFCR_PID_L4P_MASK; + break; + /* 12bit */ + case RQFCR_PID_VID: + if (!(value | mask)) + return; + mask |= RQFCR_PID_VID_MASK; + break; + /* 16bit */ + case RQFCR_PID_DPT: + case RQFCR_PID_SPT: + case RQFCR_PID_ETY: + if (!~(mask | RQFCR_PID_PORT_MASK)) + return; + if (!mask) + mask = ~0; + else + mask |= RQFCR_PID_PORT_MASK; + break; + /* 24bit */ + case RQFCR_PID_DAH: + case RQFCR_PID_DAL: + case RQFCR_PID_SAH: + case RQFCR_PID_SAL: + if (!(value | mask)) + return; + mask |= RQFCR_PID_MAC_MASK; + break; + /* for all real 32bit masks */ + default: + if (!~mask) + return; + if (!mask) + mask = ~0; + break; + } + gfar_set_general_attribute(value, mask, flag, tab); +} + +/* Translates value and mask for UDP, TCP or SCTP */ +static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value, + struct ethtool_tcpip4_spec *mask, struct filer_table *tab) +{ + gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); + gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); + gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab); + gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab); + gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); +} + +/* Translates value and mask for RAW-IP4 */ +static void gfar_set_user_ip(struct ethtool_usrip4_spec *value, + struct ethtool_usrip4_spec *mask, struct filer_table *tab) +{ + gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); + gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); + gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); + gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); + gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB, + tab); + +} + +/* Translates value and mask for ETHER spec */ +static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask, + struct filer_table *tab) +{ + u32 upper_temp_mask = 0; + u32 lower_temp_mask = 0; + /* Source address */ + if (!is_broadcast_ether_addr(mask->h_source)) { + + if (is_zero_ether_addr(mask->h_source)) { + upper_temp_mask = 0xFFFFFFFF; + lower_temp_mask = 0xFFFFFFFF; + } else { + upper_temp_mask = mask->h_source[0] << 16 + | mask->h_source[1] << 8 + | mask->h_source[2]; + lower_temp_mask = mask->h_source[3] << 16 + | mask->h_source[4] << 8 + | mask->h_source[5]; + } + /* Upper 24bit */ + gfar_set_attribute( + value->h_source[0] << 16 | value->h_source[1] + << 8 | value->h_source[2], + upper_temp_mask, RQFCR_PID_SAH, tab); + /* And the same for the lower part */ + gfar_set_attribute( + value->h_source[3] << 16 | value->h_source[4] + << 8 | value->h_source[5], + lower_temp_mask, RQFCR_PID_SAL, tab); + } + /* Destination address */ + if (!is_broadcast_ether_addr(mask->h_dest)) { + + /* Special for destination is limited broadcast */ + if ((is_broadcast_ether_addr(value->h_dest) + && is_zero_ether_addr(mask->h_dest))) { + gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab); + } else { + + if (is_zero_ether_addr(mask->h_dest)) { + upper_temp_mask = 0xFFFFFFFF; + lower_temp_mask = 0xFFFFFFFF; + } else { + upper_temp_mask = mask->h_dest[0] << 16 + | mask->h_dest[1] << 8 + | mask->h_dest[2]; + lower_temp_mask = 
mask->h_dest[3] << 16 + | mask->h_dest[4] << 8 + | mask->h_dest[5]; + } + + /* Upper 24bit */ + gfar_set_attribute( + value->h_dest[0] << 16 + | value->h_dest[1] << 8 + | value->h_dest[2], + upper_temp_mask, RQFCR_PID_DAH, tab); + /* And the same for the lower part */ + gfar_set_attribute( + value->h_dest[3] << 16 + | value->h_dest[4] << 8 + | value->h_dest[5], + lower_temp_mask, RQFCR_PID_DAL, tab); + } + } + + gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab); + +} + +/* Convert a rule to binary filter format of gianfar */ +static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, + struct filer_table *tab) +{ + u32 vlan = 0, vlan_mask = 0; + u32 id = 0, id_mask = 0; + u32 cfi = 0, cfi_mask = 0; + u32 prio = 0, prio_mask = 0; + + u32 old_index = tab->index; + + /* Check if vlan is wanted */ + if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) { + if (!rule->m_ext.vlan_tci) + rule->m_ext.vlan_tci = 0xFFFF; + + vlan = RQFPR_VLN; + vlan_mask = RQFPR_VLN; + + /* Separate the fields */ + id = rule->h_ext.vlan_tci & VLAN_VID_MASK; + id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK; + cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK; + cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK; + prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { + vlan |= RQFPR_CFI; + vlan_mask |= RQFPR_CFI; + } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { + vlan_mask |= RQFPR_CFI; + } + } + + switch (rule->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan, + RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); + gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec, + &rule->m_u.tcp_ip4_spec, tab); + break; + case UDP_V4_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan, + RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); + gfar_set_basic_ip(&rule->h_u.udp_ip4_spec, + &rule->m_u.udp_ip4_spec, tab); + break; + case SCTP_V4_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, + tab); + gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab); + gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u, + (struct ethtool_tcpip4_spec *) &rule->m_u, tab); + break; + case IP_USER_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, + tab); + gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u, + (struct ethtool_usrip4_spec *) &rule->m_u, tab); + break; + case ETHER_FLOW: + if (vlan) + gfar_set_parse_bits(vlan, vlan_mask, tab); + gfar_set_ether((struct ethhdr *) &rule->h_u, + (struct ethhdr *) &rule->m_u, tab); + break; + default: + return -1; + } + + /* Set the vlan attributes in the end */ + if (vlan) { + gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab); + gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab); + } + + /* If there has been nothing written till now, it must be a default */ + if (tab->index == old_index) { + gfar_set_mask(0xFFFFFFFF, tab); + tab->fe[tab->index].ctrl = 0x20; + tab->fe[tab->index].prop = 0x0; + tab->index++; + } + + /* Remove last AND */ + tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND); + + /* Specify which queue to use or to drop */ + if (rule->ring_cookie == RX_CLS_FLOW_DISC) + tab->fe[tab->index - 1].ctrl |= RQFCR_RJE; + else + tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10); + + /* Only big enough entries can be clustered */ + if (tab->index > (old_index + 2)) { + tab->fe[old_index + 1].ctrl |= RQFCR_CLE; 
+ tab->fe[tab->index - 1].ctrl |= RQFCR_CLE; + } + + /* In rare cases the cache can be full while there is free space in hw */ + if (tab->index > MAX_FILER_CACHE_IDX - 1) + return -EBUSY; + + return 0; +} + +/* Copy size filer entries */ +static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], + struct gfar_filer_entry src[0], s32 size) +{ + while (size > 0) { + size--; + dst[size].ctrl = src[size].ctrl; + dst[size].prop = src[size].prop; + } +} + +/* Delete the contents of the filer-table between start and end + * and collapse them */ +static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) +{ + int length; + if (end > MAX_FILER_CACHE_IDX || end < begin) + return -EINVAL; + + end++; + length = end - begin; + + /* Copy */ + while (end < tab->index) { + tab->fe[begin].ctrl = tab->fe[end].ctrl; + tab->fe[begin++].prop = tab->fe[end++].prop; + + } + /* Fill up with don't cares */ + while (begin < tab->index) { + tab->fe[begin].ctrl = 0x60; + tab->fe[begin].prop = 0xFFFFFFFF; + begin++; + } + + tab->index -= length; + return 0; +} + +/* Make space on the wanted location */ +static int gfar_expand_filer_entries(u32 begin, u32 length, + struct filer_table *tab) +{ + if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin + > MAX_FILER_CACHE_IDX) + return -EINVAL; + + gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), + tab->index - length + 1); + + tab->index += length; + return 0; +} + +static int gfar_get_next_cluster_start(int start, struct filer_table *tab) +{ + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) + == (RQFCR_AND | RQFCR_CLE)) + return start; + } + return -1; +} + +static int gfar_get_next_cluster_end(int start, struct filer_table *tab) +{ + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) + == (RQFCR_CLE)) + return start; + } + return -1; +} + +/* + * Uses hardwares clustering option to reduce + * the number of filer table entries + */ +static void gfar_cluster_filer(struct filer_table *tab) +{ + s32 i = -1, j, iend, jend; + + while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { + j = i; + while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { + /* + * The cluster entries self and the previous one + * (a mask) must be identical! + */ + if (tab->fe[i].ctrl != tab->fe[j].ctrl) + break; + if (tab->fe[i].prop != tab->fe[j].prop) + break; + if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) + break; + if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) + break; + iend = gfar_get_next_cluster_end(i, tab); + jend = gfar_get_next_cluster_end(j, tab); + if (jend == -1 || iend == -1) + break; + /* + * First we make some free space, where our cluster + * element should be. Then we copy it there and finally + * delete in from its old location. 
+ */ + + if (gfar_expand_filer_entries(iend, (jend - j), tab) + == -EINVAL) + break; + + gfar_copy_filer_entries(&(tab->fe[iend + 1]), + &(tab->fe[jend + 1]), jend - j); + + if (gfar_trim_filer_entries(jend - 1, + jend + (jend - j), tab) == -EINVAL) + return; + + /* Mask out cluster bit */ + tab->fe[iend].ctrl &= ~(RQFCR_CLE); + } + } +} + +/* Swaps the masked bits of a1<>a2 and b1<>b2 */ +static void gfar_swap_bits(struct gfar_filer_entry *a1, + struct gfar_filer_entry *a2, struct gfar_filer_entry *b1, + struct gfar_filer_entry *b2, u32 mask) +{ + u32 temp[4]; + temp[0] = a1->ctrl & mask; + temp[1] = a2->ctrl & mask; + temp[2] = b1->ctrl & mask; + temp[3] = b2->ctrl & mask; + + a1->ctrl &= ~mask; + a2->ctrl &= ~mask; + b1->ctrl &= ~mask; + b2->ctrl &= ~mask; + + a1->ctrl |= temp[1]; + a2->ctrl |= temp[0]; + b1->ctrl |= temp[3]; + b2->ctrl |= temp[2]; +} + +/* + * Generate a list consisting of masks values with their start and + * end of validity and block as indicator for parts belonging + * together (glued by ANDs) in mask_table + */ +static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, + struct filer_table *tab) +{ + u32 i, and_index = 0, block_index = 1; + + for (i = 0; i < tab->index; i++) { + + /* LSByte of control = 0 sets a mask */ + if (!(tab->fe[i].ctrl & 0xF)) { + mask_table[and_index].mask = tab->fe[i].prop; + mask_table[and_index].start = i; + mask_table[and_index].block = block_index; + if (and_index >= 1) + mask_table[and_index - 1].end = i - 1; + and_index++; + } + /* cluster starts and ends will be separated because they should + * hold their position */ + if (tab->fe[i].ctrl & RQFCR_CLE) + block_index++; + /* A not set AND indicates the end of a depended block */ + if (!(tab->fe[i].ctrl & RQFCR_AND)) + block_index++; + + } + + mask_table[and_index - 1].end = i - 1; + + return and_index; +} + +/* + * Sorts the entries of mask_table by the values of the masks. + * Important: The 0xFF80 flags of the first and last entry of a + * block must hold their position (which queue, CLusterEnable, ReJEct, + * AND) + */ +static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, + struct filer_table *temp_table, u32 and_index) +{ + /* Pointer to compare function (_asc or _desc) */ + int (*gfar_comp)(const void *, const void *); + + u32 i, size = 0, start = 0, prev = 1; + u32 old_first, old_last, new_first, new_last; + + gfar_comp = &gfar_comp_desc; + + for (i = 0; i < and_index; i++) { + + if (prev != mask_table[i].block) { + old_first = mask_table[start].start + 1; + old_last = mask_table[i - 1].end; + sort(mask_table + start, size, + sizeof(struct gfar_mask_entry), + gfar_comp, &gfar_swap); + + /* Toggle order for every block. This makes the + * thing more efficient! */ + if (gfar_comp == gfar_comp_desc) + gfar_comp = &gfar_comp_asc; + else + gfar_comp = &gfar_comp_desc; + + new_first = mask_table[start].start + 1; + new_last = mask_table[i - 1].end; + + gfar_swap_bits(&temp_table->fe[new_first], + &temp_table->fe[old_first], + &temp_table->fe[new_last], + &temp_table->fe[old_last], + RQFCR_QUEUE | RQFCR_CLE | + RQFCR_RJE | RQFCR_AND + ); + + start = i; + size = 0; + } + size++; + prev = mask_table[i].block; + } + +} + +/* + * Reduces the number of masks needed in the filer table to save entries + * This is done by sorting the masks of a depended block. A depended block is + * identified by gluing ANDs or CLE. The sorting order toggles after every + * block. Of course entries in scope of a mask must change their location with + * it. 
+ */ +static int gfar_optimize_filer_masks(struct filer_table *tab) +{ + struct filer_table *temp_table; + struct gfar_mask_entry *mask_table; + + u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; + s32 ret = 0; + + /* We need a copy of the filer table because + * we want to change its order */ + temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL); + if (temp_table == NULL) + return -ENOMEM; + memcpy(temp_table, tab, sizeof(*temp_table)); + + mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, + sizeof(struct gfar_mask_entry), GFP_KERNEL); + + if (mask_table == NULL) { + ret = -ENOMEM; + goto end; + } + + and_index = gfar_generate_mask_table(mask_table, tab); + + gfar_sort_mask_table(mask_table, temp_table, and_index); + + /* Now we can copy the data from our duplicated filer table to + * the real one in the order the mask table says */ + for (i = 0; i < and_index; i++) { + size = mask_table[i].end - mask_table[i].start + 1; + gfar_copy_filer_entries(&(tab->fe[j]), + &(temp_table->fe[mask_table[i].start]), size); + j += size; + } + + /* And finally we just have to check for duplicated masks and drop the + * second ones */ + for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { + if (tab->fe[i].ctrl == 0x80) { + previous_mask = i++; + break; + } + } + for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { + if (tab->fe[i].ctrl == 0x80) { + if (tab->fe[i].prop == tab->fe[previous_mask].prop) { + /* Two identical ones found! + * So drop the second one! */ + gfar_trim_filer_entries(i, i, tab); + } else + /* Not identical! */ + previous_mask = i; + } + } + + kfree(mask_table); +end: kfree(temp_table); + return ret; +} + +/* Write the bit-pattern from software's buffer to hardware registers */ +static int gfar_write_filer_table(struct gfar_private *priv, + struct filer_table *tab) +{ + u32 i = 0; + if (tab->index > MAX_FILER_IDX - 1) + return -EBUSY; + + /* Avoid inconsistent filer table to be processed */ + lock_rx_qs(priv); + + /* Fill regular entries */ + for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++) + gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); + /* Fill the rest with fall-troughs */ + for (; i < MAX_FILER_IDX - 1; i++) + gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); + /* Last entry must be default accept + * because that's what people expect */ + gfar_write_filer(priv, i, 0x20, 0x0); + + unlock_rx_qs(priv); + + return 0; +} + +static int gfar_check_capability(struct ethtool_rx_flow_spec *flow, + struct gfar_private *priv) +{ + + if (flow->flow_type & FLOW_EXT) { + if (~flow->m_ext.data[0] || ~flow->m_ext.data[1]) + netdev_warn(priv->ndev, + "User-specific data not supported!\n"); + if (~flow->m_ext.vlan_etype) + netdev_warn(priv->ndev, + "VLAN-etype not supported!\n"); + } + if (flow->flow_type == IP_USER_FLOW) + if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) + netdev_warn(priv->ndev, + "IP-Version differing from IPv4 not supported!\n"); + + return 0; +} + +static int gfar_process_filer_changes(struct gfar_private *priv) +{ + struct ethtool_flow_spec_container *j; + struct filer_table *tab; + s32 i = 0; + s32 ret = 0; + + /* So index is set to zero, too! 
*/ + tab = kzalloc(sizeof(*tab), GFP_KERNEL); + if (tab == NULL) + return -ENOMEM; + + /* Now convert the existing filer data from flow_spec into + * filer tables binary format */ + list_for_each_entry(j, &priv->rx_list.list, list) { + ret = gfar_convert_to_filer(&j->fs, tab); + if (ret == -EBUSY) { + netdev_err(priv->ndev, "Rule not added: No free space!\n"); + goto end; + } + if (ret == -1) { + netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n"); + goto end; + } + } + + i = tab->index; + + /* Optimizations to save entries */ + gfar_cluster_filer(tab); + gfar_optimize_filer_masks(tab); + + pr_debug("\n\tSummary:\n" + "\tData on hardware: %d\n" + "\tCompression rate: %d%%\n", + tab->index, 100 - (100 * tab->index) / i); + + /* Write everything to hardware */ + ret = gfar_write_filer_table(priv, tab); + if (ret == -EBUSY) { + netdev_err(priv->ndev, "Rule not added: No free space!\n"); + goto end; + } + +end: kfree(tab); + return ret; +} + +static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow) +{ + u32 i = 0; + + for (i = 0; i < sizeof(flow->m_u); i++) + flow->m_u.hdata[i] ^= 0xFF; + + flow->m_ext.vlan_etype ^= 0xFFFF; + flow->m_ext.vlan_tci ^= 0xFFFF; + flow->m_ext.data[0] ^= ~0; + flow->m_ext.data[1] ^= ~0; +} + +static int gfar_add_cls(struct gfar_private *priv, + struct ethtool_rx_flow_spec *flow) +{ + struct ethtool_flow_spec_container *temp, *comp; + int ret = 0; + + temp = kmalloc(sizeof(*temp), GFP_KERNEL); + if (temp == NULL) + return -ENOMEM; + memcpy(&temp->fs, flow, sizeof(temp->fs)); + + gfar_invert_masks(&temp->fs); + ret = gfar_check_capability(&temp->fs, priv); + if (ret) + goto clean_mem; + /* Link in the new element at the right @location */ + if (list_empty(&priv->rx_list.list)) { + ret = gfar_check_filer_hardware(priv); + if (ret != 0) + goto clean_mem; + list_add(&temp->list, &priv->rx_list.list); + goto process; + } else { + + list_for_each_entry(comp, &priv->rx_list.list, list) { + if (comp->fs.location > flow->location) { + list_add_tail(&temp->list, &comp->list); + goto process; + } + if (comp->fs.location == flow->location) { + netdev_err(priv->ndev, + "Rule not added: ID %d not free!\n", + flow->location); + ret = -EBUSY; + goto clean_mem; + } + } + list_add_tail(&temp->list, &priv->rx_list.list); + } + +process: + ret = gfar_process_filer_changes(priv); + if (ret) + goto clean_list; + priv->rx_list.count++; + return ret; + +clean_list: + list_del(&temp->list); +clean_mem: + kfree(temp); + return ret; +} + +static int gfar_del_cls(struct gfar_private *priv, u32 loc) +{ + struct ethtool_flow_spec_container *comp; + u32 ret = -EINVAL; + + if (list_empty(&priv->rx_list.list)) + return ret; + + list_for_each_entry(comp, &priv->rx_list.list, list) { + if (comp->fs.location == loc) { + list_del(&comp->list); + kfree(comp); + priv->rx_list.count--; + gfar_process_filer_changes(priv); + ret = 0; + break; + } + } + + return ret; + +} + +static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_flow_spec_container *comp; + u32 ret = -EINVAL; + + list_for_each_entry(comp, &priv->rx_list.list, list) { + if (comp->fs.location == cmd->fs.location) { + memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs)); + gfar_invert_masks(&cmd->fs); + ret = 0; + break; + } + } + + return ret; +} + +static int gfar_get_cls_all(struct gfar_private *priv, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct ethtool_flow_spec_container *comp; + u32 i = 0; + + list_for_each_entry(comp, &priv->rx_list.list, list) { + if (i <= 
cmd->rule_cnt) { + rule_locs[i] = comp->fs.location; + i++; + } + } + + cmd->data = MAX_FILER_IDX; + + return 0; +} + +static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + int ret = 0; + + mutex_lock(&priv->rx_queue_access); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = gfar_set_hash_opts(priv, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && + cmd->fs.ring_cookie >= priv->num_rx_queues) { + ret = -EINVAL; + break; + } + ret = gfar_add_cls(priv, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = gfar_del_cls(priv, cmd->fs.location); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->rx_queue_access); + + return ret; +} + +static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + void *rule_locs) +{ + struct gfar_private *priv = netdev_priv(dev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->num_rx_queues; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = priv->rx_list.count; + break; + case ETHTOOL_GRXCLSRULE: + ret = gfar_get_cls(priv, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = gfar_get_cls_all(priv, cmd, (u32 *) rule_locs); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +const struct ethtool_ops gfar_ethtool_ops = { + .get_settings = gfar_gsettings, + .set_settings = gfar_ssettings, + .get_drvinfo = gfar_gdrvinfo, + .get_regs_len = gfar_reglen, + .get_regs = gfar_get_regs, + .get_link = ethtool_op_get_link, + .get_coalesce = gfar_gcoalesce, + .set_coalesce = gfar_scoalesce, + .get_ringparam = gfar_gringparam, + .set_ringparam = gfar_sringparam, + .get_strings = gfar_gstrings, + .get_sset_count = gfar_sset_count, + .get_ethtool_stats = gfar_fill_stats, + .get_msglevel = gfar_get_msglevel, + .set_msglevel = gfar_set_msglevel, +#ifdef CONFIG_PM + .get_wol = gfar_get_wol, + .set_wol = gfar_set_wol, +#endif + .set_rxnfc = gfar_set_nfc, + .get_rxnfc = gfar_get_nfc, +}; diff --cc drivers/net/ethernet/intel/e1000e/82571.c index 480f2592f8a5,000000000000..536b3a55c45f mode 100644,000000..100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@@ -1,2115 -1,0 +1,2117 @@@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000.h" + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF + +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static s32 e1000_setup_link_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +static void e1000_clear_vfta_82571(struct e1000_hw *hw); +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +static s32 e1000_led_on_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + case e1000_82574: + case e1000_82583: + phy->type = e1000_phy_bm; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + e_dbg("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* + * Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_82571; + func->check_for_link = e1000e_check_for_copper_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000e_check_for_fiber_link; + func->get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000_check_for_serdes_link_82571; + func->get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + func->set_lan_id = e1000_set_lan_id_single_port; + func->check_mng_mode = e1000e_check_mng_mode_generic; + func->led_on = e1000e_led_on_generic; + func->blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* + * ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + break; + case e1000_82574: + case e1000_82583: + func->set_lan_id = e1000_set_lan_id_single_port; + func->check_mng_mode = e1000_check_mng_mode_82574; + func->led_on = e1000_led_on_82574; + break; + default: + func->check_mng_mode = e1000e_check_mng_mode_generic; + func->led_on = e1000e_led_on_generic; + func->blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* + * Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM. 
+ */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = er32(SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + ew32(SWSM2, + swsm2 | E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else + force_clear_smbi = false; + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = er32(SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + e_dbg("Please update your 82571 Bootagent\n"); + } + ew32(SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* + * Initialize device specific counter of SMBI acquisition + * timeouts. + */ + hw->dev_spec.e82571.smb_counter = 0; + + return 0; +} + +static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) + adapter->flags &= ~FLAG_HAS_WOL; + /* Does not support WoL on any port */ + if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) + adapter->flags &= ~FLAG_HAS_WOL; + break; + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + adapter->flags |= FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = DEFAULT_JUMBO; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
+ */ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000e_get_phy_id(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 sw_timeout = hw->nvm.word_size + 1; + s32 fw_timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* + * If we have timedout 3 times on trying to acquire + * the inter-port SMBI semaphore, there is old code + * operating on the other port, and it is not + * releasing SMBI. Modify the number of times that + * we try for the semaphore to interwork with this + * older code. + */ + if (hw->dev_spec.e82571.smb_counter > 2) + sw_timeout = 1; + + /* Get the SW semaphore */ + while (i < sw_timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == sw_timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + hw->dev_spec.e82571.smb_counter++; + } + /* Get the FW semaphore. */ + for (i = 0; i < fw_timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == fw_timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_82571(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} +/** + * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore during reset. + * + **/ +static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + s32 ret_val = 0; + s32 i = 0; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + do { + ew32(EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + usleep_range(2000, 4000); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + + if (i == MDIO_OWNERSHIP_TIMEOUT) { + /* Release semaphores */ + e1000_put_hw_semaphore_82573(hw); + e_dbg("Driver can't access the PHY\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82573 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used during reset. 
+ * + **/ +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + mutex_lock(&swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + mutex_unlock(&swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + e1000_put_hw_semaphore_82573(hw); + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u16 data = er32(POEMB); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u16 data = er32(POEMB); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000e_acquire_nvm(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
+ **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* + * If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return ret_val; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* + * The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. 
+ * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = 0; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = (data[i] << E1000_NVM_RW_REG_DATA) | + ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START; + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & + E1000_NVM_CFG_DONE_PORT_0) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + s32 ret_val; + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + /* + * Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + if (ret_val) + e_dbg("Cannot acquire MDIO ownership\n"); + + ctrl = er32(CTRL); + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* + * Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + msleep(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000e_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
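+ *
+ * This covers the ID LED, the VLAN filter table, the receive addresses,
+ * the multicast table, link and flow control, and the Tx descriptor
+ * write-back policy, and finishes by clearing the statistics counters.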
+ **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) + e_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + /* + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82571(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000e_enable_tx_pkt_filtering(hw); + /* fall through */ + case e1000_82574: + case e1000_82583: + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + break; + default: + reg_data = er32(TXDCTL(1)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(1), reg_data); + break; + } + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
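+ *
+ * This sets or clears MAC-type dependent bits in TXDCTL, TARC, CTRL,
+ * CTRL_EXT, PBA_ECC, GCR and GCR2, several of them as errata workarounds.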
+ **/ +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + default: + break; + } + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); + break; + default: + break; + } + + /* Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL); + reg &= ~(1 << 29); + ew32(CTRL, reg); + break; + default: + break; + } + + /* Extended Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + break; + default: + break; + } + + if (hw->mac.type == e1000_82571) { + reg = er32(PBA_ECC); + reg |= E1000_PBA_ECC_CORR_EN; + ew32(PBA_ECC, reg); + } + /* + * Workaround for hardware errata. + * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 + */ + + if ((hw->mac.type == e1000_82571) || + (hw->mac.type == e1000_82572)) { + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; + ew32(CTRL_EXT, reg); + } + + + /* PCI-Ex Control Registers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + reg = er32(GCR); + reg |= (1 << 22); + ew32(GCR, reg); + + /* + * Workaround for hardware errata. + * apply workaround for hardware errata documented in errata + * docs Fixes issue where some error prone or unreliable PCIe + * completions are occurring, particularly with ASPM enabled. + * Without fix, issue can cause Tx timeouts. + */ + reg = er32(GCR2); + reg |= 1; + ew32(GCR2, reg); + break; + default: + break; + } +} + +/** + * e1000_clear_vfta_82571 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +static void e1000_clear_vfta_82571(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->mng_cookie.vlan_id != 0) { + /* + * The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. + */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + break; + default: + break; + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* + * If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? 
vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + e1e_flush(); + } +} + +/** + * e1000_check_mng_mode_82574 - Check manageability is enabled + * @hw: pointer to the HW structure + * + * Reads the NVM Initialization Control Word 2 and returns true + * (>0) if any manageability is enabled, else false (0). + **/ +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) +{ + u16 data; + + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; +} + +/** + * e1000_led_on_82574 - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +static s32 e1000_led_on_82574(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + + ctrl = hw->mac.ledctl_mode2; + if (!(E1000_STATUS_LU & er32(STATUS))) { + /* + * If no link, then turn LED on by setting the invert bit + * for each LED that's "on" (0x0E) in ledctl_mode2. + */ + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); + } + ew32(LEDCTL, ctrl); + + return 0; +} + +/** + * e1000_check_phy_82574 - check 82574 phy hung state + * @hw: pointer to the HW structure + * + * Returns whether phy is hung or not + **/ +bool e1000_check_phy_82574(struct e1000_hw *hw) +{ + u16 status_1kbt = 0; + u16 receive_errors = 0; + bool phy_hung = false; + s32 ret_val = 0; + + /* + * Read PHY Receive Error counter first, if its is max - all F's then + * read the Base1000T status register If both are max then PHY is hung. + */ + ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); + + if (ret_val) + goto out; + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); + if (ret_val) + goto out; + if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == + E1000_IDLE_ERROR_COUNT_MASK) + phy_hung = true; + } +out: + return phy_hung; +} + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + /* + * 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + break; + default: + break; + } + + return e1000e_setup_link(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. 
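+ *
+ * The PHY-specific setup (M88/BM versus IGP) is selected first, then the
+ * generic copper link setup completes the configuration.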
+ **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + break; + } + + if (ret_val) + return ret_val; + + ret_val = e1000e_setup_copper_link(hw); + + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. + * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = 0; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* + * We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* + * If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner. + * If the partner code word is null, stop forcing + * and restart auto negotiation. 
+ */ + if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + /* Enable autoneg, and unforce link up */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("FORCED_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_autoneg_progress: + if (rxcw & E1000_RXCW_C) { + /* + * We received /C/ ordered sets, meaning the + * link partner has autonegotiated, and we can + * trust the Link Up (LU) status bit. + */ + if (status & E1000_STATUS_LU) { + mac->serdes_link_state = + e1000_serdes_link_autoneg_complete; + e_dbg("AN_PROG -> AN_UP\n"); + mac->serdes_has_link = true; + } else { + /* Autoneg completed, but failed. */ + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("AN_PROG -> DOWN\n"); + } + } else { + /* + * The link partner did not autoneg. + * Force link up and full duplex, and change + * state to forced. + */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error config flow control\n"); + break; + } + mac->serdes_link_state = + e1000_serdes_link_forced_up; + mac->serdes_has_link = true; + e_dbg("AN_PROG -> FORCED_UP\n"); + } + break; + + case e1000_serdes_link_down: + default: + /* + * The link was down but the receiver has now gained + * valid sync, so lets see if we can bring the link + * up. + */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("DOWN -> AN_PROG\n"); + break; + } + } else { + if (!(rxcw & E1000_RXCW_SYNCH)) { + mac->serdes_has_link = false; + mac->serdes_link_state = e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + } else { + /* + * Check several times, if Sync and Config + * both are consistently 1 then simply ignore + * the Invalid bit and restart Autoneg + */ + for (i = 0; i < AN_RETRY_COUNT; i++) { + udelay(10); + rxcw = er32(RXCW); + if ((rxcw & E1000_RXCW_IV) && + !((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C))) { + mac->serdes_has_link = false; + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + break; + } + } + + if (i == AN_RETRY_COUNT) { + txcw = er32(TXCW); + txcw |= E1000_TXCW_ANE; + ew32(TXCW, txcw); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("ANYSTATE -> AN_PROG\n"); + } + } + } + + return ret_val; +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
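+ *
+ * The 82573/82574/82583 use their own default value, so the reserved-value
+ * check is MAC-type dependent.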
+ **/ +static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (*data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + break; + default: + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + break; + } + + return 0; +} + +/** + * e1000e_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administered address state. + **/ +bool e1000e_get_laa_state_82571(struct e1000_hw *hw) +{ + if (hw->mac.type != e1000_82571) + return false; + + return hw->dev_spec.e82571.laa_is_present; +} + +/** + * e1000e_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administered address state. + **/ +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + if (hw->mac.type != e1000_82571) + return; + + hw->dev_spec.e82571.laa_is_present = state; + + /* If workaround is activated... */ + if (state) + /* + * Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. + */ + e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 data; + + if (nvm->type != e1000_nvm_flash_hw) + return 0; + + /* + * Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = e1000_read_nvm(hw, 0x10, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x10)) { + /* + * Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. Otherwise, + * we need to set this bit to a 1 and update the + * checksum. + */ + ret_val = e1000_read_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = e1000_write_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + } + } + + return 0; +} + +/** + * e1000_read_mac_addr_82571 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (hw->mac.type == e1000_82571) { + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
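+ * The generic MAC address read that follows then picks up the
+ * (possibly overridden) address.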
+ */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + } + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_mac_info *mac = &hw->mac; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static struct e1000_mac_operations e82571_mac_ops = { + /* .check_mng_mode: mac type dependent */ + /* .check_for_link: media type dependent */ + .id_led_init = e1000e_id_led_init, + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + /* .get_link_up_info: media type dependent */ + /* .led_on: mac type dependent */ + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_82571, + .reset_hw = e1000_reset_hw_82571, + .init_hw = e1000_init_hw_82571, + .setup_link = e1000_setup_link_82571, + /* .setup_physical_interface: media type dependent */ + .setup_led = e1000e_setup_led_generic, + .read_mac_addr = e1000_read_mac_addr_82571, +}; + +static struct e1000_phy_operations e82_phy_ops_igp = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_igp, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = NULL, + .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, + .get_cfg_done = e1000_get_cfg_done_82571, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_info = e1000e_get_phy_info_igp, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_igp, + .cfg_on_link_up = NULL, +}; + +static struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done, + 
.get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_m88, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_m88, + .cfg_on_link_up = NULL, +}; + +static struct e1000_phy_operations e82_phy_ops_bm = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_bm2, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_bm2, + .cfg_on_link_up = NULL, +}; + +static struct e1000_nvm_operations e82571_nvm_ops = { + .acquire = e1000_acquire_nvm_82571, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_82571, + .update = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate = e1000_validate_nvm_checksum_82571, + .write = e1000_write_nvm_82571, +}; + +struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1 + | FLAG2_DISABLE_ASPM_L0S, + .pba = 20, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82574_info = { + .mac = e1000_82574, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_MSIX + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_CHECK_PHY_HANG - | FLAG2_DISABLE_ASPM_L0S, ++ | FLAG2_DISABLE_ASPM_L0S ++ | FLAG2_NO_DISABLE_RX, + .pba = 32, + .max_hw_frame_size = 
DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82583_info = { + .mac = e1000_82583, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_CTRLEXT_ON_LOAD, - .flags2 = FLAG2_DISABLE_ASPM_L0S, ++ .flags2 = FLAG2_DISABLE_ASPM_L0S ++ | FLAG2_NO_DISABLE_RX, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + diff --cc drivers/net/ethernet/intel/e1000e/e1000.h index 638d175792cf,000000000000..8533ad7f3559 mode 100644,000000..100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@@ -1,736 -1,0 +1,741 @@@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hw.h" + +struct e1000_info; + +#define e_dbg(format, arg...) \ + netdev_dbg(hw->adapter->netdev, format, ## arg) +#define e_err(format, arg...) \ + netdev_err(adapter->netdev, format, ## arg) +#define e_info(format, arg...) \ + netdev_info(adapter->netdev, format, ## arg) +#define e_warn(format, arg...) \ + netdev_warn(adapter->netdev, format, ## arg) +#define e_notice(format, arg...) \ + netdev_notice(adapter->netdev, format, ## arg) + + +/* Interrupt modes, as used by the IntMode parameter */ +#define E1000E_INT_MODE_LEGACY 0 +#define E1000E_INT_MODE_MSI 1 +#define E1000E_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 4096 +#define E1000_MIN_TXD 64 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 4096 +#define E1000_MIN_RXD 64 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* Early Receive defines */ +#define E1000_ERT_2048 0x100 + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_APME 0x0400 + +#define E1000_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +#define DEFAULT_JUMBO 9234 + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_STATS_PAGE 778 +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_LINK_UP 0x0040 + ++#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ ++#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 ++ +/* Time to wait before putting the device into D3 if there's no link (in ms). 
*/ +#define LINK_TIMEOUT 100 + +#define DEFAULT_RDTR 0 +#define DEFAULT_RADV 8 +#define BURST_RDTR 0x20 +#define BURST_RADV 0x20 + +/* + * in the case of WTHRESH, it appears at least the 82571/2 hardware + * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when + * WTHRESH=4, and since we want 64 bytes at a time written back, set + * it to 5 + */ +#define E1000_TXDCTL_DMA_BURST_ENABLE \ + (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ + E1000_TXDCTL_COUNT_DESC | \ + (5 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ + +#define E1000_RXDCTL_DMA_BURST_ENABLE \ + (0x01000000 | /* set descriptor granularity */ \ + (4 << 16) | /* set writeback threshold */ \ + (4 << 8) | /* set prefetch threshold */ \ + 0x20) /* set hthresh */ + +#define E1000_TIDV_FPD (1 << 31) +#define E1000_RDTR_FPD (1 << 31) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_82574, + board_82583, + board_80003es2lan, + board_ich8lan, + board_ich9lan, + board_ich10lan, + board_pchlan, + board_pch2lan, +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* + * wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + /* Rx */ + struct { + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_ring { + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + char name[IFNAMSIZ + 5]; + u32 ims_val; + u32 itr_val; + u16 itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; +}; + +/* PHY register snapshot values */ +struct e1000_phy_regs { + u16 bmcr; /* basic mode control register */ + u16 bmsr; /* basic mode status register */ + u16 advertise; /* auto-negotiation advertisement */ + u16 lpa; /* link partner ability register */ + u16 expansion; /* auto-negotiation expansion reg */ + u16 ctrl1000; /* 1000BASE-T control register */ + u16 stat1000; /* 1000BASE-T status register */ + u16 estatus; /* extended status register */ +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* + * Tx + */ + struct e1000_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + struct napi_struct napi; + + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u64 tpt_old; + u64 colc_old; + u32 gotc; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* + * Rx + */ + bool (*clean_rx) (struct e1000_adapter *adapter, + int *work_done, int work_to_do) + ____cacheline_aligned_in_smp; + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + spinlock_t stats64_lock; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + /* Snapshot of PHY registers */ + struct e1000_phy_regs phy_regs; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + + bool fc_autoneg; + + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + + bool idle_check; + int phy_hang_count; +}; + +struct e1000_info { + enum e1000_mac_type mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + struct e1000_mac_operations 
*mac_ops; + struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +#define FLAG_HAS_ERT (1 << 4) +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_READ_ONLY_NVM (1 << 8) +#define FLAG_IS_ICH (1 << 9) +#define FLAG_HAS_MSIX (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 13) +#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +#define FLAG_RX_CSUM_ENABLED (1 << 28) +#define FLAG_TSO_FORCE (1 << 29) +#define FLAG_RX_RESTART_NOW (1 << 30) +#define FLAG_MSI_TEST_FAILED (1 << 31) + +/* CRC Stripping defines */ +#define FLAG2_CRC_STRIPPING (1 << 0) +#define FLAG2_HAS_PHY_WAKEUP (1 << 1) +#define FLAG2_IS_DISCARDING (1 << 2) +#define FLAG2_DISABLE_ASPM_L1 (1 << 3) +#define FLAG2_HAS_PHY_STATS (1 << 4) +#define FLAG2_HAS_EEE (1 << 5) +#define FLAG2_DMA_BURST (1 << 6) +#define FLAG2_DISABLE_ASPM_L0S (1 << 7) +#define FLAG2_DISABLE_AIM (1 << 8) +#define FLAG2_CHECK_PHY_HANG (1 << 9) ++#define FLAG2_NO_DISABLE_RX (1 << 10) ++#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +extern void e1000e_check_options(struct e1000_adapter *adapter); +extern void e1000e_set_ethtool_ops(struct net_device *netdev); + +extern int e1000e_up(struct e1000_adapter *adapter); +extern void e1000e_down(struct e1000_adapter *adapter); +extern void e1000e_reinit_locked(struct e1000_adapter *adapter); +extern void e1000e_reset(struct e1000_adapter *adapter); +extern void e1000e_power_up_phy(struct e1000_adapter *adapter); +extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); +extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); +extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 + *stats); +extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +extern void 
e1000e_get_hw_control(struct e1000_adapter *adapter); +extern void e1000e_release_hw_control(struct e1000_adapter *adapter); + +extern unsigned int copybreak; + +extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); + +extern struct e1000_info e1000_82571_info; +extern struct e1000_info e1000_82572_info; +extern struct e1000_info e1000_82573_info; +extern struct e1000_info e1000_82574_info; +extern struct e1000_info e1000_82583_info; +extern struct e1000_info e1000_ich8_info; +extern struct e1000_info e1000_ich9_info; +extern struct e1000_info e1000_ich10_info; +extern struct e1000_info e1000_pch_info; +extern struct e1000_info e1000_pch2_info; +extern struct e1000_info e1000_es2_info; + +extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); + +extern s32 e1000e_commit_phy(struct e1000_hw *hw); + +extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); +extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); + +extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_setup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_led_on_generic(struct e1000_hw *hw); +extern s32 e1000e_led_off_generic(struct e1000_hw *hw); +extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +extern void e1000_set_lan_id_single_port(struct e1000_hw *hw); +extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +extern s32 e1000e_id_led_init(struct e1000_hw *hw); +extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +extern s32 e1000e_setup_link(struct e1000_hw *hw); +extern void e1000_clear_vfta_generic(struct e1000_hw *hw); +extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count); +extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +extern s32 
e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +extern void e1000e_config_collision_dist(struct e1000_hw *hw); +extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); +extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); +extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); +extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +extern void e1000e_reset_adaptive(struct e1000_hw *hw); +extern void e1000e_update_adaptive(struct e1000_hw *hw); + +extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); +extern s32 e1000e_get_phy_id(struct e1000_hw *hw); +extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); +extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); +extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, + u16 *phy_reg); +extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, + u16 *phy_reg); +extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +extern void e1000_power_up_phy_copper(struct e1000_hw *hw); +extern void e1000_power_down_phy_copper(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 
data); +extern s32 e1000e_check_downshift(struct e1000_hw *hw); +extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); +extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); +extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); +extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); +extern bool e1000_check_phy_82574(struct e1000_hw *hw); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset(hw); +} + +static inline s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + return hw->phy.ops.check_reset_block(hw); +} + +static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg(hw, offset, data); +} + +static inline s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + return hw->phy.ops.get_cable_length(hw); +} + +extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); +extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +extern void e1000e_release_nvm(struct e1000_hw *hw); +extern void e1000e_reload_nvm(struct e1000_hw *hw); +extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); + +static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.read(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.write(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_info(hw); +} + +static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) +{ + return hw->mac.ops.check_mng_mode(hw); +} + +extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); +extern bool e1000e_enable_tx_pkt_filtering(struct 
e1000_hw *hw); +extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +#endif /* _E1000_H_ */ diff --cc drivers/net/ethernet/intel/e1000e/ethtool.c index 06d88f316dce,000000000000..6a0526a59a8a mode 100644,000000..100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@@ -1,2081 -1,0 +1,2082 @@@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for e1000 */ + +#include +#include +#include +#include +#include +#include + +#include "e1000.h" + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(str, m) { \ + .stat_string = str, \ + .type = E1000_STATS, \ + .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ + .stat_offset = offsetof(struct e1000_adapter, m) } +#define E1000_NETDEV_STAT(str, m) { \ + .stat_string = str, \ + .type = NETDEV_STATS, \ + .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, m) } + +static const struct e1000_stats e1000_gstrings_stats[] = { + E1000_STAT("rx_packets", stats.gprc), + E1000_STAT("tx_packets", stats.gptc), + E1000_STAT("rx_bytes", stats.gorc), + E1000_STAT("tx_bytes", stats.gotc), + E1000_STAT("rx_broadcast", stats.bprc), + E1000_STAT("tx_broadcast", stats.bptc), + E1000_STAT("rx_multicast", stats.mprc), + E1000_STAT("tx_multicast", stats.mptc), + E1000_NETDEV_STAT("rx_errors", rx_errors), + E1000_NETDEV_STAT("tx_errors", tx_errors), + E1000_NETDEV_STAT("tx_dropped", tx_dropped), + E1000_STAT("multicast", stats.mprc), + E1000_STAT("collisions", stats.colc), + E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), + E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), + E1000_STAT("rx_crc_errors", stats.crcerrs), + E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), + E1000_STAT("rx_no_buffer_count", stats.rnbc), + E1000_STAT("rx_missed_errors", stats.mpc), + E1000_STAT("tx_aborted_errors", stats.ecol), + E1000_STAT("tx_carrier_errors", stats.tncrs), + E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), + E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), + 
E1000_STAT("tx_window_errors", stats.latecol), + E1000_STAT("tx_abort_late_coll", stats.latecol), + E1000_STAT("tx_deferred_ok", stats.dc), + E1000_STAT("tx_single_coll_ok", stats.scc), + E1000_STAT("tx_multi_coll_ok", stats.mcc), + E1000_STAT("tx_timeout_count", tx_timeout_count), + E1000_STAT("tx_restart_queue", restart_queue), + E1000_STAT("rx_long_length_errors", stats.roc), + E1000_STAT("rx_short_length_errors", stats.ruc), + E1000_STAT("rx_align_errors", stats.algnerrc), + E1000_STAT("tx_tcp_seg_good", stats.tsctc), + E1000_STAT("tx_tcp_seg_failed", stats.tsctfc), + E1000_STAT("rx_flow_control_xon", stats.xonrxc), + E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), + E1000_STAT("tx_flow_control_xon", stats.xontxc), + E1000_STAT("tx_flow_control_xoff", stats.xofftxc), + E1000_STAT("rx_long_byte_count", stats.gorc), + E1000_STAT("rx_csum_offload_good", hw_csum_good), + E1000_STAT("rx_csum_offload_errors", hw_csum_err), + E1000_STAT("rx_header_split", rx_hdr_split), + E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + E1000_STAT("tx_smbus", stats.mgptc), + E1000_STAT("rx_smbus", stats.mgprc), + E1000_STAT("dropped_smbus", stats.mgpdc), + E1000_STAT("rx_dma_failed", rx_dma_failed), + E1000_STAT("tx_dma_failed", tx_dma_failed), +}; + +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 speed; + + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + + speed = -1; + ecmd->duplex = -1; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + speed = adapter->link_speed; + ecmd->duplex = adapter->link_duplex - 1; + } + } else { + u32 status = er32(STATUS); + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + speed = SPEED_100; + else + speed = SPEED_10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } + } + + ethtool_cmd_speed_set(ecmd, speed); + ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy.media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + return 0; +} + +static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) { + goto err_inval; + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + return 0; + +err_inval: + e_err("Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* + * When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (e1000_check_reset_block(hw)) { + e_err("Cannot change link characteristics when SoL/IDER is " + "active.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + if (hw->phy.media_type == e1000_media_type_fiber) { + retval = hw->mac.ops.setup_link(hw); + /* implicit goto out */ + } else { + retval = e1000e_force_mac_fc(hw); + if (retval) + goto out; + e1000e_set_fc_watermarks(hw); + } + } + +out: + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_rx_csum(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->flags & FLAG_RX_CSUM_ENABLED; +} + +static int e1000_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) + adapter->flags |= FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + return 0; +} + +static u32 e1000_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static int e1000_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +static int e1000_set_tso(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + + adapter->flags |= FLAG_TSO_FORCE; + return 0; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 /* overestimate */ + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | 
(adapter->pdev->revision << 16) | + adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + + /* ethtool doesn't use anything past this point, so all this + * code is likely legacy junk for apps that may or may not + * exist */ + if (hw->phy.type == e1000_phy_m88) { + e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = 0; /* was idle_errors */ + e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + if (ret_val) { + /* a read error occurred, throw away the result */ + memset(eeprom_buff, 0xff, sizeof(u16) * + (last_word - first_word + 1)); + } else { + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 
16))) + return -EFAULT; + + if (adapter->flags & FLAG_READ_ONLY_NVM) + return -EINVAL; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + + if (ret_val) + goto out; + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + if (ret_val) + goto out; + + /* + * Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for applicable controllers + */ + if ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82583) || + (hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82573)) + ret_val = e1000e_update_nvm_checksum(hw); + +out: + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + + strncpy(drvinfo->driver, e1000e_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version) - 1); + + /* + * EEPROM image version # is reported as firmware version # for + * PCI-E controllers + */ + snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", + (adapter->eeprom_vers & 0xF000) >> 12, + (adapter->eeprom_vers & 0x0FF0) >> 4, + (adapter->eeprom_vers & 0x000F)); + + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = E1000_MAX_RXD; + ring->tx_max_pending = E1000_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring, *tx_old; + struct e1000_ring *rx_ring, *rx_old; + int err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(adapter->netdev)) + 
e1000e_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); + if (!tx_ring) + goto err_alloc_tx; + + rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); + if (!rx_ring) + goto err_alloc_rx; + + adapter->tx_ring = tx_ring; + adapter->rx_ring = rx_ring; + + rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); + rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); + + tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); + tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* + * restore the old in order to free it, + * then add in the new + */ + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000e_free_rx_resources(adapter); + e1000e_free_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rx_ring; + adapter->tx_ring = tx_ring; + err = e1000e_up(adapter); + if (err) + goto err_setup; + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +err_setup_tx: + e1000e_free_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rx_ring); +err_alloc_rx: + kfree(tx_ring); +err_alloc_tx: + e1000e_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->state); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, + int reg, int offset, u32 mask, u32 write) +{ + u32 pat, val; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(test); pat++) { + E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, + (test[pat] & write)); + val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); + if (val != (test[pat] & write & mask)) { + e_err("pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", reg + offset, val, + (test[pat] & write & mask)); + *data = reg; + return 1; + } + } + return 0; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val; + __ew32(&adapter->hw, reg, write & mask); + val = __er32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err("set/check reg %04X test failed: got 0x%08X " + "expected 0x%08X\n", reg, (val & mask), (write & mask)); + *data = reg; + return 1; + } + return 0; +} +#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ + return 1; \ + } while (0) +#define REG_PATTERN_TEST(reg, mask, write) \ + REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &adapter->hw.mac; + u32 value; + u32 before; + u32 after; + u32 i; + u32 toggle; + u32 mask; + + /* + * The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + switch (mac->type) { + /* there are several bits on newer hardware that are r/w */ + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + default: + toggle = 0x7FFFF033; + break; + } + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err("failed STATUS register test got: 0x%08X expected: " + "0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + if (!(adapter->flags & FLAG_IS_ICH)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + if (!(adapter->flags & FLAG_IS_ICH)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + mask = 0x8003FFFF; + switch (mac->type) { + case e1000_ich10lan: + case e1000_pchlan: + case e1000_pch2lan: + mask |= (1 << 18); + break; + default: + break; + } + for (i = 0; i < mac->rar_entry_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), + mask, 0xFFFFFFFF); + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + return *data; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16) NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 mask; + u32 shared_int = 1; + u32 irq = adapter->pdev->irq; + int i; + int ret_val = 0; + int int_mode = E1000E_INT_MODE_LEGACY; + + *data = 0; + + /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ + if (adapter->int_mode == 
E1000E_INT_MODE_MSIX) { + int_mode = adapter->int_mode; + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e1000e_set_interrupt_capability(adapter); + } + /* Hook up test interrupt handler just for this test */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) { + shared_int = 0; + } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + ret_val = -1; + goto out; + } + e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (i = 0; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (adapter->flags & FLAG_IS_ICH) { + switch (mask) { + case E1000_ICR_RXSEQ: + continue; + case 0x00000100: + if (adapter->hw.mac.type == e1000_ich8lan || + adapter->hw.mac.type == e1000_ich9lan) + continue; + break; + default: + break; + } + } + + if (!shared_int) { + /* + * Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + +out: + if (int_mode == E1000E_INT_MODE_MSIX) { + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = int_mode; + e1000e_set_interrupt_capability(adapter); + } + + return ret_val; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + if (tx_ring->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + tx_ring->buffer_info[i].dma, + tx_ring->buffer_info[i].length, + DMA_TO_DEVICE); + if (tx_ring->buffer_info[i].skb) + dev_kfree_skb(tx_ring->buffer_info[i].skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + if (rx_ring->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rx_ring->buffer_info[i].dma, + 2048, DMA_FROM_DEVICE); + if (rx_ring->buffer_info[i].skb) + dev_kfree_skb(rx_ring->buffer_info[i].skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + int i; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + + tx_ring->buffer_info = kcalloc(tx_ring->count, + sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!(tx_ring->buffer_info)) { + ret_val = 1; + goto err_nomem; + } + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + ret_val = 2; + goto err_nomem; + } + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64) tx_ring->dma >> 32)); + ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int skb_size = 1024; + + skb = alloc_skb(skb_size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, skb_size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if 
(dma_mapping_error(&pdev->dev, + tx_ring->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + rx_ring->buffer_info = kcalloc(rx_ring->count, + sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!(rx_ring->buffer_info)) { + ret_val = 5; + goto err_nomem; + } + + rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + ret_val = 6; + goto err_nomem; + } + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + + rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); ++ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ++ ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64) rx_ring->dma >> 32)); + ew32(RDLEN, rx_ring->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | + E1000_RCTL_SBP | E1000_RCTL_SECRC | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct sk_buff *skb; + + skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 2048, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + rx_ring->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = + cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1e_wphy(&adapter->hw, 29, 0x001F); + e1e_wphy(&adapter->hw, 30, 0x8FFC); + e1e_wphy(&adapter->hw, 29, 0x001A); + e1e_wphy(&adapter->hw, 30, 0x8FF0); +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u16 phy_reg = 0; + s32 ret_val = 0; + + hw->mac.autoneg = 0; + + if (hw->phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + e1e_flush(); + udelay(500); + + return 0; + } + + /* Specific PHY configuration for loopback */ + switch (hw->phy.type) { + case e1000_phy_m88: + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + e1e_wphy(hw, PHY_CONTROL, 0x8140); + break; + case e1000_phy_gg82563: + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); + break; + case e1000_phy_bm: + /* Set Default MAC Interface speed to 1GB */ + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); + phy_reg &= ~0x0007; + phy_reg |= 0x006; + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); + /* Assert SW reset for above settings to take effect */ + e1000e_commit_phy(hw); + mdelay(1); + /* Force Full Duplex */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); + /* Set Link Up (in force link) */ + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); + /* Force Link */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); + /* Set Early Link Enable */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); + break; + case e1000_phy_82577: + case e1000_phy_82578: + /* Workaround: K1 must be disabled for stable 1Gbps operation */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Cannot setup 1Gbps loopback.\n"); + return ret_val; + } + e1000_configure_k1_ich8lan(hw, false); + hw->phy.ops.release(hw); + break; + case e1000_phy_82579: + /* Disable PHY energy detect power down */ + e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + /* Disable full chip energy detect */ + e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); + /* Enable loopback on the PHY */ +#define I82577_PHY_LBK_CTRL 19 + e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); + break; + default: + break; + } + + /* force 1000, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x4140); + mdelay(250); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (adapter->flags & FLAG_IS_ICH) + ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ + + if (hw->phy.media_type == e1000_media_type_copper && + hw->phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* + * Set the ILOS bit on the fiber Nic if half duplex link is + * detected. + */ + if ((er32(STATUS) & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* + * Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
+ */ + if (hw->phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + int link = 0; + + /* special requirements for 82571/82572 fiber adapters */ + + /* + * jump through hoops to make sure link is up because serdes + * link is hardwired up + */ + ctrl |= E1000_CTRL_SLU; + ew32(CTRL, ctrl); + + /* disable autoneg */ + ctrl = er32(TXCW); + ctrl &= ~(1 << 31); + ew32(TXCW, ctrl); + + link = (er32(STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ILOS; + ew32(CTRL, ctrl); + } + + /* + * special write to serdes control register to enable SerDes analog + * loopback + */ +#define E1000_SERDES_LB_ON 0x410 + ew32(SCTL, E1000_SERDES_LB_ON); + e1e_flush(); + usleep_range(10000, 20000); + + return 0; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = er32(CTRL_EXT); + u32 ctrl = er32(CTRL); + + /* + * save CTRL_EXT to restore later, reuse an empty variable (unused + * on mac_type 80003es2lan) + */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + ew32(CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* set mac loopback */ + ctrl = er32(RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + ew32(RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + ew32(KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + break; + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + break; + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->phy.media_type == e1000_media_type_copper) { + return e1000_integrated_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac.type) { + case e1000_80003es2lan: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + ew32(CTRL_EXT, adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + /* fall through */ + case e1000_82571: + case e1000_82572: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { +#define E1000_SERDES_LB_OFF 0x400 + ew32(SCTL, E1000_SERDES_LB_OFF); + e1e_flush(); + 
usleep_range(10000, 20000); + break; + } + /* Fall Through */ + default: + hw->mac.autoneg = 1; + if (hw->phy.type == e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); + e1e_rphy(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1e_wphy(hw, PHY_CONTROL, phy_reg); + e1000e_commit_phy(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) + return 0; + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + int i, j, k, l; + int lc; + int good_cnt; + int ret_val = 0; + unsigned long time; + + ew32(RDT, rx_ring->count - 1); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = 0; + l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + tx_ring->buffer_info[k].dma, + tx_ring->buffer_info[k].length, + DMA_TO_DEVICE); + k++; + if (k == tx_ring->count) + k = 0; + } + ew32(TDT, k); + e1e_flush(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rx_ring->buffer_info[l].dma, 2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rx_ring->buffer_info[l].skb, 1024); + if (!ret_val) + good_cnt++; + l++; + if (l == rx_ring->count) + l = 0; + /* + * time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 20)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + /* + * PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ + if (e1000_check_reset_block(&adapter->hw)) { + e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter 
*adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + hw->mac.serdes_has_link = false; + + /* + * On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(hw); + if (hw->mac.autoneg) + /* + * On some Phy/switch combinations, link establishment + * can take a few seconds more than expected. + */ + msleep(5000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000e_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex; + u8 autoneg; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->state); + + if (!if_running) { + /* Get control of and reset hardware */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + e1000e_power_up_phy(adapter); + + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + } + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + e_info("offline testing starting\n"); + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + e1000e_reset(adapter); + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + /* Online tests */ + + e_info("online testing starting\n"); + + /* register, eeprom, intr and loopback tests not run online */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__E1000_TESTING, &adapter->state); + } + + if (!if_running) { + e1000e_reset(adapter); + + if (adapter->flags & FLAG_HAS_AMT) + e1000e_release_hw_control(adapter); + } + + msleep_interruptible(4 * 1000); +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + 
struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; + + /* apply any specific unsupported masks here */ + if (adapter->flags & FLAG_NO_WAKE_UCAST) { + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err("Interface does not support directed (unicast) " + "frame wake-up packets\n"); + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev) || + (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_MAGIC | WAKE_PHY))) + return -EOPNOTSUPP; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (!hw->mac.ops.blink_led) + return 2; /* cycle on/off twice per second */ + + hw->mac.ops.blink_led(hw); + break; + + case ETHTOOL_ID_INACTIVE: + if (hw->phy.type == e1000_phy_ife) + e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + hw->mac.ops.led_off(hw); + hw->mac.ops.cleanup_led(hw); + break; + + case ETHTOOL_ID_ON: + adapter->hw.mac.ops.led_on(&adapter->hw); + break; + + case ETHTOOL_ID_OFF: + adapter->hw.mac.ops.led_off(&adapter->hw); + break; + } + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} 
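
A minimal standalone sketch (not part of the patch) of the interrupt-throttle arithmetic that e1000_set_coalesce() above applies in its normal range (rx_coalesce_usecs > 4): the requested microsecond value is first converted to an interrupt rate (1000000 / usecs), and the ITR register is then programmed in 256 ns units (1000000000 / (rate * 256)). The helper below and its name are hypothetical, included only to make the conversion concrete.

	#include <stdio.h>

	/* Mirrors the ITR math in e1000_set_coalesce(); illustrative only. */
	static unsigned int itr_reg_from_usecs(unsigned int usecs)
	{
		unsigned int rate = 1000000 / usecs;	/* target interrupts per second */

		return 1000000000 / (rate * 256);	/* ITR register counts of 256 ns */
	}

	int main(void)
	{
		/* 10 us between interrupts -> 100000 ints/s -> ITR value 39 */
		printf("ITR = %u\n", itr_reg_from_usecs(10));
		return 0;
	}
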
+ +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + if (!adapter->hw.mac.autoneg) + return -EINVAL; + + e1000e_reinit_locked(adapter); + + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 net_stats; + int i; + char *p = NULL; + + e1000e_get_stats64(netdev, &net_stats); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (e1000_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) &net_stats + + e1000_gstrings_stats[i].stat_offset; + break; + case E1000_STATS: + p = (char *) adapter + + e1000_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int e1000e_set_flags(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + int rc; + + need_reset = (data & ETH_FLAG_RXVLAN) != + (netdev->features & NETIF_F_HW_VLAN_RX); + + rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | + ETH_FLAG_TXVLAN); + + if (rc) + return rc; + + if (need_reset) { + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + } + + return 0; +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .get_rx_csum = e1000_get_rx_csum, + .set_rx_csum = e1000_set_rx_csum, + .get_tx_csum = e1000_get_tx_csum, + .set_tx_csum = e1000_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = e1000_set_tso, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000e_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_flags = ethtool_op_get_flags, + .set_flags = e1000e_set_flags, +}; + +void e1000e_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} diff --cc drivers/net/ethernet/intel/e1000e/ich8lan.c index 4e36978b8fd8,000000000000..54add27c8f76 mode 100644,000000..100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@@ 
-1,4111 -1,0 +1,4150 @@@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + */ + +#include "e1000.h" + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 + +#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_OFF2 << 8) | \ + (ID_LED_DEF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define 
E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) + +/* PHY Low Power Idle Control */ - #define I82579_LPI_CTRL PHY_REG(772, 20) - #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 ++#define I82579_LPI_CTRL PHY_REG(772, 20) ++#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 ++#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 + +/* EMI Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + ++/* KMRN FIFO Control and Status */ ++#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) ++#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 ++#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 ++ +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone :1; /* bit 0 Flash Cycle Done */ + u16 flcerr :1; /* bit 1 Flash Cycle Error */ + u16 dael :1; /* bit 2 Direct Access error Log */ + u16 berasesz :2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog :1; /* bit 5 flash cycle in Progress */ + u16 reserved1 :2; /* bit 13:6 Reserved */ + u16 reserved2 :6; /* bit 13:6 Reserved */ + u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 flcgo :1; /* 0 Flash Cycle Go */ + u16 flcycle :2; /* 2:1 Flash Cycle */ + u16 reserved :5; /* 7:3 Reserved */ + u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ + u16 flockdn :6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra :8; /* 
0:7 GbE region Read Access */ + u32 grwa :8; /* 8:15 GbE region Write Access */ + u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +/* ICH Flash Protected Region */ +union ich8_flash_protected_range { + struct ich8_pr { + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; /* 16:28 Protected Range Limit */ + u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ + } range; + u32 regval; +}; + +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data); +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_led_on_pchlan(struct e1000_hw *hw); +static s32 e1000_led_off_pchlan(struct e1000_hw *hw); +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); + +static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) +{ + return readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) + +static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; + ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, ctrl); + e1e_flush(); + udelay(10); + ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, ctrl); +} + +/** + * 
e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 fwsm; + s32 ret_val = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* + * The MAC-PHY interconnect may still be in SMBus mode + * after Sx->S0. If the manageability engine (ME) is + * disabled, then toggle the LANPHYPC Value bit to force + * the interconnect to PCIe mode. + */ + fwsm = er32(FWSM); + if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { + e1000_toggle_lanphypc_value_ich8lan(hw); + msleep(50); + + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if (hw->mac.type == e1000_pch2lan) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + + /* + * Reset the PHY before any access to it. Doing so, ensures that + * the PHY is in a known good state before we read/write PHY registers. + * The generic reset is sufficient here, because we haven't determined + * the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + phy->id = e1000_phy_unknown; + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + goto out; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + /* + * In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + goto out; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + goto out; + break; + } + phy->type = e1000e_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000e_phy_sw_reset; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000e_get_cable_length_m88; + phy->ops.get_info = e1000e_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. 
+ **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* + * We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + e_dbg("Cannot determine PHY addr. Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + usleep_range(1000, 2000); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; + phy->ops.get_info = e1000e_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.commit = e1000e_phy_sw_reset; + phy->ops.get_info = e1000e_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + + /* Can't read flash registers if the register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + nvm->type = e1000_nvm_flash_sw; + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* + * sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* + * find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. 
+ */ + nvm->flash_bank_size = (sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. + **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* LED operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000e_id_led_init; + /* blink LED */ + mac->ops.blink_led = e1000e_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000e_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pchlan: + case e1000_pch2lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + default: + break; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + /* Gate automatic PHY configuration by hardware on managed 82579 */ + if ((mac->type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + return 0; +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. The bits in + * the LPI Control register will remain set only if/when link is up. + **/ +static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_reg; + + if (hw->phy.type != e1000_phy_82579) + goto out; + + ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + if (ret_val) + goto out; + + if (hw->dev_spec.ich8lan.eee_disable) + phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; + else + phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; + + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); +out: + return ret_val; +} + +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. 
If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; ++ u16 phy_reg; + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + goto out; + } + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + - if (hw->phy.type == e1000_phy_82578) { - ret_val = e1000_link_stall_workaround_hv(hw); - if (ret_val) - goto out; - } - - if (hw->mac.type == e1000_pch2lan) { ++ switch (hw->mac.type) { ++ case e1000_pch2lan: + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + goto out; ++ /* fall-thru */ ++ case e1000_pchlan: ++ if (hw->phy.type == e1000_phy_82578) { ++ ret_val = e1000_link_stall_workaround_hv(hw); ++ if (ret_val) ++ goto out; ++ } ++ ++ /* ++ * Workaround for PCHx parts in half-duplex: ++ * Set the number of preambles removed from the packet ++ * when it is passed from the PHY to the MAC to prevent ++ * the MAC from misinterpreting the packet type. ++ */ ++ e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); ++ phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; ++ ++ if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) ++ phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); ++ ++ e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); ++ break; ++ default: ++ break; + } + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* Enable/Disable EEE after link up */ + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + goto out; + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + rc = e1000_init_phy_params_ich8lan(hw); + break; + case e1000_pchlan: + case e1000_pch2lan: + rc = e1000_init_phy_params_pchlan(hw); + break; + default: + break; + } + if (rc) + return rc; + + /* + * Disable Jumbo Frame support on parts with Intel 10/100 PHY or + * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). + */ + if ((adapter->hw.phy.type == e1000_phy_ife) || + ((adapter->hw.mac.type >= e1000_pch2lan) && + (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; + + hw->mac.ops.blink_led = NULL; + } + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type == e1000_phy_igp_3)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + ++ /* Enable workaround for 82579 w/ ME enabled */ ++ if ((adapter->hw.mac.type == e1000_pch2lan) && ++ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) ++ adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; ++ + /* Disable EEE by default until IEEE802.3az spec is finalized */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->hw.dev_spec.ich8lan.eee_disable = true; + + return 0; +} + +static DEFINE_MUTEX(nvm_mutex); + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_lock(&nvm_mutex); + + return 0; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_unlock(&nvm_mutex); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. 
+ **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; + + mutex_lock(&swflag_mutex); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("SW/FW/HW has locked the resource for too long.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("Failed to acquire the semaphore.\n"); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + mutex_unlock(&swflag_mutex); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + } else { + e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + + return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. 
+ * + **/ +static s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = er32(STRAP); + s32 ret_val = 0; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); + +out: + return ret_val; +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. + **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = 0; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + /* + * Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. + */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto out; + + /* + * Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if (!(hw->mac.type == e1000_pch2lan)) { + if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) + goto out; + } + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto out; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && + (hw->mac.type == e1000_pchlan)) || + (hw->mac.type == e1000_pch2lan)) { + /* + * HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto out; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto out; + } + + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, + &reg_data); + if (ret_val) + goto out; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, &reg_addr); + if (ret_val) + goto out; + + /* Save off the PHY page for future writes. 
*/ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, + reg_data); + if (ret_val) + goto out; + } + +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. + **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = 0; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + goto out; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK; + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK; + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. 
+ * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val = 0; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000e_read_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto out; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + goto out; + + udelay(20); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + e1e_flush(); + udelay(20); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + udelay(20); + +out: + return ret_val; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. + **/ +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (!(hw->mac.type == e1000_pch2lan)) { + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto out; + } + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto out; + + mac_reg = er32(PHY_CTRL); + + ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } + /* Restart auto-neg to activate the bits */ + if (!e1000_check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + +out: + hw->phy.ops.release(hw); + + return ret_val; +} + + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + +/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. 
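+ * @hw: pointer to the HW structure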
+ **/ +static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_data; + + if (hw->mac.type != e1000_pchlan) + return ret_val; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + goto out; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ - ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); ++ ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* + * Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. + */ + if (hw->phy.revision < 2) { + e1000e_phy_sw_reset(hw); + ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + goto out; + + /* + * Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. + */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + goto out; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, + phy_data & 0x00FF); +release: + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + mac_reg = er32(RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = er32(RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = 0; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + if (hw->mac.type != e1000_pch2lan) + goto out; + + /* disable Rx path while enabling/disabling workaround */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + if (ret_val) + goto out; + 
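+ /* + * The enable path below seeds the MAC with the complemented CRC of + * each receive address, mirrors those addresses into the PHY, and + * adjusts MAC and PHY register settings to pass jumbo frames; the + * disable path restores the hardware defaults. + */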
+ if (enable) { + /* + * Write Rx addresses (rar_entry_count for RAL/H, +4 for + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + u8 mac_addr[ETH_ALEN] = {0}; + u32 addr_high, addr_low; + + addr_high = er32(RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = er32(RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg |= E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + goto out; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + goto out; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + goto out; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + goto out; + + /* Enable jumbo frame workaround in the PHY */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x1A << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + goto out; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00); + if (ret_val) + goto out; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + if (ret_val) + goto out; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(0xF << 14); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + goto out; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + goto out; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + goto out; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + goto out; + + /* Write PHY register values back to h/w defaults */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + goto out; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + goto out; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + if 
(ret_val) + goto out; + } + + /* re-enable Rx path after enabling/disabling workaround */ + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); + +out: + return ret_val; +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (hw->mac.type != e1000_pch2lan) + goto out; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + +out: + return ret_val; +} + +/** + * e1000_k1_gig_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts + **/ +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 status_reg = 0; + u32 mac_reg; ++ u16 phy_reg; + + if (hw->mac.type != e1000_pch2lan) + goto out; + + /* Set K1 beacon duration based on 1Gbps speed or otherwise */ + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); + if (ret_val) + goto out; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + - if (status_reg & HV_M_STATUS_SPEED_1000) ++ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); ++ if (ret_val) ++ goto out; ++ ++ if (status_reg & HV_M_STATUS_SPEED_1000) { + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; - else ++ phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; ++ } else { + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; - ++ phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; ++ } + ew32(FEXTNVM4, mac_reg); ++ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + } + +out: + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. + **/ +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + if (hw->mac.type != e1000_pch2lan) + return; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + ew32(EXTCNF_CTRL, extcnf_ctrl); + return; +} + +/** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication the MAC has finished configuring the + * PHY after a software reset. + **/ +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) +{ + u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; + + /* Wait for basic configuration completes before proceeding */ + do { + data = er32(STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + udelay(100); + } while ((!data) && --loop); + + /* + * If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. 
+ */ + if (loop == 0) + e_dbg("LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); +} + +/** + * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset + * @hw: pointer to the HW structure + **/ +static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 reg; + + if (e1000_check_reset_block(hw)) + goto out; + + /* Allow time for h/w to get to quiescent state after reset */ + usleep_range(10000, 20000); + + /* Perform any necessary post-reset workarounds */ + switch (hw->mac.type) { + case e1000_pchlan: + ret_val = e1000_hv_phy_workarounds_ich8lan(hw); + if (ret_val) + goto out; + break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + goto out; + break; + default: + break; + } + + /* Clear the host wakeup bit after lcd reset */ + if (hw->mac.type >= e1000_pchlan) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &reg); + reg &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + goto out; + + /* Configure the LCD with the OEM bits in NVM */ + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, + I82579_LPI_UPDATE_TIMER); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, + 0x1387); +release: + hw->phy.ops.release(hw); + } + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + +out: + return ret_val; +} + +/** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if the OEM write + * bit is disabled in the NVM, writing the LPLU bits in the MAC will not set + * the PHY speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. 
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val = 0; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type == e1000_phy_ife) + return ret_val; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return 0; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
+ **/ +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u8 sig_byte = 0; + s32 ret_val = 0; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + eecd = er32(EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return 0; + } + e_dbg("Unable to determine valid NVM bank via EEC - " + "reading flash signature\n"); + /* fall-thru */ + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return 0; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. + **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset+i].modified) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (hsfsts.hsf_status.fldesvalid == 0) { + e_dbg("Flash descriptor invalid. 
" + "SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* + * Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (hsfsts.hsf_status.flcinprog == 0) { + /* + * There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + s32 i = 0; + + /* + * Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcinprog == 0) { + ret_val = 0; + break; + } + udelay(1); + } + if (ret_val == 0) { + /* + * Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + e_dbg("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. + **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone == 1) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + return 0; + + return ret_val; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. 
+ **/ +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) + return ret_val; + + *data = (u8)word; + + return 0; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != 0) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* + * Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (ret_val == 0) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* + * If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* Repeat for some time before giving up. */ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + e_dbg("Timeout error - flash cycle " + "did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. 
+ **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset+i].modified = true; + dev_spec->shadow_ram[offset+i].value = data[i]; + } + + nvm->ops.release(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* + * We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + /* + * Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + + /* + * If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + udelay(100); + /* Write the bytes to the new bank. */ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + udelay(100); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* + * Don't bother writing the segment valid bits if sector + * programming failed. 
+ */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* + * Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); + if (ret_val) + goto release; + + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) + goto release; + + /* + * And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* + * Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + e1000e_reload_nvm(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. + * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + /* + * Read 0x19 and check bit 6. If this bit is 0, the checksum + * needs to be fixed. This bit is an indication that the NVM + * was prepared by OEM software and did not calculate the + * checksum...a likely scenario. + */ + ret_val = e1000_read_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + + if ((data & 0x40) == 0) { + data |= 0x40; + ret_val = e1000_write_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only + * @hw: pointer to the HW structure + * + * To prevent malicious write/erase of the NVM, set it to be read-only + * so that the hardware ignores all write/erase cycles of the NVM via + * the flash control registers. The shadow-ram copy of the NVM will + * still be updated, however any updates to this copy will not stick + * across driver reloads. 
+ **/ +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_flash_protected_range pr0; + union ich8_hws_flash_status hsfsts; + u32 gfpreg; + + nvm->ops.acquire(hw); + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* Write-protect GbE Sector of NVM */ + pr0.regval = er32flash(ICH_FLASH_PR0); + pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; + pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); + pr0.range.wpe = true; + ew32flash(ICH_FLASH_PR0, pr0.regval); + + /* + * Lock down a subset of GbE Flash Control Registers, e.g. + * PR0 to prevent the write-protection from being lifted. + * Once FLOCKDN is set, the registers protected by it cannot + * be written until FLOCKDN is cleared by a hardware reset. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + hsfsts.hsf_status.flockdn = true; + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + nvm->ops.release(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. + * + * Writes one/two bytes to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + if (size < 1 || size > 2 || data > size * 0xff || + offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size -1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + ew32flash(ICH_FLASH_FDATA0, flash_data); + + /* + * check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (!ret_val) + break; + + /* + * If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* Repeat for some time before giving up. */ + continue; + if (hsfsts.hsf_status.flcdone == 0) { + e_dbg("Timeout error - flash cycle " + "did not complete."); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. 
+ **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); + udelay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* + * Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration ; j++) { + do { + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* + * Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* + * Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. 
+ */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_ERASE_COMMAND_TIMEOUT); + if (ret_val == 0) + break; + + /* + * Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* repeat for some time before giving up */ + continue; + else if (hsfsts.hsf_status.flcdone == 0) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. + **/ +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return 0; +} + +/** + * e1000_id_led_init_pchlan - store LED configurations + * @hw: pointer to the HW structure + * + * PCH does not control LEDs via the LEDCTL register, rather it uses + * the PHY LED configuration register. + * + * PCH also does not have an "always on" or "always off" mode which + * complicates the ID feature. Instead of using the "on" mode to indicate + * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()), + * use "link_up" mode. The LEDs will still ID on request if there is no + * link based on logic in e1000_led_[on|off]_pchlan(). 
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ goto out;
+
+ mac->ledctl_default = er32(LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
+ * register, so the bus width is hard coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+
+ ret_val = e1000e_get_bus_info_pcie(hw);
+
+ /*
+ * ICH devices are "PCI Express"-ish. They have
+ * a configuration space, but do not contain
+ * PCI Express Capability registers, so bus width
+ * must be hardcoded.
+ */
+ if (bus->width == e1000_bus_width_unknown)
+ bus->width = e1000_bus_width_pcie_x1;
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 reg;
+ u32 ctrl, kab;
+ s32 ret_val;
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /*
+ * Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC
+ * with the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac.type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ ew32(PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. 
*/ + ew32(PBS, E1000_PBS_16K); + } + + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting*/ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); + if (ret_val) + return ret_val; + + if (reg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + + ctrl = er32(CTRL); + + if (!e1000_check_reset_block(hw)) { + /* + * Full-chip reset requires MAC and PHY reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + e_dbg("Issuing a global reset to ich8lan\n"); + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ + msleep(20); + + if (!ret_val) + mutex_unlock(&swflag_mutex); + + if (ctrl & E1000_CTRL_PHY_RST) { + ret_val = hw->phy.ops.get_cfg_done(hw); + if (ret_val) + goto out; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + if (ret_val) + goto out; + } + + /* + * For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. + */ + if (hw->mac.type == e1000_pchlan) + ew32(CRC_OFFSET, 0x65656565); + + ew32(IMC, 0xffffffff); + er32(ICR); + + kab = er32(KABGTXD); + kab |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, kab); + +out: + return ret_val; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) + e_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* + * The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Disable wakeup by clearing the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. 
+ */ + if (hw->phy.type == e1000_phy_82578) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = e1000_setup_link_ich8lan(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL(0), txdctl); + txdctl = er32(TXDCTL(1)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL(1), txdctl); + + /* + * ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return 0; +} +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. + **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } + + /* + * work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + ew32(RFCTL, reg); +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
+ **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (e1000_check_reset_block(hw)) + return 0; + + /* + * ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + /* Workaround h/w hang when Tx flow control enabled */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + } + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Continue to configure the copper link. */ + ret_val = e1000_setup_copper_link_ich8lan(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_82577)) { + ew32(FCRTV_PCH, hw->fc.refresh_time); + + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* + * Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. 
+ */ + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + ®_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + reg_data); + if (ret_val) + return ret_val; + + switch (hw->phy.type) { + case e1000_phy_igp_3: + ret_val = e1000e_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_bm: + case e1000_phy_82578: + ret_val = e1000e_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_82577: + case e1000_phy_82579: + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_ife: + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); + if (ret_val) + return ret_val; + break; + default: + break; + } + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. + **/ +static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. + **/ +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return 0; + + /* + * Make sure link is up before proceeding. If not just return. 
+ * Attempting this while link is negotiating fouled up link
+ * stability
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (!link)
+ return 0;
+
+ for (i = 0; i < 10; i++) {
+ /* read once to clear */
+ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return 0;
+
+ /* Issue PHY reset */
+ e1000_phy_hw_reset(hw);
+ mdelay(5);
+ }
+ /* Disable GigE link negotiation */
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ ew32(PHY_CTRL, phy_ctrl);
+
+ /*
+ * Call gig speed drop workaround on Gig disable before accessing
+ * any PHY registers
+ */
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* unable to acquire PCS lock */
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - true
+ * /disabled - false).
+ **/
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+ if (hw->mac.type != e1000_ich8lan) {
+ e_dbg("Workaround applies to ICH8 only.\n");
+ return;
+ }
+
+ dev_spec->kmrn_lock_loss_workaround_enabled = state;
+}
+
+/**
+ * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ * 1) disable gigabit link
+ * 2) write VR power-down enable
+ * 3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+ u16 data;
+ u8 retry = 0;
+
+ if (hw->phy.type != e1000_phy_igp_3)
+ return;
+
+ /* Try the workaround twice (if needed) */
+ do {
+ /* Disable link */
+ reg = er32(PHY_CTRL);
+ reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ ew32(PHY_CTRL, reg);
+
+ /*
+ * Call gig speed drop workaround on Gig disable before
+ * accessing any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* Write VR power-down enable */
+ e1e_rphy(hw, IGP3_VR_CTRL, &data);
+ data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+ /* Read it back and test */
+ e1e_rphy(hw, IGP3_VR_CTRL, &data);
+ data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = er32(CTRL);
+ ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+}
+
+/**
+ * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
+ * LPLU, Gig disable, MDIC PHY reset):
+ * 1) Set Kumeran Near-end loopback
+ * 2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with IGP_3 Phy. 
+ **/ +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + + if ((hw->mac.type != e1000_ich8lan) || + (hw->phy.type != e1000_phy_igp_3)) + return; + + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + ®_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); +} + +/** + * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation + * to a lower speed. For PCH and newer parts, the OEM bits PHY register + * (LED, GbE disable and LPLU configurations) also needs to be written. + **/ +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) +{ + u32 phy_ctrl; + s32 ret_val; + + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; + ew32(PHY_CTRL, phy_ctrl); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + **/ +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type != e1000_pch2lan) + return; + + fwsm = er32(FWSM); + if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { + u16 phy_id1, phy_id2; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to acquire PHY semaphore in resume\n"); + return; + } + + /* Test access to the PHY registers by reading the ID regs */ + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); + if (ret_val) + goto release; + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); + if (ret_val) + goto release; + + if (hw->phy.id == ((u32)(phy_id1 << 16) | + (u32)(phy_id2 & PHY_REVISION_MASK))) + goto release; + + e1000_toggle_lanphypc_value_ich8lan(hw); + + hw->phy.ops.release(hw); + msleep(50); + e1000_phy_hw_reset(hw); + msleep(50); + return; + } + +release: + hw->phy.ops.release(hw); + + return; +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. 
+ **/ +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + ew32(LEDCTL, hw->mac.ledctl_mode2); + return 0; +} + +/** + * e1000_led_off_ich8lan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | + IFE_PSCL_PROBE_LEDS_OFF)); + + ew32(LEDCTL, hw->mac.ledctl_mode1); + return 0; +} + +/** + * e1000_setup_led_pchlan - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use. + **/ +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); +} + +/** + * e1000_cleanup_led_pchlan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); +} + +/** + * e1000_led_on_pchlan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode2; + u32 i, led; + + /* + * If no link, then turn LED on by setting the invert bit + * for each LED that's mode is "link_up" in ledctl_mode2. + */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_led_off_pchlan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode1; + u32 i, led; + + /* + * If no link, then turn LED off by clearing the invert bit + * for each LED that's mode is "link_up" in ledctl_mode1. + */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset + * @hw: pointer to the HW structure + * + * Read appropriate register for the config done bit for completion status + * and configure the PHY through s/w for EEPROM-less parts. + * + * NOTE: some silicon which is EEPROM-less will fail trying to read the + * config done bit, so only an error is logged and continues. If we were + * to return with error, EEPROM-less silicon would not be able to be reset + * or change link. 
+ **/ +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 bank = 0; + u32 status; + + e1000e_get_cfg_done(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + e_dbg("Auto Read Done did not complete\n"); + ret_val = 0; + } + } + + /* Clear PHY Reset Asserted bit */ + status = er32(STATUS); + if (status & E1000_STATUS_PHYRA) + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + else + e_dbg("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (((er32(EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000e_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + e_dbg("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. 
+ **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + e1000e_clear_hw_cntrs_base(hw); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +static struct e1000_mac_operations ich8_mac_ops = { + .id_led_init = e1000e_id_led_init, + /* check_mng_mode dependent on mac type */ + .check_for_link = e1000_check_for_copper_link_ich8lan, + /* cleanup_led dependent on mac type */ + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .set_lan_id = e1000_set_lan_id_single_port, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + /* led_on dependent on mac type */ + /* led_off dependent on mac type */ + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface= e1000_setup_copper_link_ich8lan, + /* id_led_init dependent on mac type */ +}; + +static struct e1000_phy_operations ich8_phy_ops = { + .acquire = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit = NULL, + .get_cfg_done = e1000_get_cfg_done_ich8lan, + .get_cable_length = e1000e_get_cable_length_igp_2, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_release_swflag_ich8lan, + .reset = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_reg = e1000e_write_phy_reg_igp, +}; + +static struct e1000_nvm_operations ich8_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .update = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_IS_ICH + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_ich8lan, + 
.mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_ich10_info = { + .mac = e1000_ich10lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_pch_info = { + .mac = e1000_pchlan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS, + .pba = 26, + .max_hw_frame_size = 4096, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_pch2_info = { + .mac = e1000_pch2lan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; diff --cc drivers/net/ethernet/intel/e1000e/lib.c index 7898a67d6505,000000000000..0893ab107adf mode 100644,000000..100644 --- a/drivers/net/ethernet/intel/e1000e/lib.c +++ b/drivers/net/ethernet/intel/e1000e/lib.c @@@ -1,2692 -1,0 +1,2693 @@@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +/** + * e1000e_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + struct e1000_adapter *adapter = hw->adapter; + u16 pcie_link_status, cap_offset; + + cap_offset = adapter->pdev->pcie_cap; + if (!cap_offset) { + bus->width = e1000_bus_width_unknown; + } else { + pci_read_config_word(adapter->pdev, + cap_offset + PCIE_LINK_STATUS, + &pcie_link_status); + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return 0; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* + * The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = er32(STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + e1e_flush(); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + e1e_flush(); +} + +/** + * e1000e_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. 
+ **/ +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000e_rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + e_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + e1000e_rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + goto out; + + /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ + if (!((nvm_data & NVM_COMPAT_LOM) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || - (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) ++ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) || ++ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES))) + goto out; + + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + - if (nvm_alt_mac_addr_offset == 0xFFFF) { ++ if ((nvm_alt_mac_addr_offset == 0xFFFF) || ++ (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; - } + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* + * We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + e1000e_rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * e1000e_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. 
+ **/ +void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* + * Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* + * The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
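The worked example embedded in the hash comment above can be checked outside the driver. The following is a minimal standalone sketch, not part of this commit, that only mirrors the case-0 shift arithmetic of e1000_hash_mc_addr for the 128-register MTA (0xFFF mask) assumed in that example:

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone sketch (not driver code): reproduces the case-0 hash from
 * the e1000_hash_mc_addr comment for a 4096-bit vector (mask 0xFFF).
 */
int main(void)
{
	const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	uint32_t hash_mask = (128 * 32) - 1;	/* 128 MTA registers -> 0xFFF */
	uint8_t bit_shift = 0;
	uint32_t hash;

	/* number of left-shifts where 0xFF still falls within the mask */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;			/* ends up as 4 here */

	hash = hash_mask & ((mc[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc[5] << bit_shift));

	/* prints hash = 0x563, the case-0 value quoted in the comment */
	printf("hash = 0x%03X\n", (unsigned int)hash);
	return 0;
}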
+ **/ +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + e1e_flush(); +} + +/** + * e1000e_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + er32(MPTC); + er32(BPTC); +} + +/** + * e1000e_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000e_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return ret_val; /* No link detected */ + + mac->get_link_status = false; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + return ret_val; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
*/ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* + * If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + return 0; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + /* + * Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return 0; +} + +/** + * e1000e_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
+ **/ +s32 e1000e_setup_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + /* + * In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + return 0; + + /* + * If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = mac->ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc.pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + /* + * Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. 
+ */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + ew32(TXCW, txcw); + mac->txcw = txcw; + + return 0; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + /* + * If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + usleep_range(10000, 20000); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = 1; + /* + * AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = 0; + } else { + mac->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + + return 0; +} + +/** + * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + e1000e_config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* + * Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(CTRL, ctrl); + e1e_flush(); + usleep_range(1000, 2000); + + /* + * For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + e_dbg("No signal detected\n"); + } + + return 0; +} + +/** + * e1000e_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000e_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + e1e_flush(); +} + +/** + * e1000e_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* + * Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* + * We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + fcrtl |= E1000_FCRTL_XONE; + fcrth = hw->fc.high_water; + } + ew32(FCRTL, fcrtl); + ew32(FCRTH, fcrth); + + return 0; +} + +/** + * e1000e_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 e1000e_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + ctrl = er32(CTRL); + + /* + * Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ew32(CTRL, ctrl); + + return 0; +} + +/** + * e1000e_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. 
If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* + * Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + + /* + * Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* + * Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + e_dbg("Copper PHY and Auto Neg " + "has not completed.\n"); + return ret_val; + } + + /* + * The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = + e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* + * Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\r\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = " + "Rx PAUSE frames only.\r\n"); + } + } + /* + * For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); + } + /* + * For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); + } else { + /* + * Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\r\n"); + } + + /* + * Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* + * Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + e_dbg("%u Mbps, %s Duplex\n", + *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, + *duplex == FULL_DUPLEX ? 
"Full" : "Half"); + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + e_dbg("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 e1000e_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +s32 e1000e_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + if (hw->mac.ops.setup_led != e1000e_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->mac.ledctl_mode1); + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. 
+ **/ +s32 e1000e_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* + * set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + e_dbg("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. 
+ **/ +void e1000e_reset_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + goto out; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + ew32(AIT, 0); +out: + return; +} + +/** + * e1000e_update_adaptive - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000e_update_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + goto out; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + ew32(AIT, mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + ew32(AIT, 0); + } + } +out: + return; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
+ **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
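As a usage note, the request/grant helpers above are meant to bracket any bit-banged EEPROM access, just as e1000e_write_nvm_spi further down pairs nvm->ops.acquire() with nvm->ops.release(). A minimal hypothetical caller, assuming it lives in-tree and includes "e1000.h", might look like this:

/*
 * Hypothetical in-tree caller, for illustration only: every path after a
 * successful e1000e_acquire_nvm() must reach e1000e_release_nvm(),
 * otherwise the REQ bit stays set and later grants are blocked.
 */
static s32 example_eeprom_op(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1000e_acquire_nvm(hw);	/* sets EECD.REQ, waits for GNT */
	if (ret_val)
		return ret_val;

	/* ... shift command/data bits to or from the EEPROM here ... */

	e1000e_release_nvm(hw);			/* terminates command, clears REQ */
	return 0;
}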
+ **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + e1e_flush(); + udelay(1); + + /* + * Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + e_dbg("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + usleep_range(10000, 20000); + nvm->ops.release(hw); + return 0; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + e_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + /* + * if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + e_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + e_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + e_dbg("NVM PBA number section 
invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + e_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = er32(RAH(0)); + rar_low = er32(RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + e_dbg("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + e_dbg("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
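The checksum pair above maintains a simple invariant: the update helper stores whatever 16-bit value makes the sum of every word up to and including the checksum word come out to NVM_SUM (0xBABA), and the validate helper recomputes that sum. A small standalone sketch, not part of this commit and assuming NVM_CHECKSUM_REG is 0x003F as in the e1000e defines, demonstrates the wraparound arithmetic:

#include <stdio.h>
#include <stdint.h>

#define NVM_CHECKSUM_REG 0x003F		/* assumed value of the driver define */
#define NVM_SUM          0xBABA		/* stated in the kernel-doc above */

int main(void)
{
	uint16_t nvm[NVM_CHECKSUM_REG + 1] = { 0 };
	uint16_t sum = 0;
	int i;

	/* pretend words 0..0x3E hold arbitrary image data */
	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
		nvm[i] = (uint16_t)(0x1111 * i);
		sum += nvm[i];
	}

	/* what the update helper writes into the checksum word */
	nvm[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);

	/* what the validate helper recomputes */
	sum = 0;
	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += nvm[i];

	printf("total = 0x%04X (expected 0xBABA)\n", sum);
	return 0;
}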
+ **/ +void e1000e_reload_nvm(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + if (!(hw->mac.arc_subsystem_valid)) { + e_dbg("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if ((hicr & E1000_HICR_EN) == 0) { + e_dbg("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + e_dbg("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode_generic - check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!e1000e_check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + + /* + * If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + + /* Read in the header. Length and offset are in dwords. 
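e1000_calculate_checksum() above returns the two's complement of the byte sum, which is what lets the manageability cookie and command-header code treat "structure plus embedded checksum sums to zero" as the validity test. A short sketch of that property over a plain buffer (the 8-byte block and its layout are made up for illustration):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Same rule as e1000_calculate_checksum(): two's complement of the byte sum. */
static uint8_t calc_checksum(const uint8_t *buf, size_t len)
{
    uint8_t sum = 0;
    size_t i;

    for (i = 0; i < len; i++)
        sum += buf[i];
    return (uint8_t)(0 - sum);
}

int main(void)
{
    /* Hypothetical 8-byte block whose last byte holds the checksum. */
    uint8_t blk[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x00 };
    uint8_t sum = 0;
    size_t i;

    blk[7] = calc_checksum(blk, sizeof(blk));   /* checksum byte was 0 */

    for (i = 0; i < sizeof(blk); i++)           /* re-sum, checksum included */
        sum += blk[i];
    assert(sum == 0);                           /* whole block sums to zero */
    return 0;
}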
*/ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* + * If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + goto out; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + +out: + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area. 
+ */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.has_fwsm) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + + factps = er32(FACTPS); + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) { + ret_val = true; + goto out; + } + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + +out: + return ret_val; +} diff --cc drivers/net/ethernet/intel/e1000e/netdev.c index d0fdb512e849,000000000000..b1f925bfb8b6 mode 100644,000000..100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@@ -1,6312 -1,0 +1,6387 @@@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000.h" + +#define DRV_EXTRAVERSION "-k" + - #define DRV_VERSION "1.3.16" DRV_EXTRAVERSION ++#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION +char e1000e_driver_name[] = "e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_82574] = &e1000_82574_info, + [board_82583] = &e1000_82583_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, + [board_ich10lan] = &e1000_ich10_info, + [board_pchlan] = &e1000_pch_info, + [board_pch2lan] = &e1000_pch2_info, +}; + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ + +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ + +static const struct e1000_reg_info e1000_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* Rx Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN, "RDLEN"}, + {E1000_RDH, "RDH"}, + {E1000_RDT, "RDT"}, + {E1000_RDTR, "RDTR"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_ERT, "ERT"}, + {E1000_RDBAL, "RDBAL"}, + {E1000_RDBAH, "RDBAH"}, + {E1000_RDFH, "RDFH"}, + {E1000_RDFT, "RDFT"}, + {E1000_RDFHS, "RDFHS"}, + {E1000_RDFTS, "RDFTS"}, + {E1000_RDFPC, "RDFPC"}, + + /* Tx Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL, "TDBAL"}, + {E1000_TDBAH, "TDBAH"}, + {E1000_TDLEN, "TDLEN"}, + {E1000_TDH, "TDH"}, + {E1000_TDT, "TDT"}, + {E1000_TIDV, "TIDV"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TADV, "TADV"}, + {E1000_TARC(0), "TARC"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFTS, "TDFTS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* + * e1000_regdump - register printout routine + */ +static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RXDCTL(0): 
+ for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_RXDCTL(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TXDCTL(n)); + break; + case E1000_TARC(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TARC(n)); + break; + default: + printk(KERN_INFO "%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); + printk(KERN_INFO "%-15s ", rname); + for (n = 0; n < 2; n++) + printk(KERN_CONT "%08x ", regs[n]); + printk(KERN_CONT "\n"); +} + +/* + * e1000e_dump - Print registers, Tx-ring and Rx-ring + */ +static void e1000e_dump(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_reg_info *reginfo; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc; + struct my_u0 { + u64 a; + u64 b; + } *u0; + struct e1000_buffer *buffer_info; + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_packet_split *rx_desc_ps; + struct e1000_rx_desc *rx_desc; + struct my_u1 { + u64 a; + u64 b; + u64 c; + u64 d; + } *u1; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + printk(KERN_INFO "Device Name state " + "trans_start last_rx\n"); + printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", + netdev->name, netdev->state, netdev->trans_start, + netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + printk(KERN_INFO " Register Name Value\n"); + for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; + reginfo->name; reginfo++) { + e1000_regdump(hw, reginfo); + } + + /* Print Tx Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" + " leng ntw timestamp\n"); + buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; + printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); + + /* Print Tx Ring */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | 
+ * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Legacy format\n"); + printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Context format\n"); + printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Data format\n"); + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " + "%04X %3X %016llX %p", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb); + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC/U\n"); + else if (i == tx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, phys_to_virt(buffer_info->dma), + buffer_info->length, true); + } + + /* Print Rx Ring Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC]\n"); + printk(KERN_INFO " %5d %5X %5X\n", 0, + rx_ring->next_to_use, rx_ring->next_to_clean); + + /* Print Rx Ring */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); + switch (adapter->rx_ps_pages) { + case 1: + case 2: + case 3: + /* [Extended] Packet Split Receive Descriptor Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address 0 [63:0] | + * +-----------------------------------------------------+ + * 8 | Buffer Address 1 [63:0] | + * +-----------------------------------------------------+ + * 16 | Buffer Address 2 [63:0] | + * +-----------------------------------------------------+ + * 24 | Buffer Address 3 [63:0] | + * +-----------------------------------------------------+ + */ + printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " + "[buffer 1 63:0 ] " + "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " + "[bi->skb] <-- Ext Pkt Split format\n"); + /* [Extended] Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 13 12 8 7 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | + * | Checksum | Ident | | Queue | | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " + "[vl l0 ee es] " + "[ l3 l2 l1 hs] [reserved ] ---------------- " + "[bi->skb] <-- Ext Rx Write-Back format\n"); + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + rx_desc_ps = 
E1000_RX_DESC_PS(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc_ps; + staterr = + le32_to_cpu(rx_desc_ps->wb.middle.status_error); + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX %016llX %016llX " + "---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb); + } else { + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb); + + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(buffer_info->dma), + adapter->rx_ps_bsize0, true); + } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + } + break; + default: + case 0: + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + printk(KERN_INFO "Rl[desc] [address 63:0 ] " + "[vl er S cks ln] [bi->dma ] [bi->skb] " + "<-- Legacy format\n"); + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + u0 = (struct my_u0 *)rx_desc; + printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " + "%016llX %p", i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + phys_to_virt(buffer_info->dma), + adapter->rx_buffer_len, true); + } + } + +exit: + return; +} + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, + struct net_device *netdev, struct sk_buff *skb, + u8 status, __le16 vlan) +{ + u16 tag = le16_to_cpu(vlan); + skb->protocol = eth_type_trans(skb, netdev); + + if (status & E1000_RXD_STAT_VP) + __vlan_hwaccel_put_tag(skb, tag); + + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_rx_checksum - Receive Checksum Offload + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: 
socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + /* TCP/UDP checksum error bit is set */ + if (errors & E1000_RXD_ERR_TCPE) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (status & E1000_RXD_STAT_TCPCS) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + /* + * IP fragment with UDP payload + * Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. + */ + __sum16 sum = (__force __sum16)htons(csum); + skb->csum = csum_unfold(~sum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + adapter->hw_csum_good++; +} + ++/** ++ * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() ++ * @hw: pointer to the HW structure ++ * @tail: address of tail descriptor register ++ * @i: value to write to tail descriptor register ++ * ++ * When updating the tail register, the ME could be accessing Host CSR ++ * registers at the same time. Normally, this is handled in h/w by an ++ * arbiter but on some parts there is a bug that acknowledges Host accesses ++ * later than it should which could result in the descriptor register to ++ * have an incorrect value. Workaround this by checking the FWSM register ++ * which has bit 24 set while ME is accessing Host CSR registers, wait ++ * if it is set and try again a number of times. 
++ **/ ++static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, ++ unsigned int i) ++{ ++ unsigned int j = 0; ++ ++ while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && ++ (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) ++ udelay(50); ++ ++ writel(i, tail); ++ ++ if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) ++ return E1000_ERR_SWFW_SYNC; ++ ++ return 0; ++} ++ ++static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) ++{ ++ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); ++ struct e1000_hw *hw = &adapter->hw; ++ ++ if (e1000e_update_tail_wa(hw, tail, i)) { ++ u32 rctl = er32(RCTL); ++ ew32(RCTL, rctl & ~E1000_RCTL_EN); ++ e_err("ME firmware caused invalid RDT - resetting\n"); ++ schedule_work(&adapter->reset_task); ++ } ++} ++ ++static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) ++{ ++ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); ++ struct e1000_hw *hw = &adapter->hw; ++ ++ if (e1000e_update_tail_wa(hw, tail, i)) { ++ u32 tctl = er32(TCTL); ++ ew32(TCTL, tctl & ~E1000_TCTL_EN); ++ e_err("ME firmware caused invalid TDT - resetting\n"); ++ schedule_work(&adapter->reset_task); ++ } ++} ++ +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); ++ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) ++ e1000e_update_rdt_wa(adapter, i); ++ else ++ writel(i, adapter->hw.hw_addr + rx_ring->tail); + } + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (j >= adapter->rx_ps_pages) { + /* all unused desc entries get hw null ptr */ + rx_desc->read.buffer_addr[j + 1] = + ~cpu_to_le64(0); + continue; + } + if (!ps_page->page) { + ps_page->page = alloc_page(gfp); + if (!ps_page->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + ps_page->dma = dma_map_page(&pdev->dev, + ps_page->page, + 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + ps_page->dma)) { + dev_err(&adapter->pdev->dev, + "Rx DMA page map failed\n"); + adapter->rx_dma_failed++; + goto no_buffers; + } + } + /* + * Refresh the desc even if buffer_addrs + * didn't change because each write-back + * erases this info. + */ + rx_desc->read.buffer_addr[j + 1] = + cpu_to_le64(ps_page->dma); + } + + skb = __netdev_alloc_skb_ip_align(netdev, + adapter->rx_ps_bsize0, + gfp); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + /* cleanup skb */ + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + break; + } + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); - writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); ++ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) ++ e1000e_update_rdt_wa(adapter, i << 1); ++ else ++ writel(i << 1, ++ adapter->hw.hw_addr + rx_ring->tail); + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @cleaned_count: number of buffers to allocate this pass + **/ + +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - 16 /* for skb_reserve */; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(gfp); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
*/ + wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); ++ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) ++ e1000e_update_rdt_wa(adapter, i); ++ else ++ writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + dma_unmap_single(&pdev->dev, + buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* + * !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. 
In fact, we + * need to toss every packet with the EOP bit clear and the + * next frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + /* All receives must fit into a single buffer */ + e_dbg("Receive packet consumed multiple buffers\n"); + /* recycle */ + buffer_info->skb = skb; + if (status & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) + length -= 4; + + total_rx_bytes += length; + total_rx_packets++; + + /* + * code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack + */ + if (length < copybreak) { + struct sk_buff *new_skb = + netdev_alloc_skb_ip_align(netdev, length); + if (new_skb) { + skb_copy_to_linear_data_offset(new_skb, + -NET_IP_ALIGN, + (skb->data - + NET_IP_ALIGN), + (length + + NET_IP_ALIGN)); + /* save the skb in buffer_info as good */ + buffer_info->skb = skb; + skb = new_skb; + } + /* else just continue with the old one */ + } + /* end copybreak code */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +static void e1000_put_txbuf(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +static void e1000_print_hw_hang(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + print_hang_task); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); + struct e1000_hw *hw = &adapter->hw; + u16 phy_status, phy_1000t_status, phy_ext_status; + u16 pci_status; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1e_rphy(hw, PHY_STATUS, &phy_status); + e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); + e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); + + pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); + + /* detected 
Hardware unit hang */ + e_err("Detected Hardware Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n" + "MAC Status <%x>\n" + "PHY Status <%x>\n" + "PHY 1000BASE-T Status <%x>\n" + "PHY Extended Status <%x>\n" + "PCI Status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status, + er32(STATUS), + phy_status, + phy_1000t_status, + phy_ext_status, + pci_status); +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for (; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + } + + e1000_put_txbuf(adapter, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + if (i == tx_ring->next_to_use) + break; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (count && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
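The wake-up test just above compares e1000_desc_unused() against TX_WAKE_THRESHOLD; that helper is plain ring arithmetic in which one slot is always left unused, so equal producer and consumer indices can only mean an empty ring. A standalone sketch with a hypothetical ring size of 256:

#include <assert.h>

/* Same arithmetic as e1000_desc_unused(): free slots between the software
 * producer (next_to_use) and consumer (next_to_clean), always keeping one
 * descriptor unused. */
static int ring_unused(int count, int next_to_clean, int next_to_use)
{
    if (next_to_clean > next_to_use)
        return next_to_clean - next_to_use - 1;
    return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
    /* No wrap: the clean index is ahead of the use index. */
    assert(ring_unused(256, 200, 10) == 189);

    /* Wrapped: the use index ran past the end and started over. */
    assert(ring_unused(256, 10, 200) == 65);

    /* Freshly initialized ring (both indices 0): count - 1 slots free. */
    assert(ring_unused(256, 0, 0) == 255);

    /* Enough headroom to clear a TX_WAKE_THRESHOLD of 32. */
    assert(ring_unused(256, 50, 10) == 39);
    return 0;
}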
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* + * Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = 0; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + schedule_work(&adapter->print_hang_task); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + skb = buffer_info->skb; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + /* see !EOP comment in other Rx routine */ + if (!(staterr & E1000_RXD_STAT_EOP)) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + e_dbg("Packet Split buffers didn't pick up the full " + "packet\n"); + dev_kfree_skb_irq(skb); + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (!length) { + e_dbg("Last part of the packet spanning multiple " + "descriptors\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + { + /* + * this looks ugly, but it seems compiler issues make it + * more efficient than reusing j + */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* + * page alloc/put takes too long and effects small packet + * throughput, so unsplit small packets and save the alloc/put + * only valid in softirq (napi) context to call kmap_* + */ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = 
&buffer_info->ps_pages[0]; + + /* + * there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long + */ + dma_sync_single_for_cpu(&pdev->dev, ps_page->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); + dma_sync_single_for_device(&pdev->dev, ps_page->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* remove the CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) + l1 -= 4; + + skb_put(skb, l1); + goto copydone; + } /* if */ + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + length = le16_to_cpu(rx_desc->wb.upper.length[j]); + if (!length) + break; + + ps_page = &buffer_info->ps_pages[j]; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + skb_fill_page_desc(skb, j, ps_page->page, 0, length); + ps_page->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive + */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) + pskb_trim(skb, skb->len - 4); + +copydone: + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, le16_to_cpu( + rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); + + if (rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, netdev, skb, + staterr, rx_desc->wb.middle.vlan); + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ + +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + 
rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + ++i; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window + * too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb_irq(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + +#define rxtop (rx_ring->rx_skb_top) + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use the page, so don't erase + * buffer_info->page */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + e_err("pskb_may_pull failed.\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb, status, + rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct pci_dev *pdev = adapter->pdev; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->clean_rx == e1000_clean_rx_irq) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) + dma_unmap_page(&pdev->dev, buffer_info->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_ps) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (!ps_page->page) + break; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + put_page(ps_page->page); + ps_page->page = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +static void e1000e_downshift_workaround(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, downshift_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct 
e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + /* + * read ICR disables interrupts using IAM + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* + * ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* + * 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl, icr = er32(ICR); + + if (!icr || test_bit(__E1000_DOWN, &adapter->state)) + return IRQ_NONE; /* Not our interrupt */ + + /* + * IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* + * Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. 
No need for the + * IMC write + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* + * ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* + * 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_msix_other(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (!(icr & E1000_ICR_INT_ASSERTED)) { + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_OTHER); + return IRQ_NONE; + } + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + + if (icr & E1000_ICR_OTHER) { + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +no_link_interrupt: + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); + + return IRQ_HANDLED; +} + + +static irqreturn_t e1000_intr_msix_tx(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + if (!e1000_clean_tx_irq(adapter)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, tx_ring->ims_val); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_rx(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (adapter->rx_ring->set_itr) { + writel(1000000000 / (adapter->rx_ring->itr_val * 256), + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * e1000_configure_msix - Configure MSI-X hardware + * + * e1000_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. 
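Each itr_register write above (and in the MSI-X setup that follows) uses the same conversion: a target rate in interrupts per second becomes an interval counted in the register's 256 ns units, that is 1000000000 / (rate * 256). A small sketch of that arithmetic, with example rates chosen purely for illustration:

#include <assert.h>
#include <stdint.h>

/* Convert a target interrupt rate (interrupts/second) into the value
 * written to the (E)ITR register, whose unit is a 256 ns interval. */
static uint32_t itr_reg_from_rate(uint32_t ints_per_sec)
{
    return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
    /* 8000 ints/s: 125000 ns between interrupts, about 488 units of 256 ns. */
    assert(itr_reg_from_rate(8000) == 488);

    /* 20000 ints/s: 50000 ns, about 195 units. */
    assert(itr_reg_from_rate(20000) == 195);
    return 0;
}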
+ **/ +static void e1000_configure_msix(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_ring *tx_ring = adapter->tx_ring; + int vector = 0; + u32 ctrl_ext, ivar = 0; + + adapter->eiac_mask = 0; + + /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ + if (hw->mac.type == e1000_82574) { + u32 rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_ACK_DIS; + ew32(RFCTL, rfctl); + } + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + /* Configure Rx vector */ + rx_ring->ims_val = E1000_IMS_RXQ0; + adapter->eiac_mask |= rx_ring->ims_val; + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + rx_ring->itr_register); + else + writel(1, hw->hw_addr + rx_ring->itr_register); + ivar = E1000_IVAR_INT_ALLOC_VALID | vector; + + /* Configure Tx vector */ + tx_ring->ims_val = E1000_IMS_TXQ0; + vector++; + if (tx_ring->itr_val) + writel(1000000000 / (tx_ring->itr_val * 256), + hw->hw_addr + tx_ring->itr_register); + else + writel(1, hw->hw_addr + tx_ring->itr_register); + adapter->eiac_mask |= tx_ring->ims_val; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); + + /* set vector for Other Causes, e.g. link changes */ + vector++; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + E1000_EITR_82574(vector)); + else + writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + + /* Cause Tx interrupts on every write back */ + ivar |= (1 << 31); + + ew32(IVAR, ivar); + + /* enable MSI-X PBA support */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask Other interrupts upon ICR read */ +#define E1000_EIAC_MASK_82574 0x01F00000 + ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); + ctrl_ext |= E1000_CTRL_EXT_EIAME; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) +{ + int err; + int i; + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + if (adapter->flags & FLAG_HAS_MSIX) { + adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(adapter->num_vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < adapter->num_vectors; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, + adapter->num_vectors); + if (err == 0) + return; + } + /* MSI-X failed, so fall through and try MSI */ + e_err("Failed to initialize MSI-X interrupts. " + "Falling back to MSI interrupts.\n"); + e1000e_reset_interrupt_capability(adapter); + } + adapter->int_mode = E1000E_INT_MODE_MSI; + /* Fall through */ + case E1000E_INT_MODE_MSI: + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + } else { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_err("Failed to initialize MSI interrupts. 
Falling " + "back to legacy interrupts.\n"); + } + /* Fall through */ + case E1000E_INT_MODE_LEGACY: + /* Don't do anything; this is the system default */ + break; + } + + /* store the number of vectors being used */ + adapter->num_vectors = 1; +} + +/** + * e1000_request_msix - Initialize MSI-X interrupts + * + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int e1000_request_msix(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->rx_ring->name, + sizeof(adapter->rx_ring->name) - 1, + "%s-rx-0", netdev->name); + else + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + goto out; + adapter->rx_ring->itr_register = E1000_EITR_82574(vector); + adapter->rx_ring->itr_val = adapter->itr; + vector++; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->tx_ring->name, + sizeof(adapter->tx_ring->name) - 1, + "%s-tx-0", netdev->name); + else + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + goto out; + adapter->tx_ring->itr_register = E1000_EITR_82574(vector); + adapter->tx_ring->itr_val = adapter->itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + e1000_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + e1000_configure_msix(adapter); + return 0; +out: + return err; +} + +/** + * e1000_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->msix_entries) { + err = e1000_request_msix(adapter); + if (!err) + return err; + /* fall back to MSI */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_MSI; + e1000e_set_interrupt_capability(adapter); + } + if (adapter->flags & FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, + netdev->name, netdev); + if (!err) + return err; + + /* fall back to legacy interrupt */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + } + + err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, + netdev->name, netdev); + if (err) + e_err("Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->msix_entries) { + int vector = 0; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + /* Other Causes interrupt vector */ + free_irq(adapter->msix_entries[vector].vector, netdev); + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + if (adapter->msix_entries) + ew32(EIAC_82574, 0); + e1e_flush(); + + if (adapter->msix_entries) { + int i; + for (i = 0; i < adapter->num_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); + ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else { + ew32(IMS, IMS_ENABLE_MASK); + } + e1e_flush(); +} + +/** + * e1000e_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. + **/ +void e1000e_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000e_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) i + * of the f/w this means that the network i/f is closed. 
+ * + **/ +void e1000e_release_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware taken over control of h/w */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * @e1000_alloc_ring - allocate memory for a ring structure + **/ +static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + struct e1000_ring *ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +/** + * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000e_setup_tx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + int err = -ENOMEM, size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, tx_ring); + if (err) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + e_err("Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int e1000e_setup_rx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + int i, size, desc_len, err = -ENOMEM; + + size = sizeof(struct e1000_buffer) * rx_ring->count; + rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info) + goto err; + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, + sizeof(struct e1000_ps_page), + GFP_KERNEL); + if (!buffer_info->ps_pages) + goto err_pages; + } + + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, rx_ring); + if (err) + goto err_pages; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + + return 0; + +err_pages: + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + kfree(buffer_info->ps_pages); + } +err: + vfree(rx_ring->buffer_info); + e_err("Unable to allocate memory for the receive descriptor ring\n"); + return err; +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(adapter, buffer_info); + } + + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + 
memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + + e1000_clean_tx_ring(adapter); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * + * Free all receive software resources + **/ + +void e1000e_free_rx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + int i; + + e1000_clean_rx_ring(adapter); + + for (i = 0; i < rx_ring->count; i++) + kfree(rx_ring->buffer_info[i].ps_pages); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. This functionality is controlled + * by the InterruptThrottleRate module parameter. 
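+ *
+ * The value returned here is one of lowest_latency, low_latency or
+ * bulk_latency; e1000_set_itr() later maps these to roughly 70000, 20000
+ * and 4000 interrupts/second respectively.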
+ **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + if (adapter->flags2 & FLAG2_DISABLE_AIM) { + new_itr = 0; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, + adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, + adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* + * this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = new_itr; + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + if (new_itr) + ew32(ITR, 1000000000 / (new_itr * 256)); + else + ew32(ITR, 0); + } +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + + adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + + return 0; +err: + e_err("Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @budget: amount of packets driver is allowed to process this poll + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct e1000_hw *hw = &adapter->hw; + struct net_device *poll_dev = adapter->netdev; + int tx_cleaned = 1, work_done = 0; + + adapter = netdev_priv(poll_dev); + + if (adapter->msix_entries && + !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + goto clean_rx; + + tx_cleaned = e1000_clean_tx_irq(adapter); + +clean_rx: + adapter->clean_rx(adapter, &work_done, budget); + + if (!tx_cleaned) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + if (adapter->itr_setting & 3) + e1000_set_itr(adapter); + napi_complete(napi); + if (!test_bit(__E1000_DOWN, &adapter->state)) { + if (adapter->msix_entries) + ew32(IMS, adapter->rx_ring->ims_val); + else + e1000_irq_enable(adapter); + } + } + + return work_done; +} + +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return; + + /* add VID to filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + set_bit(vid, adapter->active_vlans); +} + +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000e_release_hw_control(adapter); + return; + } + + /* remove VID from filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + clear_bit(vid, adapter->active_vlans); +} + +/** + * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct 
e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); + ew32(RCTL, rctl); + + if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } +} + +/** + * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + } +} + +/** + * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +/** + * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) + e1000_vlan_rx_kill_vid(netdev, old_vid); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_add_vid(adapter->netdev, 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, vid); +} + +static void e1000_init_manageability_pt(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h, mdef, i, j; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* + * enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS + */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); + + switch (hw->mac.type) { + default: + manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); + break; + case e1000_82574: + case e1000_82583: + /* + * Check if IPMI pass-through decision filter already exists; + * if so, enable it. 
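+ * (Ports 623 and 664 are the standard RMCP and secure-RMCP ports used
+ * for IPMI-over-LAN traffic.)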
+ */ + for (i = 0, j = 0; i < 8; i++) { + mdef = er32(MDEF(i)); + + /* Ignore filters with anything other than IPMI ports */ + if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + continue; + + /* Enable this decision filter in MANC2H */ + if (mdef) + manc2h |= (1 << i); + + j |= mdef; + } + + if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + break; + + /* Create new decision filter in an empty filter */ + for (i = 0, j = 0; i < 8; i++) + if (er32(MDEF(i)) == 0) { + ew32(MDEF(i), (E1000_MDEF_PORT_623 | + E1000_MDEF_PORT_664)); + manc2h |= (1 << 1); + j++; + break; + } + + if (!j) + e_warn("Unable to create IPMI pass-through filter\n"); + break; + } + + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tctl, tipg, tarc; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH, (tdba >> 32)); + ew32(TDLEN, tdlen); + ew32(TDH, 0); + ew32(TDT, 0); + tx_ring->head = E1000_TDH; + tx_ring->tail = E1000_TDT; + + /* Set the default values for the Tx Inter Packet Gap timer */ + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ + ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ + ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ + + if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) + ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ + + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* Tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + u32 txdctl = er32(TXDCTL(0)); + txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | + E1000_TXDCTL_WTHRESH); + /* + * set up some performance related parameters to encourage the + * hardware to use the bus more efficiently in bursts, depends + * on the tx_int_delay to be enabled, + * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time + * hthresh = 1 ==> prefetch when one or more available + * pthresh = 0x1f ==> prefetch if internal cache 31 or less + * BEWARE: this seems to work but should be considered first if + * there are Tx hangs or other Tx related bugs + */ + txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; + ew32(TXDCTL(0), txdctl); + /* erratum work around: set txdctl the same for both queues */ + ew32(TXDCTL(1), txdctl); + } + + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC(0)); + /* + * set the speed mode bit, we'll clear it if we're not at + * gigabit link later + */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC(0), tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC(0)); + tarc |= 1; + ew32(TARC(0), tarc); + tarc = er32(TARC(1)); + tarc |= 1; + ew32(TARC(1), tarc); + } + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = 
E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + ew32(TCTL, tctl); + + e1000e_config_collision_dist(hw); +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + u32 pages = 0; + + /* Workaround Si errata on 82579 - configure jumbo frame flow */ + if (hw->mac.type == e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable jumbo frame workaround mode\n"); + } + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Some systems expect that the CRC is included in SMBUS traffic. The + * hardware strips the CRC before sending to both SMBUS (BMC) and to + * host memory when this is enabled + */ + if (adapter->flags2 & FLAG2_CRC_STRIPPING) + rctl |= E1000_RCTL_SECRC; + + /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ + if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { + u16 phy_data; + + e1e_rphy(hw, PHY_REG(770, 26), &phy_data); + phy_data &= 0xfff8; + phy_data |= (1 << 2); + e1e_wphy(hw, PHY_REG(770, 26), phy_data); + + e1e_rphy(hw, 22, &phy_data); + phy_data &= 0x0fff; + phy_data |= (1 << 14); + e1e_wphy(hw, 0x10, 0x2823); + e1e_wphy(hw, 0x11, 0x0003); + e1e_wphy(hw, 22, phy_data); + } + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* + * 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. + * + * allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames + * + * Using pages when the page size is greater than 16k wastes + * a lot of memory, since we allocate 3 pages at all times + * per packet. 
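+ *
+ * As an example, with 4 KB pages a 9000-byte jumbo MTU needs
+ * PAGE_USE_COUNT(9000) = 3 pages, which still qualifies for packet split
+ * on parts without ERT.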
+ */ + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && + (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; + + if (adapter->rx_ps_pages) { + u32 psrctl = 0; + + /* Configure extra packet-split registers */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + /* + * disable packet split support for IPv6 extension headers, + * because some malformed IPv6 headers can hang the Rx + */ + rfctl |= (E1000_RFCTL_IPV6_EX_DIS | + E1000_RFCTL_NEW_IPV6_EXT_DIS); + + ew32(RFCTL, rfctl); + + /* Enable Packet split descriptors */ + rctl |= E1000_RCTL_DTYP_PS; + + psrctl |= adapter->rx_ps_bsize0 >> + E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE3_SHIFT; + case 2: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE2_SHIFT; + case 1: + psrctl |= PAGE_SIZE >> + E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + ew32(PSRCTL, psrctl); + } + + ew32(RCTL, rctl); + /* just started the receive unit, no need to restart */ + adapter->flags &= ~FLAG_RX_RESTART_NOW; +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = rx_ring->count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); ++ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ++ ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + /* + * set the writeback threshold (only takes effect if the RDTR + * is set). 
set GRAN=1 and write back up to 0x4 worth, and + * enable prefetching of 0x20 Rx descriptors + * granularity = 01 + * wthresh = 04, + * hthresh = 04, + * pthresh = 0x20 + */ + ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); + ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); + + /* + * override the delay timers for enabling bursting, only if + * the value was not set by the user via module options + */ + if (adapter->rx_int_delay == DEFAULT_RDTR) + adapter->rx_int_delay = BURST_RDTR; + if (adapter->rx_abs_int_delay == DEFAULT_RADV) + adapter->rx_abs_int_delay = BURST_RADV; + } + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if ((adapter->itr_setting != 0) && (adapter->itr != 0)) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + + ctrl_ext = er32(CTRL_EXT); + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + ew32(IAM, 0xffffffff); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH, (rdba >> 32)); + ew32(RDLEN, rdlen); + ew32(RDH, 0); + ew32(RDT, 0); + rx_ring->head = E1000_RDH; + rx_ring->tail = E1000_RDT; + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->flags & FLAG_RX_CSUM_ENABLED) { + rxcsum |= E1000_RXCSUM_TUOFL; + + /* + * IPv4 payload checksum for UDP fragments must be + * used in conjunction with packet-split. + */ + if (adapter->rx_ps_pages) + rxcsum |= E1000_RXCSUM_IPPCSE; + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* no need to clear IPPCSE as it defaults to 0 */ + } + ew32(RXCSUM, rxcsum); + + /* + * Enable early receives on supported devices, only takes effect when + * packet size is equal or larger than the specified value (in 8 byte + * units), e.g. using jumbo frames when setting to E1000_ERT_2048 + */ + if ((adapter->flags & FLAG_HAS_ERT) || + (adapter->hw.mac.type == e1000_pch2lan)) { + if (adapter->netdev->mtu > ETH_DATA_LEN) { + u32 rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl | 0x3); + if (adapter->flags & FLAG_HAS_ERT) + ew32(ERT, E1000_ERT_2048 | (1 << 13)); + /* + * With jumbo frames and early-receive enabled, + * excessive C-state transition latencies result in + * dropped transactions. + */ + pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); + } else { + pm_qos_update_request(&adapter->netdev->pm_qos_req, + PM_QOS_DEFAULT_VALUE); + } + } + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); +} + +/** + * e1000_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
+ **/ +static void e1000_set_multi(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + u32 rctl; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + /* Do not hardware filter VLANs in promisc mode */ + e1000e_vlan_filter_disable(adapter); + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; + } else { + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + } + e1000e_vlan_filter_enable(adapter); + } + + ew32(RCTL, rctl); + + if (!netdev_mc_empty(netdev)) { + int i = 0; + + mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + if (!mta_list) + return; + + /* prepare a packed array of only addresses. */ + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + e1000_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + } else { + /* + * if we're called from probe, we might not have + * anything to do here, so clear out the list + */ + e1000_update_mc_addr_list(hw, NULL, 0); + } + + if (netdev->features & NETIF_F_HW_VLAN_RX) + e1000e_vlan_strip_enable(adapter); + else + e1000e_vlan_strip_disable(adapter); +} + +/** + * e1000_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + e1000_set_multi(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability_pt(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), + GFP_KERNEL); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_up) + adapter->hw.phy.ops.power_up(&adapter->hw); + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down. + * The PHY cannot be powered down if management or WoL is active. + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + /* WoL is enabled */ + if (adapter->wol) + return; + + if (adapter->hw.phy.ops.power_down) + adapter->hw.phy.ops.power_down(&adapter->hw); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. 
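+ *
+ * Note that the reset also recomputes the flow control high water mark from
+ * the Rx packet buffer size, e.g. an illustrative 20 KB Rx allocation and a
+ * 1518-byte max frame give hwm = min(20480 * 9 / 10, 20480 - 1518) = 18432.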
+ */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_fc_info *fc = &adapter->hw.fc; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u32 pba = adapter->pba; + u16 hwm; + + /* reset Packet Buffer Allocation to default */ + ew32(PBA, pba); + + if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* + * To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* + * the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* + * If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if ((tx_space < min_tx_space) && + ((min_tx_space - tx_space) < pba)) { + pba -= min_tx_space - tx_space; + + /* + * if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if ((pba < min_rx_space) && + (!(adapter->flags & FLAG_HAS_ERT))) + /* ERT enabled in e1000_configure_rx */ + pba = min_rx_space; + } + + ew32(PBA, pba); + } + + /* + * flow control settings + * + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + switch (hw->mac.type) { + default: + if ((adapter->flags & FLAG_HAS_ERT) && + (adapter->netdev->mtu > ETH_DATA_LEN)) + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - (E1000_ERT_2048 << 3))); + else + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); + + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + break; + case e1000_pchlan: + /* + * Workaround PCH LOM adapter hangs with certain network + * loads. If hangs persist, try disabling Tx flow control. 
+ */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + fc->high_water = 0x3500; + fc->low_water = 0x1500; + } else { + fc->high_water = 0x5000; + fc->low_water = 0x3000; + } + fc->refresh_time = 0x1000; + break; + case e1000_pch2lan: + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + fc->refresh_time = 0x0400; + if (adapter->netdev->mtu > ETH_DATA_LEN) { + pba = 14; + ew32(PBA, pba); + } + break; + } + + /* + * Disable Adaptive Interrupt Moderation if 2 full packets cannot + * fit in receive buffer and early-receive not supported. + */ + if (adapter->itr_setting & 0x3) { + if (((adapter->max_frame_size * 2) > (pba << 10)) && + !(adapter->flags & FLAG_HAS_ERT)) { + if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate turned off\n"); + adapter->flags2 |= FLAG2_DISABLE_AIM; + ew32(ITR, 0); + } + } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate turned on\n"); + adapter->flags2 &= ~FLAG2_DISABLE_AIM; + adapter->itr = 20000; + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + } + + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + + /* + * For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + e_err("Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + + if (!netif_running(adapter->netdev) && + !test_bit(__E1000_TESTING, &adapter->state)) { + e1000_power_down_phy(adapter); + return; + } + + e1000_get_phy_info(hw); + + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && + !(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* + * speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed + */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + if (adapter->msix_entries) + e1000_configure_msix(adapter); + e1000_irq_enable(adapter); + + netif_start_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; +} + +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + +static void e1000e_update_stats(struct e1000_adapter *adapter); + +void e1000e_down(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* + * signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + 
set_bit(__E1000_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); ++ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ++ ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); ++ + /* flush both disables and wait for them to finish */ + e1e_flush(); + usleep_range(10000, 20000); + + napi_disable(&adapter->napi); + e1000_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + netif_carrier_off(netdev); + + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + e1000e_flush_descriptors(adapter); + e1000_clean_tx_ring(adapter); + e1000_clean_rx_ring(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + e1000e_reset(adapter); + + /* + * TODO: for power management, we could drop the link and + * pci_disable_device here. + */ +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + e1000e_down(adapter); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int __devinit e1000_sw_init(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->stats64_lock); + + e1000e_set_interrupt_capability(adapter); + + if (e1000_alloc_queues(adapter)) + return -ENOMEM; + + /* Explicitly disable IRQ since the NIC can be in any state. 
*/ + e1000_irq_disable(adapter); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; +} + +/** + * e1000_intr_msi_test - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi_test(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + e_dbg("icr is %08X\n", icr); + if (icr & E1000_ICR_RXSEQ) { + adapter->flags &= ~FLAG_MSI_TEST_FAILED; + wmb(); + } + + return IRQ_HANDLED; +} + +/** + * e1000_test_msi_interrupt - Returns 0 for successful test + * @adapter: board private struct + * + * code flow taken from tg3.c + **/ +static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* poll_enable hasn't been called yet, so don't need disable */ + /* clear any pending events */ + er32(ICR); + + /* free the real vector and request a test handler */ + e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); + + /* Assume that the test fails, if it succeeds then the test + * MSI irq handler will unset this flag */ + adapter->flags |= FLAG_MSI_TEST_FAILED; + + err = pci_enable_msi(adapter->pdev); + if (err) + goto msi_test_failed; + + err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, + netdev->name, netdev); + if (err) { + pci_disable_msi(adapter->pdev); + goto msi_test_failed; + } + + wmb(); + + e1000_irq_enable(adapter); + + /* fire an unusual interrupt on the test handler */ + ew32(ICS, E1000_ICS_RXSEQ); + e1e_flush(); + msleep(50); + + e1000_irq_disable(adapter); + + rmb(); + + if (adapter->flags & FLAG_MSI_TEST_FAILED) { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_info("MSI interrupt test failed, using legacy interrupt.\n"); + } else + e_dbg("MSI interrupt test succeeded!\n"); + + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + +msi_test_failed: + e1000e_set_interrupt_capability(adapter); + return e1000_request_irq(adapter); +} + +/** + * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored + * @adapter: board private struct + * + * code flow taken from tg3.c, called with e1000 interrupts disabled. + **/ +static int e1000_test_msi(struct e1000_adapter *adapter) +{ + int err; + u16 pci_cmd; + + if (!(adapter->flags & FLAG_MSI_ENABLED)) + return 0; + + /* disable SERR in case the MSI write causes a master abort */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = e1000_test_msi_interrupt(adapter); + + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } + + return err; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
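+ *
+ * Note: when plain MSI is in use, e1000_test_msi() is run after requesting
+ * the interrupt; it fires a test interrupt (RXSEQ) to verify the chipset
+ * really delivers MSI writes and falls back to legacy interrupts otherwise.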
+ **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + + /* + * If AMT is enabled, let the firmware know that the network + * interface is now open and reset the part to a known state. + */ + if (adapter->flags & FLAG_HAS_AMT) { + e1000e_get_hw_control(adapter); + e1000e_reset(adapter); + } + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* DMA latency requirement to workaround early-receive/jumbo issue */ + if ((adapter->flags & FLAG_HAS_ERT) || + (adapter->hw.mac.type == e1000_pch2lan)) + pm_qos_add_request(&adapter->netdev->pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + /* + * before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* + * Work around PCIe errata with MSI interrupts causing some chipsets to + * ignore e1000e MSI messages, which means we need to test our MSI + * interrupt now + */ + if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { + err = e1000_test_msi(adapter); + if (err) { + e_err("Interrupt allocation failed\n"); + goto err_req_irq; + } + } + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + adapter->idle_check = true; + pm_runtime_put(&pdev->dev); + + /* fire a link status change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + e1000e_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter); +err_setup_rx: + e1000e_free_tx_resources(adapter); +err_setup_tx: + e1000e_reset(adapter); + pm_runtime_put_sync(&pdev->dev); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + pm_runtime_get_sync(&pdev->dev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) { + e1000e_down(adapter); + e1000_free_irq(adapter); + } + e1000_power_down_phy(adapter); + + e1000e_free_tx_resources(adapter); + e1000e_free_rx_resources(adapter); + + /* + * kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + + /* + * If AMT is enabled, let the firmware know that the network + * interface is now closed + */ + if ((adapter->flags & FLAG_HAS_AMT) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000e_release_hw_control(adapter); + + if ((adapter->flags & FLAG_HAS_ERT) || + (adapter->hw.mac.type == e1000_pch2lan)) + pm_qos_remove_request(&adapter->netdev->pm_qos_req); + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { + /* activate the work around */ + e1000e_set_laa_state_82571(&adapter->hw, 1); + + /* + * Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. Eventually the LAA will be in RAR[0] and + * RAR[14] + */ + e1000e_rar_set(&adapter->hw, + adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + return 0; +} + +/** + * e1000e_update_phy_task - work thread to update phy + * @work: pointer to our work struct + * + * this worker thread exists because we must acquire a + * semaphore to read the phy, which we could msleep while + * waiting for it, and we can't msleep in a timer. 
+ **/ +static void e1000e_update_phy_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, update_phy_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000_get_phy_info(&adapter->hw); +} + +/* + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + schedule_work(&adapter->update_phy_task); +} + +/** + * e1000e_update_phy_stats - Update the PHY statistics counters + * @adapter: board private structure + * + * Read/clear the upper 16-bit PHY registers and read/accumulate lower + **/ +static void e1000e_update_phy_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val; + u16 phy_data; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + /* + * A page set is expensive so check if already on desired page. + * If not, set to the page with the PHY status registers. + */ + hw->phy.addr = 1; + ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + &phy_data); + if (ret_val) + goto release; + if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + } + + /* Single Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.scc += phy_data; + + /* Excessive Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.ecol += phy_data; + + /* Multiple Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.mcc += phy_data; + + /* Late Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.latecol += phy_data; + + /* Collision Count - also used for adaptive IFS */ + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + if (!ret_val) + hw->mac.collision_delta = phy_data; + + /* Defer Count */ + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.dc += phy_data; + + /* Transmit with no CRS */ + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); + if (!ret_val) + adapter->stats.tncrs += phy_data; + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000e_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +static void e1000e_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* + * Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
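+ * Most of the statistics registers read below are clear-on-read, and an
+ * offline PCI device reads back all-ones, so sampling them in either
+ * state would corrupt the accumulated software counters.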
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorc += er32(GORCL); + er32(GORCH); /* Clear gorc */ + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.mpc += er32(MPC); + + /* Half-duplex statistics */ + if (adapter->link_duplex == HALF_DUPLEX) { + if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { + e1000e_update_phy_stats(adapter); + } else { + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + + hw->mac.collision_delta = er32(COLC); + + if ((hw->mac.type != e1000_82574) && + (hw->mac.type != e1000_82583)) + adapter->stats.tncrs += er32(TNCRS); + } + adapter->stats.colc += hw->mac.collision_delta; + } + + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotc += er32(GOTCL); + er32(GOTCH); /* Clear gotc */ + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->mac.tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->mac.tx_packet_delta; + + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* + * RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + netdev->stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + netdev->stats.tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); +} + +/** + * e1000_phy_read_status - Update the PHY register status snapshot + * @adapter: board private structure + **/ +static void e1000_phy_read_status(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_regs *phy = &adapter->phy_regs; + + if ((er32(STATUS) & E1000_STATUS_LU) && + (adapter->hw.phy.media_type == e1000_media_type_copper)) { + int ret_val; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); + ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); + ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); + ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); + ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, 
&phy->expansion); + ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); + ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); + if (ret_val) + e_warn("Error reading PHY register\n"); + } else { + /* + * Do not read PHY registers if link is not up + * Set values to typical power-on defaults + */ + phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); + phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | + BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | + BMSR_ERCAP); + phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | + ADVERTISE_ALL | ADVERTISE_CSMA); + phy->lpa = 0; + phy->expansion = EXPANSION_ENABLENPAGE; + phy->ctrl1000 = ADVERTISE_1000FULL; + phy->stat1000 = 0; + phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); + } +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + /* Link status message must follow this format for user tools */ + printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + adapter->netdev->name, + adapter->link_speed, + (adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? + "Rx/Tx" : + ((ctrl & E1000_CTRL_RFCE) ? "Rx" : + ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); +} + +static bool e1000e_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = 0; + s32 ret_val = 0; + + /* + * get_link_status is set on LSC (link status) interrupt or + * Rx sequence error interrupt. get_link_status will stay + * false until the check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + } else { + link_active = 1; + } + break; + case e1000_media_type_fiber: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = adapter->hw.mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + e_info("Gigabit has been disabled, downgrading speed\n"); + } + + return link_active; +} + +static void e1000e_enable_receives(struct e1000_adapter *adapter) +{ + /* make sure the receive unit is started */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RX_RESTART_NOW)) { + struct e1000_hw *hw = &adapter->hw; + u32 rctl = er32(RCTL); + ew32(RCTL, rctl | E1000_RCTL_EN); + adapter->flags &= ~FLAG_RX_RESTART_NOW; + } +} + +static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* + * With 82574 controllers, PHY needs to be checked periodically + * for hung state and reset, if two calls return true + */ + if (e1000_check_phy_82574(hw)) + adapter->phy_hang_count++; + else + adapter->phy_hang_count = 0; + + if (adapter->phy_hang_count > 1) { + adapter->phy_hang_count = 0; + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void 
e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_phy_info *phy = &adapter->hw.phy; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + link = e1000e_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + e1000e_enable_receives(adapter); + goto link_up; + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = 1; + + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + /* update snapshot of PHY registers on LSC */ + e1000_phy_read_status(adapter); + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + /* + * On supported PHYs, check for duplex mismatch only + * if link has autonegotiated at 10/100 half + */ + if ((hw->phy.type == e1000_phy_igp_3 || + hw->phy.type == e1000_phy_bm) && + (hw->mac.autoneg == true) && + (adapter->link_speed == SPEED_10 || + adapter->link_speed == SPEED_100) && + (adapter->link_duplex == HALF_DUPLEX)) { + u16 autoneg_exp; + + e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); + + if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) + e_info("Autonegotiated half duplex but" + " link partner cannot autoneg. " + " Try forcing full duplex if " + "link gets many collisions.\n"); + } + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = 0; + adapter->tx_timeout_factor = 10; + break; + } + + /* + * workaround: re-program speed mode bit after + * link-up event + */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + tarc0 = er32(TARC(0)); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC(0), tarc0); + } + + /* + * disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues + */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + e_info("10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* + * enable transmits in the hardware, need to do this + * after setting TARC(0) + */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* + * Perform any post-link-up configuration before + * reporting link up. 
+ */ + if (phy->ops.cfg_on_link_up) + phy->ops.cfg_on_link_up(hw); + + netif_carrier_on(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + /* Link status message must follow this format */ + printk(KERN_INFO "e1000e: %s NIC Link is Down\n", + adapter->netdev->name); + netif_carrier_off(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + if (adapter->flags & FLAG_RX_NEEDS_RESTART) + schedule_work(&adapter->reset_task); + else + pm_schedule_suspend(netdev->dev.parent, + LINK_TIMEOUT); + } + } + +link_up: + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorc = adapter->stats.gorc - adapter->gorc_old; + adapter->gorc_old = adapter->stats.gorc; + adapter->gotc = adapter->stats.gotc - adapter->gotc_old; + adapter->gotc_old = adapter->stats.gotc; + spin_unlock(&adapter->stats64_lock); + + e1000e_update_adaptive(&adapter->hw); + + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (adapter->itr_setting == 4) { + /* + * Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotc + adapter->gorc) / 10000; + u32 dif = (adapter->gotc > adapter->gorc ? + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->msix_entries) + ew32(ICS, adapter->rx_ring->ims_val); + else + ew32(ICS, E1000_ICS_RXDMT0); + + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = 1; + + /* + * With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
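+ *
+ * Stepping back to the ITR calculation a few lines up, as a worked
+ * illustration (numbers invented): if adapter->gotc = 30000 and
+ * adapter->gorc = 10000 bytes moved since the last watchdog run, then
+ * goc = 4 and dif = 2, so itr = 2 * 6000 / 4 + 2000 = 5000 and the ITR
+ * register is written with 1000000000 / (5000 * 256) = 781. A symmetric
+ * load (dif = 0) degenerates to 2000 and a fully one-sided load
+ * (dif == goc) to 8000, matching the comment on that calculation.
+ *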
Set the appropriate LAA in RAR[0] + */ + if (e1000e_get_laa_state_82571(hw)) + e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + + if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) + e1000e_check_82574_phy_workaround(adapter); + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + __be16 protocol; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= 
E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn("checksum_partial proto=%x!\n", + be16_to_cpu(protocol)); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +#define E1000_MAX_PER_TXD 8192 +#define E1000_MAX_TXD_PWR 12 + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + buffer_info->mapped_as_page = false; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_page(&pdev->dev, frag->page, + offset, size, + DMA_TO_DEVICE); + buffer_info->mapped_as_page = true; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ? 
: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "Tx DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + int tx_flags, int count) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_TSO) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (tx_flags & E1000_TX_FLAGS_IPV4) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + i = tx_ring->next_to_use; + + do { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i == tx_ring->count) + i = 0; + } while (--count > 0); + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
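+ *
+ * Concretely: the descriptor writes above are plain stores, and the
+ * tail write below is what hands ownership to the hardware, so the
+ * barrier must make every descriptor globally visible before the tail
+ * update can be observed.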
+ */ + wmb(); + + tx_ring->next_to_use = i; - writel(i, adapter->hw.hw_addr + tx_ring->tail); ++ ++ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) ++ e1000e_update_tdt_wa(adapter, i); ++ else ++ writel(i, adapter->hw.hw_addr + tx_ring->tail); ++ + /* + * we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (vlan_tx_tag_present(skb)) { + if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + } + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_stop_queue(netdev); + /* + * Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* + * We need to check again in a case another CPU has just + * made room available. + */ + if (e1000_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000_desc_unused(adapter->tx_ring) >= size) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int first; + unsigned int max_per_txd = E1000_MAX_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + + if (test_bit(__E1000_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + mss = skb_shinfo(skb)->gso_size; + /* + * The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
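+ *
+ * For example (illustrative numbers): with mss = 1460 the cap stays at
+ * min(1460 << 2, 8192) = 5840 and fls(5840) - 1 = 12, but with a small
+ * mss of 500 it drops to 2000 with max_txd_pwr = 10, so
+ * TXD_USE_COUNT(len, 10) charges one descriptor per 1024 bytes of the
+ * linear head plus one.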
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + /* + * TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data + */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + /* + * we do this workaround for ES2LAN, but it is un-necessary, + * avoiding it could save a lot of cycles + */ + if (skb->data_len && (hdr_len == len)) { + unsigned int pull_size; + + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err("__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, + max_txd_pwr); + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + /* + * need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (e1000_maybe_stop_tx(netdev, count + 2)) + return NETDEV_TX_BUSY; + + if (vlan_tx_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= E1000_TX_FLAGS_TSO; + else if (e1000_tx_csum(adapter, skb)) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* + * Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. + */ + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + /* if count is 0 then mapping error has occurred */ + count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); + if (count) { + e1000_tx_queue(adapter, tx_flags, count); + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); + + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RX_RESTART_NOW))) { + e1000e_dump(adapter); + e_err("Reset adapter\n"); + } + e1000e_reinit_locked(adapter); +} + +/** + * e1000_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. 
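+ *
+ * The snapshot is made consistent by taking stats64_lock and refreshing
+ * the hardware counters via e1000e_update_stats() before copying them
+ * into the rtnl_link_stats64 structure.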
+ **/ +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + /* Fill out the OS statistics structure */ + stats->rx_bytes = adapter->stats.gorc; + stats->rx_packets = adapter->stats.gprc; + stats->tx_bytes = adapter->stats.gotc; + stats->tx_packets = adapter->stats.gptc; + stats->multicast = adapter->stats.mprc; + stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* + * RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + stats->rx_crc_errors = adapter->stats.crcerrs; + stats->rx_frame_errors = adapter->stats.algnerrc; + stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + stats->tx_aborted_errors = adapter->stats.ecol; + stats->tx_window_errors = adapter->stats.latecol; + stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + spin_unlock(&adapter->stats64_lock); + return stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Jumbo frame support */ + if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && + !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + e_err("Jumbo Frames not supported.\n"); + return -EINVAL; + } + + /* Supported frame sizes */ + if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || + (max_frame > adapter->max_hw_frame_size)) { + e_err("Unsupported MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame workaround on 82579 requires CRC be stripped */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && + (new_mtu > ETH_DATA_LEN)) { + e_err("Jumbo Frames not supported on 82579 when CRC " + "stripping is disabled.\n"); + return -EINVAL; + } + + /* 82573 Errata 17 */ + if (((adapter->hw.mac.type == e1000_82573) || + (adapter->hw.mac.type == e1000_82574)) && + (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { + adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; + e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ + adapter->max_frame_size = max_frame; + e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + if (netif_running(netdev)) + e1000e_down(adapter); + + /* + * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else + adapter->rx_buffer_len = 4096; + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN; + + if (netif_running(netdev)) + e1000e_up(adapter); + else + e1000e_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + e1000_phy_read_status(adapter); + + switch (data->reg_num & 0x1F) { + case MII_BMCR: + data->val_out = adapter->phy_regs.bmcr; + break; + case MII_BMSR: + data->val_out = adapter->phy_regs.bmsr; + break; + case MII_PHYSID1: + data->val_out = (adapter->hw.phy.id >> 16); + break; + case MII_PHYSID2: + data->val_out = (adapter->hw.phy.id & 0xFFFF); + break; + case MII_ADVERTISE: + data->val_out = adapter->phy_regs.advertise; + break; + case MII_LPA: + data->val_out = adapter->phy_regs.lpa; + break; + case MII_EXPANSION: + data->val_out = adapter->phy_regs.expansion; + break; + case MII_CTRL1000: + data->val_out = adapter->phy_regs.ctrl1000; + break; + case MII_STAT1000: + data->val_out = adapter->phy_regs.stat1000; + break; + case MII_ESTATUS: + data->val_out = adapter->phy_regs.estatus; + break; + default: + return -EIO; + } + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, mac_reg; + u16 phy_reg, wuc_enable; + int retval = 0; + + /* copy MAC RARs to PHY RARs */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + retval = hw->phy.ops.acquire(hw); + if (retval) { + e_err("Could not acquire PHY\n"); + return retval; + } + + /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ + retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + goto out; + + /* copy MAC MTA to PHY MTA - only needed for pchlan */ + for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { + mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); + hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, + (u16)((mac_reg >> 16) & 0xFFFF)); + } + + /* configure PHY Rx Control register */ + hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); + mac_reg = er32(RCTL); + if (mac_reg & E1000_RCTL_UPE) + phy_reg |= BM_RCTL_UPE; + if (mac_reg & E1000_RCTL_MPE) + phy_reg |= BM_RCTL_MPE; + phy_reg &= ~(BM_RCTL_MO_MASK); + if (mac_reg & E1000_RCTL_MO_3) + phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) + << BM_RCTL_MO_SHIFT); + if (mac_reg & E1000_RCTL_BAM) + phy_reg |= BM_RCTL_BAM; + if (mac_reg & E1000_RCTL_PMCF) + phy_reg |= 
BM_RCTL_PMCF; + mac_reg = er32(CTRL); + if (mac_reg & E1000_CTRL_RFCE) + phy_reg |= BM_RCTL_RFCE; + hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + + /* enable PHY wakeup in MAC register */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); + + /* configure and enable PHY wakeup in PHY registers */ + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); + + /* activate PHY wakeup */ + wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; + retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + e_err("Could not set PHY Host Wakeup bit\n"); +out: + hw->phy.ops.release(hw); + + return retval; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + int retval = 0; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_free_irq(adapter); + } + e1000e_reset_interrupt_capability(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_multi(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; + if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) + ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.phy.media_type == e1000_media_type_fiber || + adapter->hw.phy.media_type == + e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + if (adapter->flags & FLAG_IS_ICH) + e1000_suspend_workarounds_ich8lan(&adapter->hw); + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + /* enable wakeup by the PHY */ + retval = e1000_init_phy_wakeup(adapter, wufc); + if (retval) + return retval; + } else { + /* enable wakeup by the MAC */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PME_EN); + } + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if ((adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + *enable_wake = true; + + if (adapter->hw.phy.type == e1000_phy_igp_3) + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + + /* + * Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
+ */ + e1000e_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) +{ + if (sleep && wake) { + pci_prepare_to_sleep(pdev); + return; + } + + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); +} + +static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, + bool wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* + * The pci-e switch on some quad port adapters will report a + * correctable error when the MAC transitions from D0 to D3. To + * prevent this we need to mask off the correctable errors on the + * downstream port of the pci-e switch. + */ + if (adapter->flags & FLAG_IS_QUAD_PORT) { + struct pci_dev *us_dev = pdev->bus->self; + int pos = pci_pcie_cap(us_dev); + u16 devctl; + + pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); + pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE)); + + e1000_power_off(pdev, sleep, wake); + + pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); + } else { + e1000_power_off(pdev, sleep, wake); + } +} + +#ifdef CONFIG_PCIEASPM +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + pci_disable_link_state_locked(pdev, state); +} +#else +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + int pos; + u16 reg16; + + /* + * Both device and parent should have the same ASPM setting. + * Disable ASPM in downstream component first and then upstream. + */ + pos = pci_pcie_cap(pdev); + pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); + reg16 &= ~state; + pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); + + if (!pdev->bus->self) + return; + + pos = pci_pcie_cap(pdev->bus->self); + pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); + reg16 &= ~state; + pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); +} +#endif +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", + (state & PCIE_LINK_STATE_L1) ? "L1" : ""); + + __e1000e_disable_aspm(pdev, state); +} + +#ifdef CONFIG_PM +static bool e1000e_pm_ready(struct e1000_adapter *adapter) +{ + return !!adapter->tx_ring->buffer_info; +} + +static int __e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + u32 err; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + e1000e_set_interrupt_capability(adapter); + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + if (hw->mac.type == e1000_pch2lan) + e1000_resume_workarounds_pchlan(&adapter->hw); + + e1000e_power_up_phy(adapter); + + /* report the system wakeup cause from S3/S4 */ + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + u16 phy_data; + + e1e_rphy(&adapter->hw, BM_WUS, &phy_data); + if (phy_data) { + e_info("PHY Wakeup cause - %s\n", + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? 
"Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? "Link Status " + " Change" : "other"); + } + e1e_wphy(&adapter->hw, BM_WUS, ~0); + } else { + u32 wus = er32(WUS); + if (wus) { + e_info("MAC Wakeup cause - %s\n", + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? "Link Status Change" : + "other"); + } + ew32(WUS, ~0); + } + + e1000e_reset(adapter); + + e1000_init_manageability_pt(adapter); + + if (netif_running(netdev)) + e1000e_up(adapter); + + netif_device_attach(netdev); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. + */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int e1000_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake, false); + if (!retval) + e1000_complete_shutdown(pdev, true, wake); + + return retval; +} + +static int e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000e_pm_ready(adapter)) + adapter->idle_check = true; + + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int e1000_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000e_pm_ready(adapter)) { + bool wake; + + __e1000_shutdown(pdev, &wake, true); + } + + return 0; +} + +static int e1000_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + if (adapter->idle_check) { + adapter->idle_check = false; + if (!e1000e_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC); + } + + return -EBUSY; +} + +static int e1000_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + adapter->idle_check = !dev->power.runtime_auto; + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake = false; + + __e1000_shutdown(pdev, &wake, false); + + if (system_state == SYSTEM_POWER_OFF) + e1000_complete_shutdown(pdev, false, wake); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +static irqreturn_t e1000_intr_msix(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->msix_entries) { + int vector, msix_irq; + + vector = 0; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_rx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_tx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = 
adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_msix_other(msix_irq, netdev); + enable_irq(msix_irq); + } + + return IRQ_HANDLED; +} + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + e1000_intr_msix(adapter->pdev->irq, netdev); + break; + case E1000E_INT_MODE_MSI: + disable_irq(adapter->pdev->irq); + e1000_intr_msi(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); + break; + default: /* E1000E_INT_MODE_LEGACY */ + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); + break; + } +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000e_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + int err; + pci_ers_result_t result; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pdev->state_saved = true; + pci_restore_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000e_reset(adapter); + ew32(WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. 
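+ *
+ * In the AER recovery sequence this callback runs last:
+ * e1000_io_error_detected() detaches the netdev and disables the
+ * device, e1000_io_slot_reset() re-enables it and resets the MAC, and
+ * this hook re-attaches the interface and restores manageability.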
+ */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability_pt(adapter); + + if (netif_running(netdev)) { + if (e1000e_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. + */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u8 pba_str[E1000_PBANUM_LENGTH]; + + /* print bus type/speed/width info */ + e_info("(PCI Express:2.5GT/s:%s) %pM\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr); + e_info("Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); + ret_val = e1000_read_pba_string_generic(hw, pba_str, + E1000_PBANUM_LENGTH); + if (ret_val) + strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); + e_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, pba_str); +} + +static void e1000_eeprom_checks(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int ret_val; + u16 buf = 0; + + if (hw->mac.type != e1000_82573) + return; + + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); + if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { + /* Deep Smart Power Down (DSPD) */ + dev_warn(&adapter->pdev->dev, + "Warning: detected DSPD enabled in EEPROM\n"); + } +} + +static const struct net_device_ops e1000e_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats64 = e1000e_get_stats64, + .ndo_set_rx_mode = e1000_set_multi, + .ndo_set_mac_address = e1000_set_mac, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif +}; + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
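+ *
+ * The rough order below is: enable the PCI device, pick a 64-bit DMA
+ * mask with a 32-bit fallback, claim the memory BARs, allocate the
+ * netdev, ioremap BAR 0 (and the flash BAR when present), read the MAC
+ * address from the NVM, then register_netdev() and report the device.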
+ **/ +static int __devinit e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + resource_size_t mmio_start, mmio_len; + resource_size_t flash_start, flash_len; + + static int cards_found; + u16 aspm_disable_flag = 0; + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + + if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_selected_regions_exclusive(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + e1000e_driver_name); + if (err) + goto err_pci_reg; + + /* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + /* PCI config space info */ + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + netdev->irq = pdev->irq; + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->flags2 = ei->flags2; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->max_hw_frame_size = ei->max_hw_frame_size; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* construct the net_device struct */ + netdev->netdev_ops = &e1000e_netdev_ops; + e1000e_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + e1000e_check_options(adapter); + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + err = ei->get_variants(adapter); + if (err) + goto err_hw_init; + + if ((adapter->flags & FLAG_IS_ICH) && + 
(adapter->flags & FLAG_READ_ONLY_NVM)) + e1000e_write_protect_nvm_ich8lan(&adapter->hw); + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.autoneg_wait_to_complete = 0; + + /* Copper options */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (e1000_check_reset_block(&adapter->hw)) + e_info("PHY reset is blocked due to SOL/IDER session.\n"); + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* + * before reading the NVM, reset the controller to + * put the device in a known good starting state + */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* + * systems with ASPM and others may see the checksum fail on the first + * attempt. Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + e_err("The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + e1000_eeprom_checks(adapter); + + /* copy the MAC address */ + if (e1000e_read_mac_addr(&adapter->hw)) + e_err("NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long) adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long) adapter; + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); + INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); + INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); + + /* Initialize link parameters. 
User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = 1; + adapter->hw.fc.requested_mode = e1000_fc_default; + adapter->hw.fc.current_mode = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* ring size defaults */ + adapter->rx_ring->count = 256; + adapter->tx_ring->count = 256; + + /* + * Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) + adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + else + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* + * now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* save off EEPROM version number */ + e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. + */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e1000_print_device_info(adapter); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_release_hw_control(adapter); +err_eeprom: + if (!e1000_check_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + e1000e_reset_interrupt_capability(adapter); +err_flashmap: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
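/*
 * The error labels at the end of the probe routine above unwind resources in
 * reverse order of acquisition, each goto target releasing only what was
 * already set up before the failure.  The self-contained sketch below shows
 * that idiom in isolation; probe_like(), acquire() and release() are
 * placeholders for the example and this is ordinary userspace C, not driver
 * code.
 */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name)
{
        printf("acquire %s\n", name);
        return malloc(1);
}

static void release(const char *name, void *p)
{
        printf("release %s\n", name);
        free(p);
}

static int probe_like(void)
{
        void *regions, *mmio, *netdev;
        int err = -1;

        regions = acquire("regions");
        if (!regions)
                goto err_regions;
        mmio = acquire("ioremap");
        if (!mmio)
                goto err_ioremap;
        netdev = acquire("netdev");
        if (!netdev)
                goto err_netdev;
        return 0;               /* success: everything stays in use */

err_netdev:
        release("ioremap", mmio);
err_ioremap:
        release("regions", regions);
err_regions:
        return err;
}

int main(void)
{
        return probe_like() ? 1 : 0;
}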
+ **/ +static void __devexit e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + bool down = test_bit(__E1000_DOWN, &adapter->state); + + /* + * The timers may be rescheduled, so explicitly disable them + * from being rescheduled. + */ + if (!down) + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->downshift_task); + cancel_work_sync(&adapter->update_phy_task); + cancel_work_sync(&adapter->print_hang_task); + + if (!(netdev->flags & IFF_UP)) + e1000_power_down_phy(adapter); + + /* Don't lie to e1000_close() down the road. */ + if (!down) + clear_bit(__E1000_DOWN, &adapter->state); + unregister_netdev(netdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* + * Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + e1000e_reset_interrupt_capability(adapter); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + free_netdev(netdev); + + /* AER disable */ + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), 
board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +#ifdef CONFIG_PM +static const struct dev_pm_ops e1000_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) + SET_RUNTIME_PM_OPS(e1000_runtime_suspend, + e1000_runtime_resume, e1000_idle) +}; +#endif + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = __devexit_p(e1000_remove), +#ifdef CONFIG_PM + .driver.pm = &e1000_pm_ops, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_version); + pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. 
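/*
 * The registration glue above follows the stock new-style PCI driver shape:
 * an ID table the PCI core matches against, a pci_driver tying probe/remove
 * to that table, and module init/exit that only register and unregister the
 * driver.  The skeleton below shows that shape in isolation; the foo_* names
 * and the example device ID are invented, and it builds only as part of a
 * kernel module, not standalone.
 */
#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(foo_pci_tbl) = {
        { PCI_VDEVICE(INTEL, 0x10d3), 0 },      /* example device ID */
        { }                                     /* terminator */
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

static int __devinit foo_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        return pci_enable_device_mem(pdev);
}

static void __devexit foo_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver foo_driver = {
        .name     = "foo",
        .id_table = foo_pci_tbl,
        .probe    = foo_probe,
        .remove   = __devexit_p(foo_remove),
};

static int __init foo_init(void)
{
        return pci_register_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
        pci_unregister_driver(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");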
+ **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* e1000_main.c */ diff --cc drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index faa83cea7331,000000000000..b73194c1c44a mode 100644,000000..100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -1,7936 -1,0 +1,7938 @@@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" + +char ixgbe_driver_name[] = "ixgbe"; +static const char ixgbe_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Network Driver"; +#define MAJ 3 +#define MIN 4 +#define BUILD 8 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ + __stringify(BUILD) "-k" +const char ixgbe_driver_version[] = DRV_VERSION; +static const char ixgbe_copyright[] = + "Copyright (c) 1999-2011 Intel Corporation."; + +static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_82598] = &ixgbe_82598_info, + [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, +}; + +/* ixgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), + board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), + board_82599 }, + + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, + void *p); +static struct notifier_block dca_notifier = { + .notifier_call = ixgbe_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif + +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, + "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gcr; + u32 gpie; + u32 vmdctl; + +#ifdef CONFIG_PCI_IOV + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* turn off device IOV mode */ + gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + gcr &= ~(IXGBE_GCR_EXT_SRIOV); + IXGBE_WRITE_REG(hw, 
IXGBE_GCR_EXT, gcr); + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* set default pool back to 0 */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + IXGBE_WRITE_FLUSH(hw); + + /* take a breather then clean up driver data */ + msleep(100); + + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + adapter->num_vfs = 0; + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; +} + +static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) +{ + if (!test_bit(__IXGBE_DOWN, &adapter->state) && + !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) + schedule_work(&adapter->service_task); +} + +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchog */ + smp_mb__before_clear_bit(); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +} + +struct ixgbe_reg_info { + u32 ofs; + char *name; +}; + +static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { + + /* General Registers */ + {IXGBE_CTRL, "CTRL"}, + {IXGBE_STATUS, "STATUS"}, + {IXGBE_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {IXGBE_EICR, "EICR"}, + + /* RX Registers */ + {IXGBE_SRRCTL(0), "SRRCTL"}, + {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, + {IXGBE_RDLEN(0), "RDLEN"}, + {IXGBE_RDH(0), "RDH"}, + {IXGBE_RDT(0), "RDT"}, + {IXGBE_RXDCTL(0), "RXDCTL"}, + {IXGBE_RDBAL(0), "RDBAL"}, + {IXGBE_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IXGBE_TDBAL(0), "TDBAL"}, + {IXGBE_TDBAH(0), "TDBAH"}, + {IXGBE_TDLEN(0), "TDLEN"}, + {IXGBE_TDH(0), "TDH"}, + {IXGBE_TDT(0), "TDT"}, + {IXGBE_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + + +/* + * ixgbe_regdump - register printout routine + */ +static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) +{ + int i = 0, j = 0; + char rname[16]; + u32 regs[64]; + + switch (reginfo->ofs) { + case IXGBE_SRRCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + break; + case IXGBE_DCA_RXCTRL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + break; + case IXGBE_RDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + break; + case IXGBE_RDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + break; + case IXGBE_RDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + break; + case IXGBE_RXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + break; + case IXGBE_RDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + break; + case IXGBE_RDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + break; + case IXGBE_TDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + break; + case IXGBE_TDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + break; + case IXGBE_TDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + break; + case IXGBE_TDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + break; + case IXGBE_TDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + break; + case IXGBE_TXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + break; + 
default: + pr_info("%-15s %08x\n", reginfo->name, + IXGBE_READ_REG(hw, reginfo->ofs)); + return; + } + + for (i = 0; i < 8; i++) { + snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); + pr_err("%-15s", rname); + for (j = 0; j < 8; j++) + pr_cont(" %08x", regs[i*8+j]); + pr_cont("\n"); + } + +} + +/* + * ixgbe_dump - Print registers, tx-rings and rx-rings + */ +static void ixgbe_dump(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_reg_info *reginfo; + int n = 0; + struct ixgbe_ring *tx_ring; + struct ixgbe_tx_buffer *tx_buffer_info; + union ixgbe_adv_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ixgbe_ring *rx_ring; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state " + "trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", + netdev->name, + netdev->state, + netdev->trans_start, + netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; + reginfo->name; reginfo++) { + ixgbe_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer_info = + &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)tx_buffer_info->dma, + tx_buffer_info->length, + tx_buffer_info->next_to_watch, + (u64)tx_buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] " + "[PlPOIdStDDt Ln] [bi->dma ] " + "leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + pr_info("T [0x%03X] %016llX %016llX %016llX" + " %04X %3X %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)tx_buffer_info->dma, + tx_buffer_info->length, + tx_buffer_info->next_to_watch, + (u64)tx_buffer_info->time_stamp, + tx_buffer_info->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == 
tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer_info->dma != 0) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(tx_buffer_info->dma), + tx_buffer_info->length, true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 16 15 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] " + "[ HeadBuf DD] [bi->dma ] [bi->skb] " + "<-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] " + "[vl er S cks ln] ---------------- [bi->skb] " + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & IXGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter)) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(rx_buffer_info->dma), + rx_ring->rx_buf_len, true); + + if (rx_ring->rx_buf_len + < IXGBE_RXBUFFER_2048) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt( + rx_buffer_info->page_dma + + rx_buffer_info->page_offset + ), + PAGE_SIZE/2, true); + } + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } + +exit: + return; +} + +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); +} + +static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware know the 
driver has taken over */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); +} + +/* + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (direction == -1) + direction = 0; + index = (((direction * 64) + queue) >> 2) & 0x1F; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (queue & 0x3))); + ivar |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((queue & 1) * 8); + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); + break; + } else { + /* tx or rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); + break; + } + default: + break; + } +} + +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + break; + default: + break; + } +} + +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *tx_buffer_info) +{ + if (tx_buffer_info->dma) { + if (tx_buffer_info->mapped_as_page) + dma_unmap_page(tx_ring->dev, + tx_buffer_info->dma, + tx_buffer_info->length, + DMA_TO_DEVICE); + else + dma_unmap_single(tx_ring->dev, + tx_buffer_info->dma, + tx_buffer_info->length, + DMA_TO_DEVICE); + tx_buffer_info->dma = 0; + } + if (tx_buffer_info->skb) { + dev_kfree_skb_any(tx_buffer_info->skb); + tx_buffer_info->skb = NULL; + } + tx_buffer_info->time_stamp = 0; + /* tx_buffer_info must be completely set up in the transmit path */ +} + +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u32 data = 0; + u32 xoff[8] = {0}; + int i; + + if ((hw->fc.current_mode == ixgbe_fc_full) || + (hw->fc.current_mode == ixgbe_fc_rx_pause)) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + break; + default: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + 
&adapter->tx_ring[i]->state); + return; + } else if (!(adapter->dcb_cfg.pfc_mode_enable)) + return; + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + break; + default: + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } + hwstats->pxoffrxc[i] += xoff[i]; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + u8 tc = tx_ring->dcb_tc; + + if (xoff[tc]) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } +} + +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) +{ + return ring->tx_stats.completed; +} + +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); + struct ixgbe_hw *hw = &adapter->hw; + + u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); + u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +{ + u32 tx_done = ixgbe_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = ixgbe_get_tx_pending(tx_ring); + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
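/*
 * Worked example of the head/tail arithmetic in ixgbe_get_tx_pending() above:
 * with a 512-entry ring, head 500 and tail 20 mean the ring has wrapped, so
 * tail + count - head = 20 + 512 - 500 = 32 descriptors are still outstanding.
 * The sketch below is standalone userspace C, not driver code.
 */
#include <assert.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
                               unsigned int count)
{
        if (head == tail)
                return 0;                               /* nothing pending */
        return (head < tail) ? tail - head              /* no wrap */
                             : tail + count - head;     /* wrapped ring */
}

int main(void)
{
        assert(tx_pending(500, 20, 512) == 32);
        assert(tx_pending(10, 40, 512) == 30);
        assert(tx_pending(7, 7, 512) == 0);
        return 0;
}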
+ */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } + + return ret; +} + +/** + * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) +{ + + /* Do the reset outside of interrupt context */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + ixgbe_service_event_schedule(adapter); + } +} + +/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + union ixgbe_adv_tx_desc *tx_desc, *eop_desc; + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int total_bytes = 0, total_packets = 0; + u16 i, eop, count = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + + while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && + (count < q_vector->tx.work_limit)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + + tx_desc->wb.status = 0; + cleaned = (i == eop); + + i++; + if (i == tx_ring->count) + i = 0; + + if (cleaned && tx_buffer_info->skb) { + total_bytes += tx_buffer_info->bytecount; + total_packets += tx_buffer_info->gso_segs; + } + + ixgbe_unmap_and_free_tx_resource(tx_ring, + tx_buffer_info); + } + + tx_ring->tx_stats.completed++; + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + } + + tx_ring->next_to_clean = i; + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_begin(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct ixgbe_hw *hw = &adapter->hw; + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), + tx_ring->next_to_use, eop, + tx_ring->tx_buffer_info[eop].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + /* schedule immediate reset if we believe we hung */ + ixgbe_tx_timeout_reset(adapter); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && + 
(ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && + !test_bit(__IXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return count < q_vector->tx.work_limit; +} + +#ifdef CONFIG_IXGBE_DCA +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rxctrl; + u8 reg_idx = rx_ring->reg_idx; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; + rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; + rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << + IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); + break; + default: + break; + } + rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; + rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); +} + +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 txctrl; + u8 reg_idx = tx_ring->reg_idx; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; + txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; + txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << + IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); + break; + default: + break; + } +} + +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + int cpu = get_cpu(); + long r_idx; + int i; + + if (q_vector->cpu == cpu) + goto out_no_update; + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) +{ + int num_q_vectors; + int i; + + if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) + return; + + /* always use CB2 mode, difference is masked in the CB driver */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + else + num_q_vectors = 1; + + for (i = 0; i < num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[i]); + } +} + +static int 
__ixgbe_notify_dca(struct device *dev, void *data) +{ + struct ixgbe_adapter *adapter = dev_get_drvdata(dev); + unsigned long event = *(unsigned long *)data; + + if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) + return 0; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if we're already enabled, don't do it again */ + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + break; + } + /* Fall Through since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + dca_remove_requester(dev); + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + } + break; + } + + return 0; +} +#endif /* CONFIG_IXGBE_DCA */ + +static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); +} + +/** + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @adapter: address of board private structure + * @rx_desc: advanced rx descriptor + * + * Returns : true if it is FCoE pkt + */ +static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc) +{ + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == + (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << + IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); +} + +/** + * ixgbe_receive_skb - Send a completed packet up the stack + * @adapter: board private structure + * @skb: packet to send up + * @status: hardware indication of status of receive + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @rx_desc: rx descriptor + **/ +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb, u8 status, + struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct napi_struct *napi = &q_vector->napi; + bool is_vlan = (status & IXGBE_RXD_STAT_VP); + u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); + + if (is_vlan && (tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, tag); + + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + napi_gro_receive(napi, skb); + else + netif_rx(skb); +} + +/** + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @adapter: address of board private structure + * @status_err: hardware indication of status of receive + * @skb: skb currently being received and modified + * @status_err: status error value of last descriptor in packet + **/ +static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb, + u32 status_err) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* Rx csum disabled */ + if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + return; + + /* if IP and error */ + if ((status_err & IXGBE_RXD_STAT_IPCS) && + (status_err & IXGBE_RXDADV_ERR_IPE)) { + adapter->hw_csum_rx_error++; + return; + } + + if (!(status_err & IXGBE_RXD_STAT_L4CS)) + return; + + if (status_err & IXGBE_RXDADV_ERR_TCPE) { + u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as + * checksum errors. 
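/*
 * Condensed decision table for the receive checksum handling being finished
 * here: no L4 status bit means the hardware did not check the packet, an L4
 * error counts as bad unless the 82599 zero-UDP-checksum errata applies, and
 * a clean status lets the stack skip software verification.  The sketch below
 * is a simplified standalone model, not the driver's code, and the bit values
 * are invented for the example.
 */
#include <stdio.h>

enum csum_verdict { CSUM_NONE, CSUM_ERROR, CSUM_UNNECESSARY };

#define STAT_IPCS  0x01         /* IP checksum was checked by hardware */
#define ERR_IPE    0x02         /* IP checksum error                   */
#define STAT_L4CS  0x04         /* L4 checksum was checked by hardware */
#define ERR_TCPE   0x08         /* L4 checksum error                   */

static enum csum_verdict classify(unsigned int st, int udp_zero_errata)
{
        if ((st & STAT_IPCS) && (st & ERR_IPE))
                return CSUM_ERROR;              /* bad IP header checksum */
        if (!(st & STAT_L4CS))
                return CSUM_NONE;               /* hardware did not check L4 */
        if (st & ERR_TCPE)
                /* errata case: UDP datagrams sent with checksum 0 may be
                 * flagged bad; treat them as "not checked" instead */
                return udp_zero_errata ? CSUM_NONE : CSUM_ERROR;
        return CSUM_UNNECESSARY;                /* good TCP/UDP checksum */
}

int main(void)
{
        printf("%d %d %d\n",
               classify(STAT_L4CS, 0),                  /* 2: good csum   */
               classify(STAT_L4CS | ERR_TCPE, 0),       /* 1: csum error  */
               classify(STAT_L4CS | ERR_TCPE, 1));      /* 0: errata skip */
        return 0;
}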
+ */ + if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && + (adapter->hw.mac.type == ixgbe_mac_82599EB)) + return; + + adapter->hw_csum_rx_error++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) +{ + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + struct sk_buff *skb; + u16 i = rx_ring->next_to_use; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev) + return; + + while (cleaned_count--) { + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + skb = bi->skb; + + if (!skb) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + goto no_buffers; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + bi->skb = skb; + } + + if (!bi->dma) { + bi->dma = dma_map_single(rx_ring->dev, + skb->data, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, bi->dma)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + bi->dma = 0; + goto no_buffers; + } + } + + if (ring_is_ps_enabled(rx_ring)) { + if (!bi->page) { + bi->page = netdev_alloc_page(rx_ring->netdev); + if (!bi->page) { + rx_ring->rx_stats.alloc_rx_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } else { + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; + } + + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + ixgbe_release_rx_desc(rx_ring, i); + } +} + +static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) +{ + /* HW will not DMA in data larger than the given buffer, even if it + * parses the (NFS, of course) header to be larger. In that case, it + * fills the header buffer and spills the rest into the page. + */ + u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); + u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > IXGBE_RX_HDR_SIZE) + hlen = IXGBE_RX_HDR_SIZE; + return hlen; +} + +/** + * ixgbe_transform_rsc_queue - change rsc queue into a full packet + * @skb: pointer to the last skb in the rsc queue + * + * This function changes a queue full of hw rsc buffers into a completed + * packet. 
It uses the ->prev pointers to find the first packet and then + * turns it into the frag list owner. + **/ +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) +{ + unsigned int frag_list_size = 0; + unsigned int skb_cnt = 1; + + while (skb->prev) { + struct sk_buff *prev = skb->prev; + frag_list_size += skb->len; + skb->prev = NULL; + skb = prev; + skb_cnt++; + } + + skb_shinfo(skb)->frag_list = skb->next; + skb->next = NULL; + skb->len += frag_list_size; + skb->data_len += frag_list_size; + skb->truesize += frag_list_size; + IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; + + return skb; +} + +static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) +{ + return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK); +} + +static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + union ixgbe_adv_rx_desc *rx_desc, *next_rxd; + struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; + struct sk_buff *skb; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + const int current_node = numa_node_id(); +#ifdef IXGBE_FCOE + int ddp_bytes = 0; +#endif /* IXGBE_FCOE */ + u32 staterr; + u16 i; + u16 cleaned_count = 0; + bool pkt_is_rsc = false; + + i = rx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & IXGBE_RXD_STAT_DD) { + u32 upper_len = 0; + + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + + skb = rx_buffer_info->skb; + rx_buffer_info->skb = NULL; + prefetch(skb->data); + + if (ring_is_rsc_enabled(rx_ring)) + pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); + + /* if this is a skb from previous receive DMA will be 0 */ + if (rx_buffer_info->dma) { + u16 hlen; + if (pkt_is_rsc && + !(staterr & IXGBE_RXD_STAT_EOP) && + !skb->prev) { + /* + * When HWRSC is enabled, delay unmapping + * of the first packet. It carries the + * header information, HW may still + * access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + IXGBE_RSC_CB(skb)->delay_unmap = true; + IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; + } else { + dma_unmap_single(rx_ring->dev, + rx_buffer_info->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + rx_buffer_info->dma = 0; + + if (ring_is_ps_enabled(rx_ring)) { + hlen = ixgbe_get_hlen(rx_desc); + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } else { + hlen = le16_to_cpu(rx_desc->wb.upper.length); + } + + skb_put(skb, hlen); + } else { + /* assume packet split since header is unmapped */ + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } + + if (upper_len) { + dma_unmap_page(rx_ring->dev, + rx_buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_buffer_info->page_dma = 0; + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buffer_info->page, + rx_buffer_info->page_offset, + upper_len); + + if ((page_count(rx_buffer_info->page) == 1) && + (page_to_nid(rx_buffer_info->page) == current_node)) + get_page(rx_buffer_info->page); + else + rx_buffer_info->page = NULL; + + skb->len += upper_len; + skb->data_len += upper_len; + skb->truesize += upper_len; + } + + i++; + if (i == rx_ring->count) + i = 0; + + next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); + prefetch(next_rxd); + cleaned_count++; + + if (pkt_is_rsc) { + u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> + IXGBE_RXDADV_NEXTP_SHIFT; + next_buffer = &rx_ring->rx_buffer_info[nextp]; + } else { + next_buffer = &rx_ring->rx_buffer_info[i]; + } + + if (!(staterr & IXGBE_RXD_STAT_EOP)) { + if (ring_is_ps_enabled(rx_ring)) { + rx_buffer_info->skb = next_buffer->skb; + rx_buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + } else { + skb->next = next_buffer->skb; + skb->next->prev = skb; + } + rx_ring->rx_stats.non_eop_descs++; + goto next_desc; + } + + if (skb->prev) { + skb = ixgbe_transform_rsc_queue(skb); + /* if we got here without RSC the packet is invalid */ + if (!pkt_is_rsc) { + __pskb_trim(skb, 0); + rx_buffer_info->skb = skb; + goto next_desc; + } + } + + if (ring_is_rsc_enabled(rx_ring)) { + if (IXGBE_RSC_CB(skb)->delay_unmap) { + dma_unmap_single(rx_ring->dev, + IXGBE_RSC_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + IXGBE_RSC_CB(skb)->dma = 0; + IXGBE_RSC_CB(skb)->delay_unmap = false; + } + } + if (pkt_is_rsc) { + if (ring_is_ps_enabled(rx_ring)) + rx_ring->rx_stats.rsc_count += + skb_shinfo(skb)->nr_frags; + else + rx_ring->rx_stats.rsc_count += + IXGBE_RSC_CB(skb)->skb_cnt; + rx_ring->rx_stats.rsc_flush++; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { + dev_kfree_skb_any(skb); + goto next_desc; + } + + ixgbe_rx_checksum(adapter, rx_desc, skb, staterr); + if (adapter->netdev->features & NETIF_F_RXHASH) + ixgbe_rx_hash(rx_desc, skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +#ifdef IXGBE_FCOE + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, + staterr); - if (!ddp_bytes) ++ if (!ddp_bytes) { ++ dev_kfree_skb_any(skb); + goto next_desc; ++ } + } +#endif /* IXGBE_FCOE */ + ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); + +next_desc: + rx_desc->wb.upper.status_error = 0; + + (*work_done)++; + if (*work_done >= work_to_do) + break; + + /* return some buffers to hardware, one at a time is too slow 
*/ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = ixgbe_desc_unused(rx_ring); + + if (cleaned_count) + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + +#ifdef IXGBE_FCOE + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + unsigned int mss; + + mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); + } +#endif /* IXGBE_FCOE */ + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; +} + +static int ixgbe_clean_rxonly(struct napi_struct *, int); +/** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) +{ + struct ixgbe_q_vector *q_vector; + int i, q_vectors, v_idx, r_idx; + u32 mask; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + q_vector = adapter->q_vector[v_idx]; + /* XXX for_each_set_bit(...) */ + r_idx = find_first_bit(q_vector->rx.idx, + adapter->num_rx_queues); + + for (i = 0; i < q_vector->rx.count; i++) { + u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; + ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); + r_idx = find_next_bit(q_vector->rx.idx, + adapter->num_rx_queues, + r_idx + 1); + } + r_idx = find_first_bit(q_vector->tx.idx, + adapter->num_tx_queues); + + for (i = 0; i < q_vector->tx.count; i++) { + u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; + ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); + r_idx = find_next_bit(q_vector->tx.idx, + adapter->num_tx_queues, + r_idx + 1); + } + + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->eitr = adapter->tx_eitr_param; + else if (q_vector->rx.count) + /* rx or mixed */ + q_vector->eitr = adapter->rx_eitr_param; + + ixgbe_write_eitr(q_vector); + /* If ATR is enabled, set interrupt affinity */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* + * Allocate the affinity_hint cpumask, assign the mask + * for this vector, and set our affinity_hint for + * this irq. 
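/*
 * The affinity handling being set up here pairs an allocated cpumask and an
 * IRQ affinity hint with each MSI-X vector; the hint has to be dropped and
 * the mask freed again when the vectors are torn down.  The fragment below
 * restates that pattern in isolation together with its matching cleanup;
 * struct foo_vector is invented for the example and the code builds only
 * inside a kernel tree.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

struct foo_vector {
        unsigned int irq;
        cpumask_var_t affinity_mask;
};

static int foo_vector_bind(struct foo_vector *v, int cpu)
{
        if (!alloc_cpumask_var(&v->affinity_mask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_set_cpu(cpu, v->affinity_mask);         /* pin to one CPU */
        return irq_set_affinity_hint(v->irq, v->affinity_mask);
}

static void foo_vector_unbind(struct foo_vector *v)
{
        irq_set_affinity_hint(v->irq, NULL);            /* drop the hint */
        free_cpumask_var(v->affinity_mask);
}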
+ */ + if (!alloc_cpumask_var(&q_vector->affinity_mask, + GFP_KERNEL)) + return; + cpumask_set_cpu(v_idx, q_vector->affinity_mask); + irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, + q_vector->affinity_mask); + } + } + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, + v_idx); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_set_ivar(adapter, -1, 1, v_idx); + break; + + default: + break; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + if (adapter->num_vfs) + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + else + mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see ixgbe_param.c) + **/ +static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring_container *ring_container) +{ + u64 bytes_perint; + struct ixgbe_adapter *adapter = q_vector->adapter; + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate management + * 0-20MB/s lowest (100000 ints/s) + * 20-100MB/s low (20000 ints/s) + * 100-1249MB/s bulk (8000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = 1000000/q_vector->eitr; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > adapter->eitr_low) + itr_setting = low_latency; + break; + case low_latency: + if (bytes_perint > adapter->eitr_high) + itr_setting = bulk_latency; + else if (bytes_perint <= adapter->eitr_low) + itr_setting = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= adapter->eitr_high) + itr_setting = low_latency; + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +/** + * ixgbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
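+ * (82598 needs the interval written to both 16-bit halves of the
+ * register; 82599/X540 set WDIS so the write does not immediately
+ * assert an interrupt.)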
+ */ +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* must write high and low 16 bits to reset counter */ + itr_reg |= (itr_reg << 16); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* + * 82599 and X540 can support a value of zero, so allow it for + * max interrupt rate, but there is an errata where it can + * not be zero with RSC + */ + if (itr_reg == 8 && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + itr_reg = 0; + + /* + * set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + break; + default: + break; + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); +} + +static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) +{ + u32 new_itr = q_vector->eitr; + u8 current_itr; + + ixgbe_update_itr(q_vector, &q_vector->tx); + ixgbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 100000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 8000; + break; + default: + break; + } + + if (new_itr != q_vector->eitr) { + /* do an exponential smoothing */ + new_itr = ((q_vector->eitr * 9) + new_itr)/10; + + /* save the algorithm value here */ + q_vector->eitr = new_itr; + + ixgbe_write_eitr(q_vector); + } +} + +/** + * ixgbe_check_overtemp_subtask - check for over tempurature + * @adapter: pointer to adapter + **/ +static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. + * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & IXGBE_EICR_GPI_SDP0) && + !(eicr & IXGBE_EICR_LSC)) + return; + + if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { + u32 autoneg; + bool link_up = false; + + hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + + if (link_up) + return; + } + + /* Check if this is not due to overtemp */ + if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) + return; + + break; + default: + if (!(eicr & IXGBE_EICR_GPI_SDP0)) + return; + break; + } + e_crit(drv, + "Network adapter has been stopped because it has over heated. " + "Restart the computer. 
If the problem persists, " + "power off the system and replace the adapter\n"); + + adapter->interrupt_event = 0; +} + +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && + (eicr & IXGBE_EICR_GPI_SDP1)) { + e_crit(probe, "Fan has stopped, replace the adapter\n"); + /* write to clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + } +} + +static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (eicr & IXGBE_EICR_GPI_SDP2) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + ixgbe_service_event_schedule(adapter); + } + } + + if (eicr & IXGBE_EICR_GPI_SDP1) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + ixgbe_service_event_schedule(adapter); + } + } +} + +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->lsc_int++; + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); + IXGBE_WRITE_FLUSH(hw); + ixgbe_service_event_schedule(adapter); + } +} + +static irqreturn_t ixgbe_msix_lsc(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + + /* + * Workaround for Silicon errata. Use clear-by-write instead + * of clear-by-read. Reading with EICS will return the + * interrupt causes without clearing, which later be done + * with the write to EICR. 
+ */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICS); + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + if (eicr & IXGBE_EICR_MAILBOX) + ixgbe_msg_task(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Handle Flow Director Full threshold interrupt */ + if (eicr & IXGBE_EICR_FLOW_DIR) { + int reinit_count = 0; + int i; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, + &ring->state)) + reinit_count++; + } + if (reinit_count) { + /* no more flow director interrupts until after init */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); + eicr &= ~IXGBE_EICR_FLOW_DIR; + adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + ixgbe_service_event_schedule(adapter); + } + } + ixgbe_check_sfp_event(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + } + } + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, eicr); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr & + ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE)); + + return IRQ_HANDLED; +} + +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *tx_ring; + int i, r_idx; + + if (!q_vector->tx.count) + return IRQ_HANDLED; + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + tx_ring = adapter->tx_ring[r_idx]; + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + /* EIAM disabled interrupts (on this vector) for us */ + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) + * @irq: unused + * @data: pointer to our q_vector struct for this interrupt vector + **/ +static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + 
struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *rx_ring; + int r_idx; + int i; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + rx_ring = adapter->rx_ring[r_idx]; + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + if (!q_vector->rx.count) + return IRQ_HANDLED; + + /* EIAM disabled interrupts (on this vector) for us */ + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int r_idx; + int i; + + if (!q_vector->tx.count && !q_vector->rx.count) + return IRQ_HANDLED; + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + ring = adapter->tx_ring[r_idx]; + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + ring = adapter->rx_ring[r_idx]; + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + /* EIAM disabled interrupts (on this vector) for us */ + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! + **/ +static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *rx_ring = NULL; + int work_done = 0; + long r_idx; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + rx_ring = adapter->rx_ring[r_idx]; + + ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); + + /* If all Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); + } + + return work_done; +} + +/** + * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean more than one rx queue associated with a + * q_vector. 
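+ * The budget is split evenly across the vector's rx rings, but never
+ * below one packet per ring; e.g. a budget of 64 over 4 rings gives
+ * each ring 16 packets per poll.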
+ **/ +static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring = NULL; + int work_done = 0, i; + long r_idx; + bool tx_clean_complete = true; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + ring = adapter->tx_ring[r_idx]; + tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + budget /= (q_vector->rx.count ?: 1); + budget = max(budget, 1); + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + ring = adapter->rx_ring[r_idx]; + ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + ring = adapter->rx_ring[r_idx]; + /* If all Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); + return 0; + } + + return work_done; +} + +/** + * ixgbe_clean_txonly - msix (aka one shot) tx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! 
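+ * When tx cleanup does not complete, the full budget is reported so
+ * NAPI keeps polling instead of re-enabling the interrupt.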
+ **/ +static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *tx_ring = NULL; + int work_done = 0; + long r_idx; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + tx_ring = adapter->tx_ring[r_idx]; + + if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) + work_done = budget; + + /* If all Tx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->tx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); + } + + return work_done; +} + +static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, + int r_idx) +{ + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; + + set_bit(r_idx, q_vector->rx.idx); + q_vector->rx.count++; + rx_ring->q_vector = q_vector; +} + +static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, + int t_idx) +{ + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; + + set_bit(t_idx, q_vector->tx.idx); + q_vector->tx.count++; + tx_ring->q_vector = q_vector; + q_vector->tx.work_limit = a->tx_work_limit; +} + +/** + * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) +{ + int q_vectors; + int v_start = 0; + int rxr_idx = 0, txr_idx = 0; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int i, j; + int rqpv, tqpv; + int err = 0; + + /* No mapping required if MSI-X is disabled. */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + goto out; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* + * The ideal configuration... + * We have enough vectors to map one per queue. + */ + if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) + map_vector_to_rxq(adapter, v_start, rxr_idx); + + for (; txr_idx < txr_remaining; v_start++, txr_idx++) + map_vector_to_txq(adapter, v_start, txr_idx); + + goto out; + } + + /* + * If we don't have enough vectors for a 1-to-1 + * mapping, we'll have to group them so there are + * multiple queues per vector. + */ + /* Re-adjusting *qpv takes care of the remainder. 
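+ * e.g. 6 remaining rx rings over 4 vectors come out as 2/2/1/1.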
*/ + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); + for (j = 0; j < rqpv; j++) { + map_vector_to_rxq(adapter, i, rxr_idx); + rxr_idx++; + rxr_remaining--; + } + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); + for (j = 0; j < tqpv; j++) { + map_vector_to_txq(adapter, i, txr_idx); + txr_idx++; + txr_remaining--; + } + } +out: + return err; +} + +/** + * ixgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irqreturn_t (*handler)(int, void *); + int i, vector, q_vectors, err; + int ri = 0, ti = 0; + + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + err = ixgbe_map_rings_to_vectors(adapter); + if (err) + return err; + +#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \ + ? &ixgbe_msix_clean_many : \ + (_v)->rx.count ? &ixgbe_msix_clean_rx : \ + (_v)->tx.count ? &ixgbe_msix_clean_tx : \ + NULL) + for (vector = 0; vector < q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + handler = SET_HANDLER(q_vector); + + if (handler == &ixgbe_msix_clean_rx) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (handler == &ixgbe_msix_clean_tx) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else if (handler == &ixgbe_msix_clean_many) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(adapter->msix_entries[vector].vector, + handler, 0, q_vector->name, + q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto free_queue_irqs; + } + } + + sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); + err = request_irq(adapter->msix_entries[vector].vector, + ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter); + if (err) { + e_err(probe, "request_irq for msix_lsc failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + for (i = vector - 1; i >= 0; i--) + free_irq(adapter->msix_entries[--vector].vector, + adapter->q_vector[i]); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + bool flush) +{ + u32 mask; + + mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= IXGBE_EIMS_GPI_SDP0; + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + mask |= IXGBE_EIMS_GPI_SDP1; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask |= IXGBE_EIMS_ECC; + mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP2; + if (adapter->num_vfs) + mask |= IXGBE_EIMS_MAILBOX; + break; + default: + break; + } + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + mask |= IXGBE_EIMS_FLOW_DIR; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + if (queues) + ixgbe_irq_enable_queues(adapter, 
~0); + if (flush) + IXGBE_WRITE_FLUSH(&adapter->hw); + + if (adapter->num_vfs > 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); + } +} + +/** + * ixgbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ixgbe_intr(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + + /* + * Workaround for silicon errata on 82598. Mask the interrupts + * before the read of EICR. + */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read + * therefore no explict interrupt disable is necessary */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + if (!eicr) { + /* + * shared interrupt alert! + * make sure interrupts are enabled because the read will + * have disabled interrupts due to EIAM + * finish the workaround of silicon errata on 82598. Unmask + * the interrupt that we masked before the EICR read. + */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + ixgbe_check_sfp_event(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + } + } + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, eicr); + + if (napi_schedule_prep(&(q_vector->napi))) { + /* would disable interrupts here but EIAM disabled it */ + __napi_schedule(&(q_vector->napi)); + } + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * ixgbe_poll will re-enable the queue interrupts + */ + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) +{ + int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES); + bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES); + q_vector->rx.count = 0; + q_vector->tx.count = 0; + } +} + +/** + * ixgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
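+ * Preference order is MSI-X, then MSI, then legacy (shared) INTx.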
+ **/ +static int ixgbe_request_irq(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + err = ixgbe_request_msix_irqs(adapter); + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, + netdev->name, adapter); + } else { + err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, + netdev->name, adapter); + } + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ixgbe_free_irq(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i, q_vectors; + + q_vectors = adapter->num_msix_vectors; + + i = q_vectors - 1; + free_irq(adapter->msix_entries[i].vector, adapter); + + i--; + for (; i >= 0; i--) { + /* free only the irqs that were actually requested */ + if (!adapter->q_vector[i]->rx.count && + !adapter->q_vector[i]->tx.count) + continue; + + free_irq(adapter->msix_entries[i].vector, + adapter->q_vector[i]); + } + + ixgbe_reset_q_vectors(adapter); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * ixgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); + if (adapter->num_vfs > 32) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); + break; + default: + break; + } + IXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_msix_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_EITR(0), + EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); + + ixgbe_set_ivar(adapter, 0, 0, 0); + ixgbe_set_ivar(adapter, 1, 0, 0); + + map_vector_to_rxq(adapter, 0, 0); + map_vector_to_txq(adapter, 0, 0); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
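+ * The queue is disabled while the base, length, head and tail
+ * registers are programmed, then re-enabled and polled until
+ * TXDCTL.ENABLE reads back as set.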
+ **/ +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), + txdctl & ~IXGBE_TXDCTL_ENABLE); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), + (tdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_tx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); + ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); + + /* configure fetching thresholds */ + if (adapter->rx_itr_setting == 0) { + /* cannot set wthresh when itr==0 */ + txdctl &= ~0x007F0000; + } else { + /* enable WTHRESH=8 descriptors, to encourage burst writeback */ + txdctl |= (8 << 16); + } + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + /* PThresh workaround for Tx hang with DFP enabled. */ + txdctl |= 32; + } + + /* reinitialize flowdirector state */ + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && + adapter->atr_sample_rate) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + + clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); + + /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* poll to verify queue is enabled */ + do { + usleep_range(1000, 2000); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rttdcs; + u32 reg; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + /* disable the arbiter while setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + /* set transmit pool layout */ + switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + case (IXGBE_FLAG_SRIOV_ENABLED): + IXGBE_WRITE_REG(hw, IXGBE_MTQC, + (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); + break; + default: + if (!tcs) + reg = IXGBE_MTQC_64Q_1PB; + else if (tcs <= 4) + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Enable Security TX Buffer IFG for multiple pb */ + if (tcs) { + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + } + break; + } + + /* re-enable the arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); +} + +/** + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
+ **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 dmatxctl; + u32 i; + + ixgbe_setup_mtqc(adapter); + + if (hw->mac.type != ixgbe_mac_82598EB) { + /* DMATXCTL.EN must be before Tx queues are enabled */ + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + } + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + u32 srrctl; + u8 reg_idx = rx_ring->reg_idx; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: { + struct ixgbe_ring_feature *feature = adapter->ring_feature; + const int mask = feature[RING_F_RSS].mask; + reg_idx = reg_idx & mask; + } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + default: + break; + } + + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); + + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + if (adapter->num_vfs) + srrctl |= IXGBE_SRRCTL_DROP_EN; + + srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK; + + if (ring_is_ps_enabled(rx_ring)) { +#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER + srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#else + srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#endif + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else { + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + } + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D}; + u32 mrqc = 0, reta = 0; + u32 rxcsum; + int i, j; + u8 tcs = netdev_get_num_tc(adapter->netdev); + int maxq = adapter->ring_feature[RING_F_RSS].indices; + + if (tcs) + maxq = min(maxq, adapter->num_tx_queues / tcs); + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + + /* Fill out redirection table */ + for (i = 0, j = 0; i < 128; i++, j++) { + if (j == maxq) + j = 0; + /* reta = 4-byte sliding window of + * 0x00..(indices-1)(indices-1)00..etc. 
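+ * (j * 0x11 puts the queue index in both nibbles of each byte, so
+ * with 4 RSS queues every RETA dword holds 0x00, 0x11, 0x22, 0x33)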
*/ + reta = (reta << 8) | (j * 0x11); + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + } + + /* Disable indicating checksum in descriptor, enables RSS hash */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB && + (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { + mrqc = IXGBE_MRQC_RSSEN; + } else { + int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED + | IXGBE_FLAG_SRIOV_ENABLED); + + switch (mask) { + case (IXGBE_FLAG_RSS_ENABLED): + if (!tcs) + mrqc = IXGBE_MRQC_RSSEN; + else if (tcs <= 4) + mrqc = IXGBE_MRQC_RTRSS4TCEN; + else + mrqc = IXGBE_MRQC_RTRSS8TCEN; + break; + case (IXGBE_FLAG_SRIOV_ENABLED): + mrqc = IXGBE_MRQC_VMDQEN; + break; + default: + break; + } + } + + /* Perform hash on these packet types */ + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); +} + +/** + * ixgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure + * @index: index of ring to set + **/ +static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; + int rx_buf_len; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_rsc_enabled(ring)) + return; + + rx_buf_len = ring->rx_buf_len; + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); + rscctrl |= IXGBE_RSCCTL_RSCEN; + /* + * we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + * than 65535 + */ + if (ring_is_ps_enabled(ring)) { +#if (MAX_SKB_FRAGS > 16) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +#elif (MAX_SKB_FRAGS > 8) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; +#elif (MAX_SKB_FRAGS > 4) + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#else + rscctrl |= IXGBE_RSCCTL_MAXDESC_1; +#endif + } else { + if (rx_buf_len < IXGBE_RXBUFFER_4096) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + else if (rx_buf_len < IXGBE_RXBUFFER_8192) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; + else + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; + } + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); +} + +/** + * ixgbe_set_uta - Set unicast filter table address + * @adapter: board private structure + * + * The unicast table address is a register array of 32-bit registers. 
+ * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void ixgbe_set_uta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + /* The UTA table only exists on 82599 hardware and newer */ + if (hw->mac.type < ixgbe_mac_82599EB) + return; + + /* we only need to do this if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); +} + +#define IXGBE_MAX_RX_DESC_POLL 10 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + ixgbe_disable_rx_queue(adapter, ring); + + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); + ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); + + ixgbe_configure_srrctl(adapter, ring); + ixgbe_configure_rscctl(adapter, ring); + + /* If operating in IOV mode set RLPML for X540 */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + hw->mac.type == ixgbe_mac_X540) { + rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; + rxdctl |= ((ring->netdev->mtu + ETH_HLEN + + ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); + } + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* + * enable cache line friendly hardware writes: + * PTHRESH=32 descriptors (half the internal cache), + * this also 
removes ugly rx_no_buffer_count increment + * HTHRESH=4 descriptors (to minimize latency on fetch) + * WTHRESH=8 burst writeback up to two cache lines + */ + rxdctl &= ~0x3FFFFF; + rxdctl |= 0x080420; + } + + /* enable receive descriptor ring */ + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + ixgbe_rx_desc_queue_enable(adapter, ring); + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); +} + +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int p; + + /* PSRTYPE must be initialized in non 82598 adapters */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) + psrtype |= (adapter->num_rx_queues_per_pool << 29); + + for (p = 0; p < adapter->num_rx_pools; p++) + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), + psrtype); +} + +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gcr_ext; + u32 vt_reg_bits; + u32 reg_offset, vf_shift; + u32 vmdctl; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; + vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); + + vf_shift = adapter->num_vfs % 32; + reg_offset = (adapter->num_vfs > 32) ? 1 : 0; + + /* Enable only the PF's pool for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ + hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); + + /* + * Set up VF register offsets for selected VT Mode, + * i.e. 
32 or 64 VFs for SR-IOV + */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + + /* enable Tx loopback for VF/PF communication */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + /* Enable MAC Anti-Spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, + (adapter->antispoofing_enabled = + (adapter->num_vfs != 0)), + adapter->num_vfs); +} + +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int rx_buf_len; + struct ixgbe_ring *rx_ring; + int i; + u32 mhadd, hlreg0; + + /* Decide whether to use packet split mode or not */ + /* On by default */ + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + + /* Do not use packet split if we're in SR-IOV Mode */ + if (adapter->num_vfs) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Disable packet split due to 82599 erratum #45 */ + if (hw->mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Set the RX buffer length according to the mode */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + rx_buf_len = IXGBE_RX_HDR_SIZE; + } else { + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && + (netdev->mtu <= ETH_DATA_LEN)) + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); + } + +#ifdef IXGBE_FCOE + /* adjust max frame to be able to do baby jumbo for FCoE */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; + +#endif /* IXGBE_FCOE */ + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + rx_ring->rx_buf_len = rx_buf_len; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) + set_ring_ps_enabled(rx_ring); + else + clear_ring_ps_enabled(rx_ring); + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); + else + clear_ring_rsc_enabled(rx_ring); + +#ifdef IXGBE_FCOE + if (netdev->features & NETIF_F_FCOE_MTU) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + if ((i >= f->mask) && (i < f->mask + f->indices)) { + clear_ring_ps_enabled(rx_ring); + if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) + rx_ring->rx_buf_len = + IXGBE_FCOE_JUMBO_FRAME_SIZE; + } else if (!ring_is_rsc_enabled(rx_ring) && + !ring_is_ps_enabled(rx_ring)) { + rx_ring->rx_buf_len = + IXGBE_FCOE_JUMBO_FRAME_SIZE; + } + } +#endif /* IXGBE_FCOE */ + } +} + +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + /* + * For VMDq support of different descriptor types or + * buffer sizes through the use of multiple SRRCTL + * registers, RDRXCTL.MVMEN must be set to 
1 + * + * also, the manual doesn't mention it clearly but DCA hints + * will only use queue 0's tags unless this bit is set. Side + * effects of setting this bit are only that SRRCTL must be + * fully programmed [0..15] + */ + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Disable RSC for ACK packets */ + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + /* hardware requires some bits to be set by default */ + rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + break; + default: + /* We should do nothing since we don't know this hardware */ + return; + } + + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); +} + +/** + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl; + + /* disable receives while setting up the descriptors */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + ixgbe_setup_psrtype(adapter); + ixgbe_setup_rdrxctl(adapter); + + /* Program registers for the distribution of queues */ + ixgbe_setup_mrqc(adapter); + + ixgbe_set_uta(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ixgbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + /* disable drop enable for 82598 parts */ + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + + /* enable all receives */ + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = adapter->num_vfs; + + /* add VID to filter table */ + hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); + set_bit(vid, adapter->active_vlans); +} + +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = adapter->num_vfs; + + /* remove VID from filter table */ + hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); + clear_bit(vid, adapter->active_vlans); +} + +/** + * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping 
+ * @adapter: driver data + */ +static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +/** + * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +{ + u16 vid; + + ixgbe_vlan_rx_add_vid(adapter->netdev, 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgbe_vlan_rx_add_vid(adapter->netdev, vid); +} + +/** + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int ixgbe_write_uc_addr_list(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + unsigned int vfn = adapter->num_vfs; + unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + /* return error if we do not support writing to RAR table */ + if (!hw->mac.ops.set_rar) + return -ENOMEM; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, + vfn, IXGBE_RAH_AV); + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) + hw->mac.ops.clear_rar(hw, rar_entries); + + return count; +} + +/** + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
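+ * If the unicast list does not fit in the RAR table, the device is
+ * put into unicast promiscuous mode instead.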
+ **/ +void ixgbe_set_rx_mode(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* set all bits that we expect to always be set */ + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ + fctrl |= IXGBE_FCTRL_PMCF; + + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); + /* don't hardware filter vlans in promisc mode */ + ixgbe_vlan_filter_disable(adapter); + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } else { + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + hw->mac.ops.update_mc_addr_list(hw, netdev); + vmolr |= IXGBE_VMOLR_ROMPE; + } + ixgbe_vlan_filter_enable(adapter); + hw->addr_ctrl.user_set_promisc = false; + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ixgbe_write_uc_addr_list(netdev); + if (count < 0) { + fctrl |= IXGBE_FCTRL_UPE; + vmolr |= IXGBE_VMOLR_ROPE; + } + } + + if (adapter->num_vfs) { + ixgbe_restore_vf_multicasts(adapter); + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_ROPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (netdev->features & NETIF_F_HW_VLAN_RX) + ixgbe_vlan_strip_enable(adapter); + else + ixgbe_vlan_strip_disable(adapter); +} + +static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + struct ixgbe_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* legacy and MSI only use one vector */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + q_vector = adapter->q_vector[q_idx]; + napi = &q_vector->napi; + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + if (!q_vector->rx.count || !q_vector->tx.count) { + if (q_vector->tx.count == 1) + napi->poll = &ixgbe_clean_txonly; + else if (q_vector->rx.count == 1) + napi->poll = &ixgbe_clean_rxonly; + } + } + + napi_enable(napi); + } +} + +static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + struct ixgbe_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* legacy and MSI only use one vector */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +#ifdef CONFIG_IXGBE_DCB +/* + * ixgbe_configure_dcb - Configure DCB hardware + * @adapter: ixgbe adapter struct + * + * This is called by the driver on open to configure the DCB hardware. + * This is also called by the gennetlink interface when reconfiguring + * the DCB state. 
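+ * In CEE mode the TC credits are recalculated from the current max
+ * frame size; in IEEE mode the stored ETS/PFC settings are replayed
+ * through dcbnl_ops.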
+ */ +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 65536); + return; + } + + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 32768); + + + /* Enable VLAN tag insert/strip */ + adapter->netdev->features |= NETIF_F_HW_VLAN_RX; + + hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); + + /* reconfigure the hardware */ + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { +#ifdef CONFIG_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_RX_CONFIG); + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + } else { + struct net_device *dev = adapter->netdev; + + if (adapter->ixgbe_ieee_ets) + dev->dcbnl_ops->ieee_setets(dev, + adapter->ixgbe_ieee_ets); + if (adapter->ixgbe_ieee_pfc) + dev->dcbnl_ops->ieee_setpfc(dev, + adapter->ixgbe_ieee_pfc); + } + + /* Enable RSS Hash per TC */ + if (hw->mac.type != ixgbe_mac_82598EB) { + int i; + u32 reg = 0; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 msb = 0; + u8 cnt = adapter->netdev->tc_to_txq[i].count; + + while (cnt >>= 1) + msb++; + + reg |= msb << IXGBE_RQTC_SHIFT_TC(i); + } + IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); + } +} + +#endif + +static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) +{ + int hdrm = 0; + int num_tc = netdev_get_num_tc(adapter->netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || + adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + hdrm = 64 << adapter->fdir_pballoc; + + hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL); +} + +static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) + ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); + + hlist_for_each_entry_safe(filter, node, node2, + &adapter->fdir_filter_list, fdir_node) { + ixgbe_fdir_write_perfect_filter_82599(hw, + &filter->filter, + filter->sw_idx, + (filter->action == IXGBE_FDIR_DROP_QUEUE) ? 
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[filter->action]->reg_idx);
+ }
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+static void ixgbe_configure(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ ixgbe_configure_pb(adapter);
+#ifdef CONFIG_IXGBE_DCB
+ ixgbe_configure_dcb(adapter);
+#endif
+
+ ixgbe_set_rx_mode(netdev);
+ ixgbe_restore_vlan(adapter);
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->atr_sample_rate =
+ adapter->atr_sample_rate;
+ ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+ } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ ixgbe_init_fdir_perfect_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ ixgbe_fdir_filter_restore(adapter);
+ }
+ ixgbe_configure_virtualization(adapter);
+
+ ixgbe_configure_tx(adapter);
+ ixgbe_configure_rx(adapter);
+}
+
+static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_sfp_ftl_active:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ixgbe_sfp_link_config - set up SFP+ link
+ * @adapter: pointer to private adapter struct
+ **/
+static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
+{
+ /*
+ * We are assuming the worst case scenario here, and that
+ * is that an SFP was inserted/removed after the reset
+ * but before SFP detection was enabled.  As such the best
+ * solution is to just start searching as soon as we start
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+}
+
+/**
+ * ixgbe_non_sfp_link_config - set up non-SFP+ link
+ * @hw: pointer to private hardware struct
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
+{
+ u32 autoneg;
+ bool negotiation, link_up = false;
+ u32 ret = IXGBE_ERR_LINK_SETUP;
+
+ if (hw->mac.ops.check_link)
+ ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (ret)
+ goto link_cfg_out;
+
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+ ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+ &negotiation);
+ if (ret)
+ goto link_cfg_out;
+
+ if (hw->mac.ops.setup_link)
+ ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
+link_cfg_out:
+ return ret;
+}
+
+static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gpie = 0;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD;
+ gpie |= IXGBE_GPIE_EIAME;
+ /*
+ * use EIAM to auto-mask when MSI-X interrupt is asserted
+ * this saves a register write for every interrupt
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ break;
+ }
+ } else {
+ /* legacy interrupts, use EIAM to auto-mask when reading EICR,
+ * specifically only auto mask tx and rx interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ }
+
+ /* XXX: to interrupt immediately for EICS writes, enable this */
+ /* gpie |= IXGBE_GPIE_EIMEN; */
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
+
+ /* Enable fan failure interrupt */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+ gpie |= IXGBE_SDP1_GPIEN;
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ gpie |= IXGBE_SDP1_GPIEN;
+ gpie |= IXGBE_SDP2_GPIEN;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+}
+
+static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+ u32 ctrl_ext;
+
+ ixgbe_get_hw_control(adapter);
+ ixgbe_setup_gpie(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ ixgbe_configure_msix(adapter);
+ else
+ ixgbe_configure_msi_and_legacy(adapter);
+
+ /* enable the optics for both multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.enable_tx_laser(hw);
+
+ clear_bit(__IXGBE_DOWN, &adapter->state);
+ ixgbe_napi_enable_all(adapter);
+
+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+ ixgbe_irq_enable(adapter, true, true);
+
+ /*
+ * If this adapter has a fan, check to see if we had a failure
+ * before we enabled the interrupt.
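+ * (An ESDP register with SDP1 set at this point means the fan has
+ * already failed.)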
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ e_crit(drv, "Fan has stopped, replace the adapter\n");
+ }
+
+ /* enable transmits */
+ netif_tx_start_all_queues(adapter->netdev);
+
+ /* bring the link up in the watchdog, this could race with our first
+ * link up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->service_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ return 0;
+}
+
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ /* put off any impending NetWatchDogTimeout */
+ adapter->netdev->trans_start = jiffies;
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ ixgbe_down(adapter);
+ /*
+ * If SR-IOV enabled then wait a bit before bringing the adapter
+ * back up to give the VFs time to respond to the reset. The
+ * two second wait is based upon the watchdog timer cycle in
+ * the VF driver.
+ */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ msleep(2000);
+ ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
+int ixgbe_up(struct ixgbe_adapter *adapter)
+{
+ /* hardware has been reset, we need to reload some things */
+ ixgbe_configure(adapter);
+
+ return ixgbe_up_complete(adapter);
+}
+
+void ixgbe_reset(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* lock SFP init bit to prevent race conditions with the watchdog */
+ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* clear all SFP and link config related flags while holding SFP_INIT */
+ adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
+ IXGBE_FLAG2_SFP_NEEDS_RESET);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
+ err = hw->mac.ops.init_hw(hw);
+ switch (err) {
+ case 0:
+ case IXGBE_ERR_SFP_NOT_PRESENT:
+ case IXGBE_ERR_SFP_NOT_SUPPORTED:
+ break;
+ case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+ e_dev_err("master disable timed out\n");
+ break;
+ case IXGBE_ERR_EEPROM_VERSION:
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated with "
+ "your hardware. If you are experiencing problems "
+ "please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
+ break;
+ default:
+ e_dev_err("Hardware Error: %d\n", err);
+ }
+
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+ /* reprogram the RAR[0] in case user changed it.
*/ + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, + IXGBE_RAH_AV); +} + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer_info; + + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + if (rx_buffer_info->dma) { + dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer_info->dma = 0; + } + if (rx_buffer_info->skb) { + struct sk_buff *skb = rx_buffer_info->skb; + rx_buffer_info->skb = NULL; + do { + struct sk_buff *this = skb; + if (IXGBE_RSC_CB(this)->delay_unmap) { + dma_unmap_single(dev, + IXGBE_RSC_CB(this)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + IXGBE_RSC_CB(this)->dma = 0; + IXGBE_RSC_CB(skb)->delay_unmap = false; + } + skb = skb->prev; + dev_kfree_skb(this); + } while (skb); + } + if (!rx_buffer_info->page) + continue; + if (rx_buffer_info->page_dma) { + dma_unmap_page(dev, rx_buffer_info->page_dma, + PAGE_SIZE / 2, DMA_FROM_DEVICE); + rx_buffer_info->page_dma = 0; + } + put_page(rx_buffer_info->page); + rx_buffer_info->page = NULL; + rx_buffer_info->page_offset = 0; + } + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * ixgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_clean_tx_ring(adapter->tx_ring[i]); +} + +static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) +{ + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node, node2, + &adapter->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void ixgbe_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = 
adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 rxctrl; + int i; + int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* signal that we are down to the interrupt handler */ + set_bit(__IXGBE_DOWN, &adapter->state); + + /* disable receives */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + usleep_range(10000, 20000); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ixgbe_irq_disable(adapter); + + ixgbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | + IXGBE_FLAG2_RESET_REQUESTED); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + + /* disable receive for all VFs and wait one second */ + if (adapter->num_vfs) { + /* ping all the active vfs to let them know we are going down */ + ixgbe_ping_all_vfs(adapter); + + /* Disable all VFTE/VFRE TX/RX */ + ixgbe_disable_tx_rx(adapter); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + } + + /* Cleanup the affinity_hint CPU mask memory and callback */ + for (i = 0; i < num_q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(adapter->msix_entries[i]. vector, NULL); + /* release the CPU mask memory */ + free_cpumask_var(q_vector->affinity_mask); + } + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* Disable the Tx DMA engine on 82599 and X540 */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); + break; + default: + break; + } + + if (!pci_channel_offline(adapter->pdev)) + ixgbe_reset(adapter); + + /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) + hw->mac.ops.disable_tx_laser(hw); + + ixgbe_clean_all_tx_rings(adapter); + ixgbe_clean_all_rx_rings(adapter); + +#ifdef CONFIG_IXGBE_DCA + /* since we reset the hardware DCA settings were cleared */ + ixgbe_setup_dca(adapter); +#endif +} + +/** + * ixgbe_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +static int ixgbe_poll(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + int tx_clean_complete, work_done = 0; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); + ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], 
&work_done, budget); + + if (!tx_clean_complete) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); + } + return work_done; +} + +/** + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbe_tx_timeout(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + ixgbe_tx_timeout_reset(adapter); +} + +/** + * ixgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + f->mask = 0xF; + adapter->num_rx_queues = f->indices; + adapter->num_tx_queues = f->indices; + ret = true; + } else { + ret = false; + } + + return ret; +} + +/** + * ixgbe_set_fdir_queues: Allocate queues for Flow Director + * @adapter: board private structure to initialize + * + * Flow Director is an advanced Rx filter, attempting to get Rx flows back + * to the original CPU that initiated the Tx session. This runs in addition + * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the + * Rx load across CPUs using RSS. + * + **/ +static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; + + f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); + f_fdir->mask = 0; + + /* Flow Director must have RSS enabled */ + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { + adapter->num_tx_queues = f_fdir->indices; + adapter->num_rx_queues = f_fdir->indices; + ret = true; + } else { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) + * @adapter: board private structure to initialize + * + * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. + * The ring feature mask is not used as a mask for FCoE, as it can take any 8 + * rx queues out of the max number of rx queues, instead, it is used as the + * index of the first rx queue used by FCoE. 
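+ * The FCoE queue pairs are appended after whatever RSS or Flow Director
+ * layout was selected, growing num_rx_queues/num_tx_queues by f->indices.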
+ * + **/ +static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + f->indices = min((int)num_online_cpus(), f->indices); + + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + e_info(probe, "FCoE enabled with RSS\n"); + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + ixgbe_set_fdir_queues(adapter); + else + ixgbe_set_rss_queues(adapter); + } + + /* adding FCoE rx rings to the end */ + f->mask = adapter->num_rx_queues; + adapter->num_rx_queues += f->indices; + adapter->num_tx_queues += f->indices; + + return true; +} +#endif /* IXGBE_FCOE */ + +/* Artificial max queue cap per traffic class in DCB mode */ +#define DCB_QUEUE_CAP 8 + +#ifdef CONFIG_IXGBE_DCB +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + int per_tc_q, q, i, offset = 0; + struct net_device *dev = adapter->netdev; + int tcs = netdev_get_num_tc(dev); + + if (!tcs) + return false; + + /* Map queue offset and counts onto allocated tx queues */ + per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP); + q = min((int)num_online_cpus(), per_tc_q); + + for (i = 0; i < tcs; i++) { + netdev_set_prio_tc_map(dev, i, i); + netdev_set_tc_queue(dev, i, q, offset); + offset += q; + } + + adapter->num_tx_queues = q * tcs; + adapter->num_rx_queues = q * tcs; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + int tc; + struct ixgbe_ring_feature *f = + &adapter->ring_feature[RING_F_FCOE]; + + tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); + f->indices = dev->tc_to_txq[tc].count; + f->mask = dev->tc_to_txq[tc].offset; + } +#endif + + return true; +} +#endif + +/** + * ixgbe_set_sriov_queues: Allocate queues for IOV use + * @adapter: board private structure to initialize + * + * IOV doesn't actually use anything, so just NAK the + * request for now and let the other queue routines + * figure out what to do. + */ +static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +{ + return false; +} + +/* + * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. 
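+ * The order tried below is SR-IOV, DCB, FCoE, Flow Director and finally
+ * plain RSS, with a single Tx/Rx queue pair as the last resort.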
+ * + **/ +static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + + if (ixgbe_set_sriov_queues(adapter)) + goto done; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_set_dcb_queues(adapter)) + goto done; + +#endif +#ifdef IXGBE_FCOE + if (ixgbe_set_fcoe_queues(adapter)) + goto done; + +#endif /* IXGBE_FCOE */ + if (ixgbe_set_fdir_queues(adapter)) + goto done; + + if (ixgbe_set_rss_queues(adapter)) + goto done; + + /* fallback to base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + +done: + /* Notify the stack of the (possibly) reduced queue counts. */ + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); + return netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); +} + +static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, + int vectors) +{ + int err, vector_threshold; + + /* We'll want at least 3 (vector_threshold): + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + * 3) Other (Link Status Change, etc.) + * 4) TCP Timer (optional) + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + while (vectors >= vector_threshold) { + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + vectors); + if (!err) /* Success in acquiring all requested vectors. */ + break; + else if (err < 0) + vectors = 0; /* Nasty failure, quit now */ + else /* err == number of vectors we should try again with */ + vectors = err; + } + + if (vectors < vector_threshold) { + /* Can't allocate enough MSI-X interrupts? Oh well. + * This just means we'll go with either a single MSI + * vector or fall back to legacy interrupts. + */ + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI-X interrupts\n"); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else { + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = min(vectors, + adapter->max_msix_q_vectors + NON_Q_VECTORS); + } +} + +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. 
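+ * For RSS the mapping is simply the identity: ring i uses register
+ * index i for both the Tx and Rx rings.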
+ * + **/ +static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ + int i; + + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) + return false; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +#ifdef CONFIG_IXGBE_DCB + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + *tx = tc << 2; + *rx = tc << 3; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (num_tcs == 8) { + if (tc < 3) { + *tx = tc << 5; + *rx = tc << 4; + } else if (tc < 5) { + *tx = ((tc + 2) << 4); + *rx = tc << 4; + } else if (tc < num_tcs) { + *tx = ((tc + 8) << 3); + *rx = tc << 4; + } + } else if (num_tcs == 4) { + *rx = tc << 5; + switch (tc) { + case 0: + *tx = 0; + break; + case 1: + *tx = 64; + break; + case 2: + *tx = 96; + break; + case 3: + *tx = 112; + break; + default: + break; + } + } + break; + default: + break; + } +} + +/** + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, j, k; + u8 num_tcs = netdev_get_num_tc(dev); + + if (!num_tcs) + return false; + + for (i = 0, k = 0; i < num_tcs; i++) { + unsigned int tx_s, rx_s; + u16 count = dev->tc_to_txq[i].count; + + ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); + for (j = 0; j < count; j++, k++) { + adapter->tx_ring[k]->reg_idx = tx_s + j; + adapter->rx_ring[k]->reg_idx = rx_s + j; + adapter->tx_ring[k]->dcb_tc = i; + adapter->rx_ring[k]->dcb_tc = i; + } + } + + return true; +} +#endif + +/** + * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for Flow Director to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) +{ + int i; + bool ret = false; + + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + ret = true; + } + + return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 
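+ * Any RSS or Flow Director mapping is cached first; the FCoE rings then
+ * take register indices starting at the offset stored in f->mask.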
+ * + */ +static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + int i; + u8 fcoe_rx_i = 0, fcoe_tx_i = 0; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + ixgbe_cache_ring_fdir(adapter); + else + ixgbe_cache_ring_rss(adapter); + + fcoe_rx_i = f->mask; + fcoe_tx_i = f->mask; + } + for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { + adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; + adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; + } + return true; +} + +#endif /* IXGBE_FCOE */ +/** + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) +{ + adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; + adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; + if (adapter->num_vfs) + return true; + else + return false; +} + +/** + * ixgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) +{ + /* start with default case */ + adapter->rx_ring[0]->reg_idx = 0; + adapter->tx_ring[0]->reg_idx = 0; + + if (ixgbe_cache_ring_sriov(adapter)) + return; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb(adapter)) + return; +#endif + +#ifdef IXGBE_FCOE + if (ixgbe_cache_ring_fcoe(adapter)) + return; +#endif /* IXGBE_FCOE */ + + if (ixgbe_cache_ring_fdir(adapter)) + return; + + if (ixgbe_cache_ring_rss(adapter)) + return; +} + +/** + * ixgbe_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
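+ * Ring structures are allocated on the adapter's NUMA node when
+ * possible, with a plain kzalloc() fallback if that fails.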
+ **/ +static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) +{ + int rx = 0, tx = 0, nid = adapter->node; + + if (nid < 0 || !node_online(nid)) + nid = first_online_node; + + for (; tx < adapter->num_tx_queues; tx++) { + struct ixgbe_ring *ring; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); + if (!ring) + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + ring->count = adapter->tx_ring_count; + ring->queue_index = tx; + ring->numa_node = nid; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + adapter->tx_ring[tx] = ring; + } + + for (; rx < adapter->num_rx_queues; rx++) { + struct ixgbe_ring *ring; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); + if (!ring) + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + ring->count = adapter->rx_ring_count; + ring->queue_index = rx; + ring->numa_node = nid; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + adapter->rx_ring[rx] = ring; + } + + ixgbe_cache_ring_register(adapter); + + return 0; + +err_allocation: + while (tx) + kfree(adapter->tx_ring[--tx]); + + while (rx) + kfree(adapter->rx_ring[--rx]); + return -ENOMEM; +} + +/** + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err = 0; + int vector, v_budget; + + /* + * It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + */ + v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, + (int)num_online_cpus()) + NON_Q_VECTORS; + + /* + * At the same time, hardware can only support a maximum of + * hw.mac->max_msix_vectors vectors. With features + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx + * descriptor queues supported by our device. Thus, we cap it off in + * those rare cases where the cpu count also exceeds our vector limit. + */ + v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. */ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (adapter->msix_entries) { + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + ixgbe_acquire_msix_vectors(adapter, v_budget); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + goto out; + } + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + e_err(probe, + "ATR is not supported while multiple " + "queues are disabled. Disabling Flow Director\n"); + } + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->atr_sample_rate = 0; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + ixgbe_disable_sriov(adapter); + + err = ixgbe_set_num_queues(adapter); + if (err) + return err; + + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + } else { + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI interrupt, " + "falling back to legacy. 
Error: %d\n", err); + /* reset err */ + err = 0; + } + +out: + return err; +} + +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct ixgbe_q_vector *q_vector; + int (*poll)(struct napi_struct *, int); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + poll = &ixgbe_clean_rxtx_many; + } else { + num_q_vectors = 1; + poll = &ixgbe_poll; + } + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), + GFP_KERNEL, adapter->node); + if (!q_vector) + q_vector = kzalloc(sizeof(struct ixgbe_q_vector), + GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + if (q_vector->tx.count && !q_vector->rx.count) + q_vector->eitr = adapter->tx_eitr_param; + else + q_vector->eitr = adapter->rx_eitr_param; + q_vector->v_idx = q_idx; + netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_idx, num_q_vectors; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + else + num_q_vectors = 1; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; + adapter->q_vector[q_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) 
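+ * The scheme is built in three steps: pick the queue counts, acquire
+ * MSI-X/MSI/legacy interrupts, then allocate the q_vectors and rings.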
+ **/ +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + err = ixgbe_set_num_queues(adapter); + if (err) + return err; + + err = ixgbe_set_interrupt_capability(adapter); + if (err) { + e_dev_err("Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ixgbe_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + err = ixgbe_alloc_queues(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; + +err_alloc_queues: + ixgbe_free_q_vectors(adapter); +err_alloc_q_vectors: + ixgbe_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + kfree(adapter->tx_ring[i]); + adapter->tx_ring[i] = NULL; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + /* ixgbe_get_stats64() might access this ring, we must wait + * a grace period before freeing it. + */ + kfree_rcu(ring, rcu); + adapter->rx_ring[i] = NULL; + } + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); +} + +/** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) + * @adapter: board private structure to initialize + * + * ixgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
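+ * Capability flags, DCB traffic classes, flow control watermarks and
+ * the default ring sizes all receive their initial values here.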
+ **/ +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct net_device *dev = adapter->netdev; + unsigned int rss; +#ifdef CONFIG_IXGBE_DCB + int j; + struct tc_configuration *tc; +#endif + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Set capability flags */ + rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); + adapter->ring_feature[RING_F_RSS].indices = rss; + adapter->flags |= IXGBE_FLAG_RSS_ENABLED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + if (hw->device_id == IXGBE_DEV_ID_82598AT) + adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; + adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + /* Flow Director hash filters enabled */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->atr_sample_rate = 20; + adapter->ring_feature[RING_F_FDIR].indices = + IXGBE_MAX_FDIR_INDICES; + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef IXGBE_FCOE + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; +#ifdef CONFIG_IXGBE_DCB + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif +#endif /* IXGBE_FCOE */ + break; + default: + break; + } + + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + +#ifdef CONFIG_IXGBE_DCB + /* Configure DCB traffic classes */ + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[DCB_TX_CONFIG].bwg_id = 0; + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); + tc->path[DCB_RX_CONFIG].bwg_id = 0; + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); + tc->dcb_pfc = pfc_disabled; + } + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_set_bitmap = 0x00; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, + MAX_TRAFFIC_CLASS); + +#endif + + /* default flow control settings */ + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ +#ifdef CONFIG_DCB + adapter->last_lfc_mode = hw->fc.current_mode; +#endif + hw->fc.high_water = FC_HIGH_WATER(max_frame); + hw->fc.low_water = FC_LOW_WATER(max_frame); + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + /* enable itr by default in dynamic mode */ + adapter->rx_itr_setting = 1; + adapter->rx_eitr_param = 20000; + adapter->tx_itr_setting = 1; + adapter->tx_eitr_param = 10000; + + /* set defaults for eitr in MegaBytes */ + adapter->eitr_low = 10; + adapter->eitr_high = 20; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBE_DEFAULT_TXD; + adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + + /* set default work limits 
*/ + adapter->tx_work_limit = adapter->tx_ring_count; + + /* initialize eeprom parameters */ + if (ixgbe_init_eeprom_params_generic(hw)) { + e_dev_err("EEPROM initialization failed\n"); + return -EIO; + } + + /* enable rx csum by default */ + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + + /* get assigned NUMA node */ + adapter->node = dev_to_node(&pdev->dev); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
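+ * (ixgbe_open() does this by freeing all Rx and Tx resources in its
+ * error path.)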
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) + continue; + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ixgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) +{ + ixgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]->desc) + ixgbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ixgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) +{ + ixgbe_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]->desc) + ixgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ixgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* MTU < 68 is an error and causes problems on some kernels */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && + hw->mac.type != ixgbe_mac_X540) { + if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) + return -EINVAL; + } else { + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + } + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + hw->fc.high_water = FC_HIGH_WATER(max_frame); + hw->fc.low_water = FC_LOW_WATER(max_frame); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + + return 0; +} + +/** + * ixgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbe_open(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__IXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbe_configure(adapter); + + err = ixgbe_request_irq(adapter); + if (err) + goto err_req_irq; + + err = ixgbe_up_complete(adapter); + if (err) + goto err_up; + + netif_tx_start_all_queues(netdev); + + return 0; + +err_up: + ixgbe_release_hw_control(adapter); + ixgbe_free_irq(adapter); +err_req_irq: +err_setup_rx: + ixgbe_free_all_rx_resources(adapter); +err_setup_tx: + ixgbe_free_all_tx_resources(adapter); + ixgbe_reset(adapter); + + return err; +} + +/** + * ixgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int ixgbe_close(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + + ixgbe_fdir_filter_exit(adapter); + + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + + ixgbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + err = ixgbe_init_interrupt_scheme(adapter); + if (err) { + e_dev_err("Cannot initialize interrupts for device\n"); + return err; + } + + ixgbe_reset(adapter); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + + if (netif_running(netdev)) { + err = ixgbe_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ + +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl, fctrl; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + } + + ixgbe_clear_interrupt_scheme(adapter); +#ifdef CONFIG_DCB + kfree(adapter->ixgbe_ieee_pfc); + kfree(adapter->ixgbe_ieee_ets); +#endif + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (wufc) { + ixgbe_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IXGBE_WUFC_MC) { + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + ctrl |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + + IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); + } else { + IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pci_wake_from_d3(pdev, false); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + pci_wake_from_d3(pdev, !!wufc); + break; + default: + break; + } + + *enable_wake = !!wufc; + + ixgbe_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __ixgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __ixgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * ixgbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void ixgbe_update_stats(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + for (i = 0; i < 16; i++) + adapter->hw_rx_no_dma_resources += + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + for (i = 0; i < adapter->num_rx_queues; i++) { + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; + } + adapter->rsc_total_count = rsc_count; + adapter->rsc_total_flush = rsc_flush; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + netdev->stats.rx_bytes = bytes; + netdev->stats.rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + netdev->stats.tx_bytes = bytes; + netdev->stats.tx_packets = packets; + + hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + for (i = 0; i < 8; i++) { + /* for packet buffers not used, the register should read 0 */ + mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + missed_rx += mpc; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; + if (hw->mac.type == ixgbe_mac_82598EB) + hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + break; + default: + break; + } + hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + } + hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + /* work around hardware counting issue */ + hwstats->gprc -= missed_rx; + + ixgbe_update_xoff_received(adapter); + + /* 82598 hardware only has a 32 bit counter in the high register */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hwstats->gotc += 
IXGBE_READ_REG(hw, IXGBE_GOTCH); + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + break; + case ixgbe_mac_X540: + /* OS2BMC stats are X540 only*/ + hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); + hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); + hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); + hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); + case ixgbe_mac_82599EB: + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +#ifdef IXGBE_FCOE + hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); +#endif /* IXGBE_FCOE */ + break; + default: + break; + } + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + hwstats->bprc += bprc; + hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + hwstats->mprc -= bprc; + hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + hwstats->lxofftxc += lxoff; + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + /* + * 82598 errata - tx of flow control packets is included in tx counters + */ + xon_off_tot = lxon + lxoff; + hwstats->gptc -= xon_off_tot; + hwstats->mptc -= xon_off_tot; + hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hwstats->ptc64 -= xon_off_tot; + hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = hwstats->mprc; + + /* Rx Errors */ + netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; + netdev->stats.rx_dropped = 0; + netdev->stats.rx_length_errors = hwstats->rlec; + netdev->stats.rx_crc_errors = hwstats->crcerrs; + netdev->stats.rx_missed_errors = total_mpc; +} + +/** + * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table + * @adapter - pointer to the device adapter structure 
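+ *
+ * Reinitializes the Flow Director filter table after an overflow and
+ * re-enables the Flow Director interrupt once the table is usable again.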
 + **/
+static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ /* do nothing if we are not using signature filters */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
+ return;
+
+ adapter->fdir_overflow++;
+
+ if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
+ /* re-enable flow director interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
+ } else {
+ e_err(probe, "failed to finish FDIR re-initialization, "
+ "ignored adding FDIR ATR filters\n");
+ }
+}
+
+/**
+ * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter - pointer to the device adapter structure
+ *
+ * This function serves two purposes. First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring. Secondly it sets the
+ * bits needed to check for TX hangs. As a result we should immediately
+ * determine if a hang has occurred.
+ */
+static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ /* Force detection of hung controller */
+ if (netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_check_for_tx_hang(adapter->tx_ring[i]);
+ }
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /*
+ * for legacy and MSI interrupts don't set any bits
+ * that are enabled for EIAM, because this operation
+ * would set *both* EIMS and EICS for any bit in EIAM
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_EICS,
+ (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+ } else {
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbe_q_vector *qv = adapter->q_vector[i];
+ if (qv->rx.count || qv->tx.count)
+ eics |= ((u64)1 << i);
+ }
+ }
+
+ /* Cause software interrupt to ensure rings are cleaned */
+ ixgbe_irq_rearm_queues(adapter, eics);
+
+}
+
+/**
+ * ixgbe_watchdog_update_link - update the link status
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
+ return;
+
+ if (hw->mac.ops.check_link) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+ /* always assume link is up, if no check link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ if (link_up) {
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ hw->mac.ops.fc_enable(hw, i);
+ } else {
+ hw->mac.ops.fc_enable(hw, 0);
+ }
+ }
+
+ if (link_up ||
+ time_after(jiffies, (adapter->link_check_timeout +
+ IXGBE_TRY_LINK_TIMEOUT))) {
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
+ }
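+ /* cache the polled link state for the rest of the watchdog path */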
+ + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +/** + * ixgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: { + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); + flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); + flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); + } + break; + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: { + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); + flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); + } + break; + default: + flow_tx = false; + flow_rx = false; + break; + } + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == IXGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + "unknown speed"))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None")))); + + netif_carrier_on(netdev); + ixgbe_check_vf_rate_limit(adapter); +} + +/** + * ixgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + /* poll for SFP+ cable when link is down */ + if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); +} + +/** + * ixgbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) +{ + int i; + int some_tx_pending = 0; + + if (!netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + if (tx_ring->next_to_use != tx_ring->next_to_clean) { + some_tx_pending = 1; + break; + } + } + + if (some_tx_pending) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + } + } +} + +static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check for 82598 */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + return; + + ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
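+ * A non-zero count means at least one VF transmitted frames with a MAC
+ * address or VLAN tag it does not own.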
+ */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + +/** + * ixgbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) +{ + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + ixgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + ixgbe_watchdog_link_is_up(adapter); + else + ixgbe_watchdog_link_is_down(adapter); + + ixgbe_spoof_check(adapter); + ixgbe_update_stats(adapter); + + ixgbe_watchdog_flush_tx(adapter); +} + +/** + * ixgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter - the ixgbe adapter structure + **/ +static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + s32 err; + + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + err = hw->phy.ops.identify_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + if (err == IXGBE_ERR_SFP_NOT_PRESENT) { + /* If no cable is present, then we need to reset + * the next time we find a good cable. */ + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + } + + /* exit on error */ + if (err) + goto sfp_out; + + /* exit if reset not needed */ + if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + goto sfp_out; + + adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; + + /* + * A module may be identified correctly, but the EEPROM may not have + * support for that module. setup_sfp() will fail in that case, so + * we should not allow that module to load. 
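+ * When an unsupported module is found the error is reported and, if the
+ * netdev is already registered, the driver unregisters itself below.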
+ */ + if (hw->mac.type == ixgbe_mac_82598EB) + err = hw->phy.ops.reset(hw); + else + err = hw->mac.ops.setup_sfp(hw); + + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + + if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && + (adapter->netdev->reg_state == NETREG_REGISTERED)) { + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + e_dev_err("Reload the driver after installing a " + "supported module.\n"); + unregister_netdev(adapter->netdev); + } +} + +/** + * ixgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter - the ixgbe adapter structure + **/ +static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 autoneg; + bool negotiation; + + if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + + autoneg = hw->phy.autoneg_advertised; + if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) + hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); + hw->mac.autotry_restart = false; + if (hw->mac.ops.setup_link) + hw->mac.ops.setup_link(hw, autoneg, negotiation, true); + + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); +} + +/** + * ixgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbe_service_timer(unsigned long data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + unsigned long next_event_offset; + + /* poll faster when waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + ixgbe_service_event_schedule(adapter); +} + +static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) +{ + if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; + + /* If we're already down or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + ixgbe_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + ixgbe_reinit_locked(adapter); +} + +/** + * ixgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ixgbe_service_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, + struct ixgbe_adapter, + service_task); + + ixgbe_reset_subtask(adapter); + ixgbe_sfp_detection_subtask(adapter); + ixgbe_sfp_link_config_subtask(adapter); + ixgbe_check_overtemp_subtask(adapter); + ixgbe_watchdog_subtask(adapter); + ixgbe_fdir_reinit_subtask(adapter); + ixgbe_check_hang_subtask(adapter); + + ixgbe_service_event_complete(adapter); +} + +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = 
IXGBE_TX_CTXTDESC_ADV(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol, u8 *hdr_len) +{ + int err; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + if (protocol == __constant_htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; + + /* mss_l4len_id: use 1 as index for TSO */ + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, + mss_l4len_idx); + + return 1; +} + +static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, + __be16 protocol) +{ + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(tx_flags & IXGBE_TX_FLAGS_VLAN)) + return false; + } else { + u8 l4_hdr = 0; + switch (protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + skb->protocol); + } + break; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + skb->protocol); + } + break; + } + } + + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, + type_tucmd, mss_l4len_idx); + + return 
(skb->ip_summed == CHECKSUM_PARTIAL); +} + +static int ixgbe_tx_map(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, + unsigned int first, const u8 hdr_len) +{ + struct device *dev = tx_ring->dev; + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int len; + unsigned int total = skb->len; + unsigned int offset = 0, size, count = 0; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int f; + unsigned int bytecount = skb->len; + u16 gso_segs = 1; + u16 i; + + i = tx_ring->next_to_use; + + if (tx_flags & IXGBE_TX_FLAGS_FCOE) + /* excluding fcoe_crc_eof for FCoE */ + total -= sizeof(struct fcoe_crc_eof); + + len = min(skb_headlen(skb), total); + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->mapped_as_page = false; + tx_buffer_info->dma = dma_map_single(dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, tx_buffer_info->dma)) + goto dma_error; + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + total -= size; + offset += size; + count++; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = min((unsigned int)frag->size, total); + offset = frag->page_offset; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->dma = dma_map_page(dev, + frag->page, + offset, size, + DMA_TO_DEVICE); + tx_buffer_info->mapped_as_page = true; + if (dma_mapping_error(dev, tx_buffer_info->dma)) + goto dma_error; + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + total -= size; + offset += size; + count++; + } + if (total == 0) + break; + } + + if (tx_flags & IXGBE_TX_FLAGS_TSO) + gso_segs = skb_shinfo(skb)->gso_segs; +#ifdef IXGBE_FCOE + /* adjust for FCoE Sequence Offload */ + else if (tx_flags & IXGBE_TX_FLAGS_FSO) + gso_segs = DIV_ROUND_UP(skb->len - hdr_len, + skb_shinfo(skb)->gso_size); +#endif /* IXGBE_FCOE */ + bytecount += (gso_segs - 1) * hdr_len; + + /* multiply data chunks by size of headers */ + tx_ring->tx_buffer_info[i].bytecount = bytecount; + tx_ring->tx_buffer_info[i].gso_segs = gso_segs; + tx_ring->tx_buffer_info[i].skb = skb; + tx_ring->tx_buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + e_dev_err("TX DMA map failed\n"); + + /* clear timestamp and dma mappings for failed tx_buffer_info map */ + tx_buffer_info->dma = 0; + tx_buffer_info->time_stamp = 0; + tx_buffer_info->next_to_watch = 0; + if (count) + count--; + + /* clear timestamp and dma mappings for remaining portion of packet */ + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + return 0; +} + +static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, + int tx_flags, int count, u32 paylen, u8 hdr_len) +{ + union ixgbe_adv_tx_desc *tx_desc = NULL; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 olinfo_status = 0, cmd_type_len = 0; + unsigned int i; + u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; + + cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; + + cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | 
IXGBE_ADVTXD_DCMD_DEXT; + + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + + if (tx_flags & IXGBE_TX_FLAGS_TSO) { + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + /* use index 1 context for tso */ + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); + if (tx_flags & IXGBE_TX_FLAGS_IPV4) + olinfo_status |= IXGBE_TXD_POPTS_IXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + if (tx_flags & IXGBE_TX_FLAGS_FCOE) { + olinfo_status |= IXGBE_ADVTXD_CC; + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); + if (tx_flags & IXGBE_TX_FLAGS_FSO) + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + } + + olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | tx_buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); + + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, tx_ring->tail); +} + +static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol) +{ + struct ixgbe_q_vector *q_vector = ring->q_vector; + union ixgbe_atr_hash_dword input = { .dword = 0 }; + union ixgbe_atr_hash_dword common = { .dword = 0 }; + union { + unsigned char *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + struct tcphdr *th; + __be16 vlan_id; + + /* if ring doesn't have a interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + /* do nothing if sampling is disabled */ + if (!ring->atr_sample_rate) + return; + + ring->atr_count++; + + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(skb); + + /* Currently only IPv4/IPv6 with TCP is supported */ + if ((protocol != __constant_htons(ETH_P_IPV6) || + hdr.ipv6->nexthdr != IPPROTO_TCP) && + (protocol != __constant_htons(ETH_P_IP) || + hdr.ipv4->protocol != IPPROTO_TCP)) + return; + + th = tcp_hdr(skb); + + /* skip this packet since the socket is closing */ + if (th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) + return; + + /* reset sample count */ + ring->atr_count = 0; + + vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); + + /* + * src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. 
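+ *
+ * The resulting signature filter steers receive traffic for this flow to
+ * the queue paired with this transmit queue.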
+ */ + input.formatted.vlan_id = vlan_id; + + /* + * since src port and flex bytes occupy the same word XOR them together + * and write the value to source port portion of compressed dword + */ + if (vlan_id) + common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); + else + common.port.src ^= th->dest ^ protocol; + common.port.dst ^= th->source; + + if (protocol == __constant_htons(ETH_P_IP)) { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } else { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, + input, common, ring->queue_index); +} + +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(ixgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + if (likely(ixgbe_desc_unused(tx_ring) >= size)) + return 0; + return __ixgbe_maybe_stop_tx(tx_ring, size); +} + +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : + smp_processor_id(); +#ifdef IXGBE_FCOE + __be16 protocol = vlan_get_protocol(skb); + + if (((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP))) && + (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { + txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); + txq += adapter->ring_feature[RING_F_FCOE].mask; + return txq; + } +#endif + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + while (unlikely(txq >= dev->real_num_tx_queues)) + txq -= dev->real_num_tx_queues; + return txq; + } + + return skb_tx_hash(dev, skb); +} + +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + int tso; + u32 tx_flags = 0; +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + unsigned short f; +#endif + u16 first; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol; + u8 hdr_len = 0; + + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + protocol = vlan_get_protocol(skb); + + if (vlan_tx_tag_present(skb)) { + tx_flags |= vlan_tx_tag_get(skb); + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + tx_flags |= tx_ring->dcb_tc << 13; + } + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && + skb->priority != TC_PRIO_CONTROL) { + tx_flags |= tx_ring->dcb_tc << 13; + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } + +#ifdef IXGBE_FCOE + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && + (protocol == htons(ETH_P_FCOE))) + tx_flags |= IXGBE_TX_FLAGS_FCOE; + +#endif + /* record the location of the first descriptor for this packet */ + first = tx_ring->next_to_use; + + if (tx_flags & IXGBE_TX_FLAGS_FCOE) { +#ifdef IXGBE_FCOE + /* setup tx offload for FCoE */ + tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len); + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= IXGBE_TX_FLAGS_FSO; +#endif /* IXGBE_FCOE */ + } else { + if (protocol == htons(ETH_P_IP)) + tx_flags |= IXGBE_TX_FLAGS_IPV4; + tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len); + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= IXGBE_TX_FLAGS_TSO; + else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol)) + tx_flags |= IXGBE_TX_FLAGS_CSUM; + } + + count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); + if (count) { + /* add the ATR filter if ATR is on */ + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + ixgbe_atr(tx_ring, skb, tx_flags, protocol); + ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + } else { + tx_ring->tx_buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + goto out_drop; + } + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct 
ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; + + tx_ring = adapter->tx_ring[skb->queue_mapping]; + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, + IXGBE_RAH_AV); + + return 0; +} + +static int +ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 value; + int rc; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + rc = hw->phy.ops.read_reg(hw, addr, devad, &value); + if (!rc) + rc = value; + return rc; +} + +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + return hw->phy.ops.write_reg(hw, addr, devad, value); +} + +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); +} + +/** + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_add_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +/** + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_del_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
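+ * For MSI-X each active queue vector is cleaned directly; otherwise the
+ * legacy/MSI interrupt handler is invoked.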
+ */ +static void ixgbe_netpoll(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + adapter->flags |= IXGBE_FLAG_IN_NETPOLL; + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + for (i = 0; i < num_q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + ixgbe_msix_clean_many(0, q_vector); + } + } else { + ixgbe_intr(adapter->pdev->irq, netdev); + } + adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; +} +#endif + +static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by ixgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + return stats; +} + +/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. + * #adapter: pointer to ixgbe_adapter + * @tc: number of traffic classes currently enabled + * + * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm + * 802.1Q priority maps to a packet buffer that exists. + */ +static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg, rsave; + int i; + + /* 82598 have a static priority to TC mapping that can not + * be changed so no validation is needed. + */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + rsave = reg; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); + + /* If up2tc is out of bounds default to zero */ + if (up2tc > tc) + reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); + } + + if (reg != rsave) + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + return; +} + + +/* ixgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. 
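+ * Reconfigures the rings and interrupt vectors so that each enabled
+ * traffic class maps to its own packet buffer region.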
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* If DCB is enabled do not remove traffic classes, multiple
+ * traffic classes are required to implement DCB
+ */
+ if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return 0;
+
+ /* Hardware supports up to 8 traffic classes */
+ if (tc > MAX_TRAFFIC_CLASS ||
+ (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+ return -EINVAL;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbe_close(dev);
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ if (tc)
+ netdev_set_num_tc(dev, tc);
+ else
+ netdev_reset_tc(dev);
+
+ ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_validate_rtr(adapter, tc);
+ if (netif_running(dev))
+ ixgbe_open(dev);
+
+ return 0;
+}
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+}
+
+static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ data &= ~NETIF_F_HW_VLAN_RX;
+#endif
+
+ /* return error if RXHASH is being enabled when RSS is not supported */
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ data &= ~NETIF_F_RXHASH;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ data &= ~NETIF_F_LRO;
+
+ /* Turn off LRO if not RSC capable or invalid ITR settings */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ data &= ~NETIF_F_LRO;
+ } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (adapter->rx_itr_setting != 1 &&
+ adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
+ data &= ~NETIF_F_LRO;
+ e_info(probe, "rx-usecs set too low, not enabling RSC\n");
+ }
+
+ return data;
+}
+
+static int ixgbe_set_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ /* Make sure RSC matches LRO, reset if change */
+ if (!!(data & NETIF_F_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
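+ * ATR (hashed signature) filters and n-tuple (perfect) filters are
+ * mutually exclusive, so toggling NETIF_F_NTUPLE switches between the
+ * two modes.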
+ */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + /* turn off ATR, enable perfect filters and reset */ + if (data & NETIF_F_NTUPLE) { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + need_reset = true; + } + } else if (!(data & NETIF_F_NTUPLE)) { + /* turn off Flow Director, set ATR and reset */ + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + need_reset = true; + } + + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; + +} + +static const struct net_device_ops ixgbe_netdev_ops = { + .ndo_open = ixgbe_open, + .ndo_stop = ixgbe_close, + .ndo_start_xmit = ixgbe_xmit_frame, + .ndo_select_queue = ixgbe_select_queue, + .ndo_set_rx_mode = ixgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbe_set_mac, + .ndo_change_mtu = ixgbe_change_mtu, + .ndo_tx_timeout = ixgbe_tx_timeout, + .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, + .ndo_do_ioctl = ixgbe_ioctl, + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, + .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, + .ndo_get_vf_config = ixgbe_ndo_get_vf_config, + .ndo_get_stats64 = ixgbe_get_stats64, + .ndo_setup_tc = ixgbe_setup_tc, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbe_netpoll, +#endif +#ifdef IXGBE_FCOE + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, + .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, + .ndo_fcoe_enable = ixgbe_fcoe_enable, + .ndo_fcoe_disable = ixgbe_fcoe_disable, + .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, +#endif /* IXGBE_FCOE */ + .ndo_set_features = ixgbe_set_features, + .ndo_fix_features = ixgbe_fix_features, +}; + +static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, + const struct ixgbe_info *ii) +{ +#ifdef CONFIG_PCI_IOV + struct ixgbe_hw *hw = &adapter->hw; + int err; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + + if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) + return; + + /* The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function + */ + adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + goto err_novfs; + } + + num_vf_macvlans = hw->mac.num_rar_entries - + (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + mv_list->rar_entry = hw->mac.num_rar_entries - + (i + adapter->num_vfs + 1); + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. 
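+ * If this allocation fails, SR-IOV is disabled again below.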
+ */ + adapter->vfinfo = + kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (adapter->vfinfo) { + /* Now that we're sure SR-IOV is enabled + * and memory allocated set up the mailbox parameters + */ + ixgbe_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, + sizeof(hw->mbx.ops)); + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | + IXGBE_FLAG2_RSC_ENABLED); + return; + } + + /* Oh oh */ + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); + pci_disable_sriov(adapter->pdev); + +err_novfs: + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; +#endif /* CONFIG_PCI_IOV */ +} + +/** + * ixgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit ixgbe_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbe_adapter *adapter = NULL; + struct ixgbe_hw *hw; + const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; + static int cards_found; + int i, err, pci_using_dac; + u8 part_str[IXGBE_PBANUM_LENGTH]; + unsigned int indices = num_possible_cpus(); +#ifdef IXGBE_FCOE + u16 device_caps; +#endif + u32 eec; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. + */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), ixgbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + +#ifdef CONFIG_IXGBE_DCB + indices *= MAX_TRAFFIC_CLASS; +#endif + + if (ii->mac == ixgbe_mac_82598EB) + indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); + else + indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); + +#ifdef IXGBE_FCOE + indices += min_t(unsigned int, num_possible_cpus(), + IXGBE_MAX_FCOE_INDICES); +#endif + netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + pci_set_drvdata(pdev, adapter); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + for (i = 1; i <= 5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + } + + 
netdev->netdev_ops = &ixgbe_netdev_ops; + ixgbe_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* Setup hw api */ + memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.type = ii->mac; + + /* EEPROM */ + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ + if (!(eec & (1 << 8))) + hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; + + /* PHY */ + memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + /* ixgbe_identify_phy_generic will set prtad and mmds properly */ + hw->phy.mdio.prtad = MDIO_PRTAD_NONE; + hw->phy.mdio.mmds = 0; + hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + hw->phy.mdio.dev = netdev; + hw->phy.mdio.mdio_read = ixgbe_mdio_read; + hw->phy.mdio.mdio_write = ixgbe_mdio_write; + + ii->get_invariants(hw); + + /* setup the private structure */ + err = ixgbe_sw_init(adapter); + if (err) + goto err_sw_init; + + /* Make it possible the adapter to be woken up via WOL */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + break; + default: + break; + } + + /* + * If there is a fan on this device and it has failed log the + * failure. + */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + e_crit(probe, "Fan has stopped, replace the adapter\n"); + } + + /* reset_hw fills in the perm_addr as well */ + hw->phy.reset_if_overtemp = true; + err = hw->mac.ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; + if (err == IXGBE_ERR_SFP_NOT_PRESENT && + hw->mac.type == ixgbe_mac_82598EB) { + err = 0; + } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + e_dev_err("failed to load because an unsupported SFP+ " + "module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported " + "module.\n"); + goto err_sw_init; + } else if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto err_sw_init; + } + + ixgbe_probe_vf(adapter, ii); + + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GRO | + NETIF_F_RXHASH | + NETIF_F_RXCSUM; + + netdev->hw_features = netdev->features; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + netdev->features |= NETIF_F_SCTP_CSUM; + netdev->hw_features |= NETIF_F_SCTP_CSUM | + NETIF_F_NTUPLE; + break; + default: + break; + } + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | + IXGBE_FLAG_DCB_ENABLED); + +#ifdef CONFIG_IXGBE_DCB + netdev->dcbnl_ops = &dcbnl_ops; +#endif + +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + if (hw->mac.ops.get_device_caps) { + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + } + } + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + 
netdev->vlan_features |= NETIF_F_FCOE_CRC; + netdev->vlan_features |= NETIF_F_FSO; + netdev->vlan_features |= NETIF_F_FCOE_MTU; + } +#endif /* IXGBE_FCOE */ + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + + /* make sure the EEPROM is good */ + if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); + + if (ixgbe_validate_mac_addr(netdev->perm_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_eeprom; + } + + /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) + hw->mac.ops.disable_tx_laser(hw); + + setup_timer(&adapter->service_timer, &ixgbe_service_timer, + (unsigned long) adapter); + + INIT_WORK(&adapter->service_task, ixgbe_service_task); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); + + err = ixgbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { + netdev->hw_features &= ~NETIF_F_RXHASH; + netdev->features &= ~NETIF_F_RXHASH; + } + + switch (pdev->device) { + case IXGBE_DEV_ID_82599_SFP: + /* Only this subdevice supports WOL */ + if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) + adapter->wol = IXGBE_WUFC_MAG; + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) + adapter->wol = IXGBE_WUFC_MAG; + break; + case IXGBE_DEV_ID_82599_KX4: + adapter->wol = IXGBE_WUFC_MAG; + break; + default: + adapter->wol = 0; + break; + } + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* pick up the PCI bus settings for reporting later */ + hw->mac.ops.get_bus_info(hw); + + /* print bus type/speed/width info */ + e_dev_info("(PCI Express:%s:%s) %pM\n", + (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : + hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : + "Unknown"), + (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : + hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : + hw->bus.width == ixgbe_bus_width_pcie_x1 ? 
"Width x1" : + "Unknown"), + netdev->dev_addr); + + err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); + if (err) + strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, + part_str); + else + e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { + e_dev_warn("PCI-Express bandwidth available for this card is " + "not sufficient for optimal performance.\n"); + e_dev_warn("For optimal performance a x8 PCI-Express slot " + "is required.\n"); + } + + /* save off EEPROM version number */ + hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); + + /* reset the hardware with the new settings */ + err = hw->mac.ops.start_hw(hw); + + if (err == IXGBE_ERR_EEPROM_VERSION) { + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + } + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_IXGBE_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + } +#endif + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(pdev, (i | 0x10000000)); + } + + /* Inform firmware of driver version */ + if (hw->mac.ops.set_fw_drv_ver) + hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, + FW_CEM_UNUSED_VER); + + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); + + e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); + cards_found++; + return 0; + +err_register: + ixgbe_release_hw_control(adapter); + ixgbe_clear_interrupt_scheme(adapter); +err_sw_init: +err_eeprom: + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + ixgbe_disable_sriov(adapter); + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ixgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/ +static void __devexit ixgbe_remove(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + set_bit(__IXGBE_DOWN, &adapter->state); + cancel_work_sync(&adapter->service_task); + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + dca_remove_requester(&pdev->dev); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + } + +#endif +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + ixgbe_cleanup_fcoe(adapter); + +#endif /* IXGBE_FCOE */ + + /* remove the added san mac */ + ixgbe_del_sanmac_netdev(netdev); + + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + ixgbe_disable_sriov(adapter); + + ixgbe_clear_interrupt_scheme(adapter); + + ixgbe_release_hw_control(adapter); + + iounmap(adapter->hw.hw_addr); + pci_release_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM)); + + e_dev_info("complete\n"); + + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/** + * ixgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + ixgbe_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result; + int err; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + ixgbe_reset(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + e_dev_err("pci_cleanup_aer_uncorrect_error_status " + "failed 0x%0x\n", err); + /* non-fatal, continue */ + } + + return result; +} + +/** + * ixgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
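+ * The interface is brought back up if it was running before the error
+ * and the netdev is re-attached.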
+ */ +static void ixgbe_io_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + if (netif_running(netdev)) { + if (ixgbe_up(adapter)) { + e_info(probe, "ixgbe_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static struct pci_error_handlers ixgbe_err_handler = { + .error_detected = ixgbe_io_error_detected, + .slot_reset = ixgbe_io_slot_reset, + .resume = ixgbe_io_resume, +}; + +static struct pci_driver ixgbe_driver = { + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = __devexit_p(ixgbe_remove), +#ifdef CONFIG_PM + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, +#endif + .shutdown = ixgbe_shutdown, + .err_handler = &ixgbe_err_handler +}; + +/** + * ixgbe_init_module - Driver Registration Routine + * + * ixgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); + pr_info("%s\n", ixgbe_copyright); + +#ifdef CONFIG_IXGBE_DCA + dca_register_notify(&dca_notifier); +#endif + + ret = pci_register_driver(&ixgbe_driver); + return ret; +} + +module_init(ixgbe_init_module); + +/** + * ixgbe_exit_module - Driver Exit Cleanup Routine + * + * ixgbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbe_exit_module(void) +{ +#ifdef CONFIG_IXGBE_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&ixgbe_driver); + rcu_barrier(); /* Wait for completion of call_rcu()'s */ +} + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, + __ixgbe_notify_dca); + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; +} + +#endif /* CONFIG_IXGBE_DCA */ + +module_exit(ixgbe_exit_module); + +/* ixgbe_main.c */ diff --cc drivers/net/ethernet/nvidia/forcedeth.c index 3784a727692e,000000000000..98bb64bc24d9 mode 100644,000000..100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@@ -1,6012 -1,0 +1,6013 @@@ +/* + * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. + * + * Note: This driver is a cleanroom reimplementation based on reverse + * engineered documentation written by Carl-Daniel Hailfinger + * and Andrew de Quincey. + * + * NVIDIA, nForce and other NVIDIA marks are trademarks or registered + * trademarks of NVIDIA Corporation in the United States and other + * countries. + * + * Copyright (C) 2003,4,5 Manfred Spraul + * Copyright (C) 2004 Andrew de Quincey (wol support) + * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane + * IRQ rate fixes, bigendian fixes, cleanups, verification) + * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Known bugs: + * We suspect that on some hardware no TX done interrupts are generated. + * This means recovery from netif_stop_queue only happens if the hw timer + * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT) + * and the timer is active in the IRQMask, or if a rx packet arrives by chance. + * If your hardware reliably generates tx done interrupts, then you can remove + * DEV_NEED_TIMERIRQ from the driver_data flags. + * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few + * superfluous timer interrupts from the nic. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define FORCEDETH_VERSION "0.64" +#define DRV_NAME "forcedeth" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define TX_WORK_PER_LOOP 64 +#define RX_WORK_PER_LOOP 64 + +/* + * Hardware access: + */ + +#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */ +#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */ +#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */ +#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */ +#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */ +#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */ +#define DEV_HAS_MSI 0x0000040 /* device supports MSI */ +#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */ +#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */ +#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */ +#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */ +#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */ +#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */ +#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */ +#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */ +#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */ +#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */ +#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */ +#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */ +#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */ +#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */ +#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */ +#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, expect for some revs */ +#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */ +#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */ +#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */ +#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */ + +enum { + 
NvRegIrqStatus = 0x000, +#define NVREG_IRQSTAT_MIIEVENT 0x040 +#define NVREG_IRQSTAT_MASK 0x83ff + NvRegIrqMask = 0x004, +#define NVREG_IRQ_RX_ERROR 0x0001 +#define NVREG_IRQ_RX 0x0002 +#define NVREG_IRQ_RX_NOBUF 0x0004 +#define NVREG_IRQ_TX_ERR 0x0008 +#define NVREG_IRQ_TX_OK 0x0010 +#define NVREG_IRQ_TIMER 0x0020 +#define NVREG_IRQ_LINK 0x0040 +#define NVREG_IRQ_RX_FORCED 0x0080 +#define NVREG_IRQ_TX_FORCED 0x0100 +#define NVREG_IRQ_RECOVER_ERROR 0x8200 +#define NVREG_IRQMASK_THROUGHPUT 0x00df +#define NVREG_IRQMASK_CPU 0x0060 +#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) +#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) +#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) + + NvRegUnknownSetupReg6 = 0x008, +#define NVREG_UNKSETUP6_VAL 3 + +/* + * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic + * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms + */ + NvRegPollingInterval = 0x00c, +#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */ +#define NVREG_POLL_DEFAULT_CPU 13 + NvRegMSIMap0 = 0x020, + NvRegMSIMap1 = 0x024, + NvRegMSIIrqMask = 0x030, +#define NVREG_MSI_VECTOR_0_ENABLED 0x01 + NvRegMisc1 = 0x080, +#define NVREG_MISC1_PAUSE_TX 0x01 +#define NVREG_MISC1_HD 0x02 +#define NVREG_MISC1_FORCE 0x3b0f3c + + NvRegMacReset = 0x34, +#define NVREG_MAC_RESET_ASSERT 0x0F3 + NvRegTransmitterControl = 0x084, +#define NVREG_XMITCTL_START 0x01 +#define NVREG_XMITCTL_MGMT_ST 0x40000000 +#define NVREG_XMITCTL_SYNC_MASK 0x000f0000 +#define NVREG_XMITCTL_SYNC_NOT_READY 0x0 +#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 +#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 +#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 +#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 +#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 +#define NVREG_XMITCTL_HOST_LOADED 0x00004000 +#define NVREG_XMITCTL_TX_PATH_EN 0x01000000 +#define NVREG_XMITCTL_DATA_START 0x00100000 +#define NVREG_XMITCTL_DATA_READY 0x00010000 +#define NVREG_XMITCTL_DATA_ERROR 0x00020000 + NvRegTransmitterStatus = 0x088, +#define NVREG_XMITSTAT_BUSY 0x01 + + NvRegPacketFilterFlags = 0x8c, +#define NVREG_PFF_PAUSE_RX 0x08 +#define NVREG_PFF_ALWAYS 0x7F0000 +#define NVREG_PFF_PROMISC 0x80 +#define NVREG_PFF_MYADDR 0x20 +#define NVREG_PFF_LOOPBACK 0x10 + + NvRegOffloadConfig = 0x90, +#define NVREG_OFFLOAD_HOMEPHY 0x601 +#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE + NvRegReceiverControl = 0x094, +#define NVREG_RCVCTL_START 0x01 +#define NVREG_RCVCTL_RX_PATH_EN 0x01000000 + NvRegReceiverStatus = 0x98, +#define NVREG_RCVSTAT_BUSY 0x01 + + NvRegSlotTime = 0x9c, +#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 +#define NVREG_SLOTTIME_10_100_FULL 0x00007f00 +#define NVREG_SLOTTIME_1000_FULL 0x0003ff00 +#define NVREG_SLOTTIME_HALF 0x0000ff00 +#define NVREG_SLOTTIME_DEFAULT 0x00007f00 +#define NVREG_SLOTTIME_MASK 0x000000ff + + NvRegTxDeferral = 0xA0, +#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f +#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f +#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f +#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f +#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f +#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000 + NvRegRxDeferral = 0xA4, +#define NVREG_RX_DEFERRAL_DEFAULT 0x16 + NvRegMacAddrA = 0xA8, + NvRegMacAddrB = 0xAC, + NvRegMulticastAddrA = 0xB0, +#define NVREG_MCASTADDRA_FORCE 0x01 + NvRegMulticastAddrB = 0xB4, + NvRegMulticastMaskA = 0xB8, +#define 
NVREG_MCASTMASKA_NONE 0xffffffff + NvRegMulticastMaskB = 0xBC, +#define NVREG_MCASTMASKB_NONE 0xffff + + NvRegPhyInterface = 0xC0, +#define PHY_RGMII 0x10000000 + NvRegBackOffControl = 0xC4, +#define NVREG_BKOFFCTRL_DEFAULT 0x70000000 +#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff +#define NVREG_BKOFFCTRL_SELECT 24 +#define NVREG_BKOFFCTRL_GEAR 12 + + NvRegTxRingPhysAddr = 0x100, + NvRegRxRingPhysAddr = 0x104, + NvRegRingSizes = 0x108, +#define NVREG_RINGSZ_TXSHIFT 0 +#define NVREG_RINGSZ_RXSHIFT 16 + NvRegTransmitPoll = 0x10c, +#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 + NvRegLinkSpeed = 0x110, +#define NVREG_LINKSPEED_FORCE 0x10000 +#define NVREG_LINKSPEED_10 1000 +#define NVREG_LINKSPEED_100 100 +#define NVREG_LINKSPEED_1000 50 +#define NVREG_LINKSPEED_MASK (0xFFF) + NvRegUnknownSetupReg5 = 0x130, +#define NVREG_UNKSETUP5_BIT31 (1<<31) + NvRegTxWatermark = 0x13c, +#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 +#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 +#define NVREG_TX_WM_DESC2_3_1000 0xfe08000 + NvRegTxRxControl = 0x144, +#define NVREG_TXRXCTL_KICK 0x0001 +#define NVREG_TXRXCTL_BIT1 0x0002 +#define NVREG_TXRXCTL_BIT2 0x0004 +#define NVREG_TXRXCTL_IDLE 0x0008 +#define NVREG_TXRXCTL_RESET 0x0010 +#define NVREG_TXRXCTL_RXCHECK 0x0400 +#define NVREG_TXRXCTL_DESC_1 0 +#define NVREG_TXRXCTL_DESC_2 0x002100 +#define NVREG_TXRXCTL_DESC_3 0xc02200 +#define NVREG_TXRXCTL_VLANSTRIP 0x00040 +#define NVREG_TXRXCTL_VLANINS 0x00080 + NvRegTxRingPhysAddrHigh = 0x148, + NvRegRxRingPhysAddrHigh = 0x14C, + NvRegTxPauseFrame = 0x170, +#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080 +#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 +#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 +#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 + NvRegTxPauseFrameLimit = 0x174, +#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000 + NvRegMIIStatus = 0x180, +#define NVREG_MIISTAT_ERROR 0x0001 +#define NVREG_MIISTAT_LINKCHANGE 0x0008 +#define NVREG_MIISTAT_MASK_RW 0x0007 +#define NVREG_MIISTAT_MASK_ALL 0x000f + NvRegMIIMask = 0x184, +#define NVREG_MII_LINKCHANGE 0x0008 + + NvRegAdapterControl = 0x188, +#define NVREG_ADAPTCTL_START 0x02 +#define NVREG_ADAPTCTL_LINKUP 0x04 +#define NVREG_ADAPTCTL_PHYVALID 0x40000 +#define NVREG_ADAPTCTL_RUNNING 0x100000 +#define NVREG_ADAPTCTL_PHYSHIFT 24 + NvRegMIISpeed = 0x18c, +#define NVREG_MIISPEED_BIT8 (1<<8) +#define NVREG_MIIDELAY 5 + NvRegMIIControl = 0x190, +#define NVREG_MIICTL_INUSE 0x08000 +#define NVREG_MIICTL_WRITE 0x00400 +#define NVREG_MIICTL_ADDRSHIFT 5 + NvRegMIIData = 0x194, + NvRegTxUnicast = 0x1a0, + NvRegTxMulticast = 0x1a4, + NvRegTxBroadcast = 0x1a8, + NvRegWakeUpFlags = 0x200, +#define NVREG_WAKEUPFLAGS_VAL 0x7770 +#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 +#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 +#define NVREG_WAKEUPFLAGS_D3SHIFT 12 +#define NVREG_WAKEUPFLAGS_D2SHIFT 8 +#define NVREG_WAKEUPFLAGS_D1SHIFT 4 +#define NVREG_WAKEUPFLAGS_D0SHIFT 0 +#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 +#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 +#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 +#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 + + NvRegMgmtUnitGetVersion = 0x204, +#define NVREG_MGMTUNITGETVERSION 0x01 + NvRegMgmtUnitVersion = 0x208, +#define NVREG_MGMTUNITVERSION 0x08 + NvRegPowerCap = 0x268, +#define NVREG_POWERCAP_D3SUPP (1<<30) +#define NVREG_POWERCAP_D2SUPP (1<<26) +#define NVREG_POWERCAP_D1SUPP (1<<25) + NvRegPowerState = 0x26c, +#define NVREG_POWERSTATE_POWEREDUP 0x8000 +#define NVREG_POWERSTATE_VALID 0x0100 +#define NVREG_POWERSTATE_MASK 0x0003 +#define 
NVREG_POWERSTATE_D0 0x0000 +#define NVREG_POWERSTATE_D1 0x0001 +#define NVREG_POWERSTATE_D2 0x0002 +#define NVREG_POWERSTATE_D3 0x0003 + NvRegMgmtUnitControl = 0x278, +#define NVREG_MGMTUNITCONTROL_INUSE 0x20000 + NvRegTxCnt = 0x280, + NvRegTxZeroReXmt = 0x284, + NvRegTxOneReXmt = 0x288, + NvRegTxManyReXmt = 0x28c, + NvRegTxLateCol = 0x290, + NvRegTxUnderflow = 0x294, + NvRegTxLossCarrier = 0x298, + NvRegTxExcessDef = 0x29c, + NvRegTxRetryErr = 0x2a0, + NvRegRxFrameErr = 0x2a4, + NvRegRxExtraByte = 0x2a8, + NvRegRxLateCol = 0x2ac, + NvRegRxRunt = 0x2b0, + NvRegRxFrameTooLong = 0x2b4, + NvRegRxOverflow = 0x2b8, + NvRegRxFCSErr = 0x2bc, + NvRegRxFrameAlignErr = 0x2c0, + NvRegRxLenErr = 0x2c4, + NvRegRxUnicast = 0x2c8, + NvRegRxMulticast = 0x2cc, + NvRegRxBroadcast = 0x2d0, + NvRegTxDef = 0x2d4, + NvRegTxFrame = 0x2d8, + NvRegRxCnt = 0x2dc, + NvRegTxPause = 0x2e0, + NvRegRxPause = 0x2e4, + NvRegRxDropFrame = 0x2e8, + NvRegVlanControl = 0x300, +#define NVREG_VLANCONTROL_ENABLE 0x2000 + NvRegMSIXMap0 = 0x3e0, + NvRegMSIXMap1 = 0x3e4, + NvRegMSIXIrqStatus = 0x3f0, + + NvRegPowerState2 = 0x600, +#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15 +#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 +#define NVREG_POWERSTATE2_PHY_RESET 0x0004 +#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00 +}; + +/* Big endian: should work, but is untested */ +struct ring_desc { + __le32 buf; + __le32 flaglen; +}; + +struct ring_desc_ex { + __le32 bufhigh; + __le32 buflow; + __le32 txvlan; + __le32 flaglen; +}; + +union ring_type { + struct ring_desc *orig; + struct ring_desc_ex *ex; +}; + +#define FLAG_MASK_V1 0xffff0000 +#define FLAG_MASK_V2 0xffffc000 +#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) +#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) + +#define NV_TX_LASTPACKET (1<<16) +#define NV_TX_RETRYERROR (1<<19) +#define NV_TX_RETRYCOUNT_MASK (0xF<<20) +#define NV_TX_FORCED_INTERRUPT (1<<24) +#define NV_TX_DEFERRED (1<<26) +#define NV_TX_CARRIERLOST (1<<27) +#define NV_TX_LATECOLLISION (1<<28) +#define NV_TX_UNDERFLOW (1<<29) +#define NV_TX_ERROR (1<<30) +#define NV_TX_VALID (1<<31) + +#define NV_TX2_LASTPACKET (1<<29) +#define NV_TX2_RETRYERROR (1<<18) +#define NV_TX2_RETRYCOUNT_MASK (0xF<<19) +#define NV_TX2_FORCED_INTERRUPT (1<<30) +#define NV_TX2_DEFERRED (1<<25) +#define NV_TX2_CARRIERLOST (1<<26) +#define NV_TX2_LATECOLLISION (1<<27) +#define NV_TX2_UNDERFLOW (1<<28) +/* error and valid are the same for both */ +#define NV_TX2_ERROR (1<<30) +#define NV_TX2_VALID (1<<31) +#define NV_TX2_TSO (1<<28) +#define NV_TX2_TSO_SHIFT 14 +#define NV_TX2_TSO_MAX_SHIFT 14 +#define NV_TX2_TSO_MAX_SIZE (1<lock, except the performance + * critical parts: + * - rx is (pseudo-) lockless: it relies on the single-threading provided + * by the arch code for interrupts. + * - tx setup is lockless: it relies on netif_tx_lock. Actual submission + * needs netdev_priv(dev)->lock :-( + * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 
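+ * - all remaining configuration paths are assumed to serialize on
+ * netdev_priv(dev)->lock, as noted on the fe_priv fields below.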
+ */ + +/* in dev: base, irq */ +struct fe_priv { + spinlock_t lock; + + struct net_device *dev; + struct napi_struct napi; + + /* General data: + * Locking: spin_lock(&np->lock); */ + struct nv_ethtool_stats estats; + int in_shutdown; + u32 linkspeed; + int duplex; + int autoneg; + int fixed_mode; + int phyaddr; + int wolenabled; + unsigned int phy_oui; + unsigned int phy_model; + unsigned int phy_rev; + u16 gigabit; + int intr_test; + int recover_error; + int quiet_count; + + /* General data: RO fields */ + dma_addr_t ring_addr; + struct pci_dev *pci_dev; + u32 orig_mac[2]; + u32 events; + u32 irqmask; + u32 desc_ver; + u32 txrxctl_bits; + u32 vlanctl_bits; + u32 driver_data; + u32 device_id; + u32 register_size; + u32 mac_in_use; + int mgmt_version; + int mgmt_sema; + + void __iomem *base; + + /* rx specific fields. + * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); + */ + union ring_type get_rx, put_rx, first_rx, last_rx; + struct nv_skb_map *get_rx_ctx, *put_rx_ctx; + struct nv_skb_map *first_rx_ctx, *last_rx_ctx; + struct nv_skb_map *rx_skb; + + union ring_type rx_ring; + unsigned int rx_buf_sz; + unsigned int pkt_limit; + struct timer_list oom_kick; + struct timer_list nic_poll; + struct timer_list stats_poll; + u32 nic_poll_irq; + int rx_ring_size; + + /* media detection workaround. + * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); + */ + int need_linktimer; + unsigned long link_timeout; + /* + * tx specific fields. + */ + union ring_type get_tx, put_tx, first_tx, last_tx; + struct nv_skb_map *get_tx_ctx, *put_tx_ctx; + struct nv_skb_map *first_tx_ctx, *last_tx_ctx; + struct nv_skb_map *tx_skb; + + union ring_type tx_ring; + u32 tx_flags; + int tx_ring_size; + int tx_limit; + u32 tx_pkts_in_progress; + struct nv_skb_map *tx_change_owner; + struct nv_skb_map *tx_end_flip; + int tx_stop; + + /* msi/msi-x fields */ + u32 msi_flags; + struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; + + /* flow control */ + u32 pause_flags; + + /* power saved state */ + u32 saved_config_space[NV_PCI_REGSZ_MAX/4]; + + /* for different msi-x irq type */ + char name_rx[IFNAMSIZ + 3]; /* -rx */ + char name_tx[IFNAMSIZ + 3]; /* -tx */ + char name_other[IFNAMSIZ + 6]; /* -other */ +}; + +/* + * Maximum number of loops until we assume that a bit in the irq mask + * is stuck. Overridable with module param. + */ +static int max_interrupt_work = 4; + +/* + * Optimization can be either throuput mode or cpu mode + * + * Throughput Mode: Every tx and rx packet will generate an interrupt. + * CPU Mode: Interrupts are controlled by a timer. + */ +enum { + NV_OPTIMIZATION_MODE_THROUGHPUT, + NV_OPTIMIZATION_MODE_CPU, + NV_OPTIMIZATION_MODE_DYNAMIC +}; +static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC; + +/* + * Poll interval for timer irq + * + * This interval determines how frequent an interrupt is generated. + * The is value is determined by [(time_in_micro_secs * 100) / (2^10)] + * Min = 0, and Max = 65535 + */ +static int poll_interval = -1; + +/* + * MSI interrupts + */ +enum { + NV_MSI_INT_DISABLED, + NV_MSI_INT_ENABLED +}; +static int msi = NV_MSI_INT_ENABLED; + +/* + * MSIX interrupts + */ +enum { + NV_MSIX_INT_DISABLED, + NV_MSIX_INT_ENABLED +}; +static int msix = NV_MSIX_INT_ENABLED; + +/* + * DMA 64bit + */ +enum { + NV_DMA_64BIT_DISABLED, + NV_DMA_64BIT_ENABLED +}; +static int dma_64bit = NV_DMA_64BIT_ENABLED; + +/* + * Crossover Detection + * Realtek 8201 phy + some OEM boards do not work properly. 
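+ * Detection is therefore left disabled by default (see phy_cross below);
+ * init_realtek_8201_cross() applies its workaround only in that case.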
+ */ +enum { + NV_CROSSOVER_DETECTION_DISABLED, + NV_CROSSOVER_DETECTION_ENABLED +}; +static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; + +/* + * Power down phy when interface is down (persists through reboot; + * older Linux and other OSes may not power it up again) + */ +static int phy_power_down; + +static inline struct fe_priv *get_nvpriv(struct net_device *dev) +{ + return netdev_priv(dev); +} + +static inline u8 __iomem *get_hwbase(struct net_device *dev) +{ + return ((struct fe_priv *)netdev_priv(dev))->base; +} + +static inline void pci_push(u8 __iomem *base) +{ + /* force out pending posted writes */ + readl(base); +} + +static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) +{ + return le32_to_cpu(prd->flaglen) + & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); +} + +static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) +{ + return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; +} + +static bool nv_optimized(struct fe_priv *np) +{ + if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) + return false; + return true; +} + +static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, + int delay, int delaymax) +{ + u8 __iomem *base = get_hwbase(dev); + + pci_push(base); + do { + udelay(delay); + delaymax -= delay; + if (delaymax < 0) + return 1; + } while ((readl(base + offset) & mask) != target); + return 0; +} + +#define NV_SETUP_RX_RING 0x01 +#define NV_SETUP_TX_RING 0x02 + +static inline u32 dma_low(dma_addr_t addr) +{ + return addr; +} + +static inline u32 dma_high(dma_addr_t addr) +{ + return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */ +} + +static void setup_hw_rings(struct net_device *dev, int rxtx_flags) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (!nv_optimized(np)) { + if (rxtx_flags & NV_SETUP_RX_RING) + writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); + if (rxtx_flags & NV_SETUP_TX_RING) + writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); + } else { + if (rxtx_flags & NV_SETUP_RX_RING) { + writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); + writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); + } + if (rxtx_flags & NV_SETUP_TX_RING) { + writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); + writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh); + } + } +} + +static void free_rings(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!nv_optimized(np)) { + if (np->rx_ring.orig) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), + np->rx_ring.orig, np->ring_addr); + } else { + if (np->rx_ring.ex) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), + np->rx_ring.ex, np->ring_addr); + } + kfree(np->rx_skb); + kfree(np->tx_skb); +} + +static int using_multi_irqs(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!(np->msi_flags & NV_MSI_X_ENABLED) || + ((np->msi_flags & NV_MSI_X_ENABLED) && + ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) + return 0; + else + return 1; +} + +static void nv_txrx_gate(struct net_device *dev, bool gate) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 powerstate; + + if (!np->mac_in_use && + (np->driver_data & DEV_HAS_POWER_CNTRL)) { + powerstate = 
readl(base + NvRegPowerState2); + if (gate) + powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS; + else + powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS; + writel(powerstate, base + NvRegPowerState2); + } +} + +static void nv_enable_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + enable_irq(np->pci_dev->irq); + } else { + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } +} + +static void nv_disable_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + disable_irq(np->pci_dev->irq); + } else { + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } +} + +/* In MSIX mode, a write to irqmask behaves as XOR */ +static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) +{ + u8 __iomem *base = get_hwbase(dev); + + writel(mask, base + NvRegIrqMask); +} + +static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (np->msi_flags & NV_MSI_X_ENABLED) { + writel(mask, base + NvRegIrqMask); + } else { + if (np->msi_flags & NV_MSI_ENABLED) + writel(0, base + NvRegMSIIrqMask); + writel(0, base + NvRegIrqMask); + } +} + +static void nv_napi_enable(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + napi_enable(&np->napi); +} + +static void nv_napi_disable(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + napi_disable(&np->napi); +} + +#define MII_READ (-1) +/* mii_rw: read/write a register on the PHY. 
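+ * Pass MII_READ (-1) as the value to perform a read; the return value is
+ * then the register contents, or -1 on a bus error or timeout.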
+ * + * Caller must guarantee serialization + */ +static int mii_rw(struct net_device *dev, int addr, int miireg, int value) +{ + u8 __iomem *base = get_hwbase(dev); + u32 reg; + int retval; + + writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus); + + reg = readl(base + NvRegMIIControl); + if (reg & NVREG_MIICTL_INUSE) { + writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); + udelay(NV_MIIBUSY_DELAY); + } + + reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; + if (value != MII_READ) { + writel(value, base + NvRegMIIData); + reg |= NVREG_MIICTL_WRITE; + } + writel(reg, base + NvRegMIIControl); + + if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, + NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) { + retval = -1; + } else if (value != MII_READ) { + /* it was a write operation - fewer failures are detectable */ + retval = 0; + } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { + retval = -1; + } else { + retval = readl(base + NvRegMIIData); + } + + return retval; +} + +static int phy_reset(struct net_device *dev, u32 bmcr_setup) +{ + struct fe_priv *np = netdev_priv(dev); + u32 miicontrol; + unsigned int tries = 0; + + miicontrol = BMCR_RESET | bmcr_setup; + if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) + return -1; + + /* wait for 500ms */ + msleep(500); + + /* must wait till reset is deasserted */ + while (miicontrol & BMCR_RESET) { + usleep_range(10000, 20000); + miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + /* FIXME: 100 tries seem excessive */ + if (tries++ > 100) + return -1; + } + return 0; +} + +static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np) +{ + static const struct { + int reg; + int init; + } ri[] = { + { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 }, + { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 }, + { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 }, + { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 }, + { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 }, + { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 }, + { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(ri); i++) { + if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init)) + return PHY_ERROR; + } + + return 0; +} + +static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np) +{ + u32 reg; + u8 __iomem *base = get_hwbase(dev); + u32 powerstate = readl(base + NvRegPowerState2); + + /* need to perform hw phy reset */ + powerstate |= NVREG_POWERSTATE2_PHY_RESET; + writel(powerstate, base + NvRegPowerState2); + msleep(25); + + powerstate &= ~NVREG_POWERSTATE2_PHY_RESET; + writel(powerstate, base + NvRegPowerState2); + msleep(25); + + reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); + reg |= PHY_REALTEK_INIT9; + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) + return PHY_ERROR; + reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); + if (!(reg & PHY_REALTEK_INIT11)) { + reg |= PHY_REALTEK_INIT11; + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) + return PHY_ERROR; + + return 0; +} + +static int init_realtek_8201(struct net_device *dev, struct fe_priv *np) +{ + u32 phy_reserved; + + if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG6, MII_READ); + phy_reserved |= PHY_REALTEK_INIT7; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG6, 
phy_reserved)) + return PHY_ERROR; + } + + return 0; +} + +static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np) +{ + u32 phy_reserved; + + if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG2, MII_READ); + phy_reserved &= ~PHY_REALTEK_INIT_MSK1; + phy_reserved |= PHY_REALTEK_INIT3; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG2, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) + return PHY_ERROR; + } + + return 0; +} + +static int init_cicada(struct net_device *dev, struct fe_priv *np, + u32 phyinterface) +{ + u32 phy_reserved; + + if (phyinterface & PHY_RGMII) { + phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); + phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); + phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); + if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); + phy_reserved |= PHY_CICADA_INIT5; + if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); + phy_reserved |= PHY_CICADA_INIT6; + if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) + return PHY_ERROR; + + return 0; +} + +static int init_vitesse(struct net_device *dev, struct fe_priv *np) +{ + u32 phy_reserved; + + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG4, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG3, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG4, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG3, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG4, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG3, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK2; + phy_reserved |= PHY_VITESSE_INIT8; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) 
+ return PHY_ERROR; + + return 0; +} + +static int phy_init(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 phyinterface; + u32 mii_status, mii_control, mii_control_1000, reg; + + /* phy errata for E3016 phy */ + if (np->phy_model == PHY_MODEL_MARVELL_E3016) { + reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); + reg &= ~PHY_MARVELL_E3016_INITMASK; + if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { + netdev_info(dev, "%s: phy write to errata reg failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + if (np->phy_oui == PHY_OUI_REALTEK) { + if (np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211B) { + if (init_realtek_8211b(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211C) { + if (init_realtek_8211c(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { + if (init_realtek_8201(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + } + + /* set advertise register */ + reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL | + ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { + netdev_info(dev, "%s: phy write to advertise failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + + /* get phy interface type */ + phyinterface = readl(base + NvRegPhyInterface); + + /* see if gigabit phy */ + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + if (mii_status & PHY_GIGABIT) { + np->gigabit = PHY_GIGABIT; + mii_control_1000 = mii_rw(dev, np->phyaddr, + MII_CTRL1000, MII_READ); + mii_control_1000 &= ~ADVERTISE_1000HALF; + if (phyinterface & PHY_RGMII) + mii_control_1000 |= ADVERTISE_1000FULL; + else + mii_control_1000 &= ~ADVERTISE_1000FULL; + + if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else + np->gigabit = 0; + + mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + mii_control |= BMCR_ANENABLE; + + if (np->phy_oui == PHY_OUI_REALTEK && + np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211C) { + /* start autoneg since we already performed hw reset above */ + mii_control |= BMCR_ANRESTART; + if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else { + /* reset the phy + * (certain phys need bmcr to be setup with reset) + */ + if (phy_reset(dev, mii_control)) { + netdev_info(dev, "%s: phy reset failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + + /* phy vendor specific configuration */ + if ((np->phy_oui == PHY_OUI_CICADA)) { + if (init_cicada(dev, np, phyinterface)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_oui == PHY_OUI_VITESSE) { + if (init_vitesse(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_oui == PHY_OUI_REALTEK) { + if (np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211B) { 
+ /* reset could have cleared these out, set them back */ + if (init_realtek_8211b(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { + if (init_realtek_8201(dev, np) || + init_realtek_8201_cross(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + } + + /* some phys clear out pause advertisement on reset, set it back */ + mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); + + /* restart auto negotiation, power down phy */ + mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); + if (phy_power_down) + mii_control |= BMCR_PDOWN; + if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) + return PHY_ERROR; + + return 0; +} + +static void nv_start_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 rx_ctrl = readl(base + NvRegReceiverControl); + + /* Already running? Stop it. */ + if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { + rx_ctrl &= ~NVREG_RCVCTL_START; + writel(rx_ctrl, base + NvRegReceiverControl); + pci_push(base); + } + writel(np->linkspeed, base + NvRegLinkSpeed); + pci_push(base); + rx_ctrl |= NVREG_RCVCTL_START; + if (np->mac_in_use) + rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; + writel(rx_ctrl, base + NvRegReceiverControl); + pci_push(base); +} + +static void nv_stop_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 rx_ctrl = readl(base + NvRegReceiverControl); + + if (!np->mac_in_use) + rx_ctrl &= ~NVREG_RCVCTL_START; + else + rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; + writel(rx_ctrl, base + NvRegReceiverControl); + if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, + NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX)) + netdev_info(dev, "%s: ReceiverStatus remained busy\n", + __func__); + + udelay(NV_RXSTOP_DELAY2); + if (!np->mac_in_use) + writel(0, base + NvRegLinkSpeed); +} + +static void nv_start_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 tx_ctrl = readl(base + NvRegTransmitterControl); + + tx_ctrl |= NVREG_XMITCTL_START; + if (np->mac_in_use) + tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; + writel(tx_ctrl, base + NvRegTransmitterControl); + pci_push(base); +} + +static void nv_stop_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 tx_ctrl = readl(base + NvRegTransmitterControl); + + if (!np->mac_in_use) + tx_ctrl &= ~NVREG_XMITCTL_START; + else + tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; + writel(tx_ctrl, base + NvRegTransmitterControl); + if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, + NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX)) + netdev_info(dev, "%s: TransmitterStatus remained busy\n", + __func__); + + udelay(NV_TXSTOP_DELAY2); + if (!np->mac_in_use) + writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, + base + NvRegTransmitPoll); +} + +static void nv_start_rxtx(struct net_device *dev) +{ + nv_start_rx(dev); + nv_start_tx(dev); +} + +static void nv_stop_rxtx(struct net_device *dev) +{ + nv_stop_rx(dev); + nv_stop_tx(dev); +} + +static void nv_txrx_reset(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + 
NvRegTxRxControl); + pci_push(base); + udelay(NV_TXRX_RESET_DELAY); + writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); +} + +static void nv_mac_reset(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 temp1, temp2, temp3; + + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); + + /* save registers since they will be cleared on reset */ + temp1 = readl(base + NvRegMacAddrA); + temp2 = readl(base + NvRegMacAddrB); + temp3 = readl(base + NvRegTransmitPoll); + + writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); + pci_push(base); + udelay(NV_MAC_RESET_DELAY); + writel(0, base + NvRegMacReset); + pci_push(base); + udelay(NV_MAC_RESET_DELAY); + + /* restore saved registers */ + writel(temp1, base + NvRegMacAddrA); + writel(temp2, base + NvRegMacAddrB); + writel(temp3, base + NvRegTransmitPoll); + + writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); +} + +static void nv_get_hw_stats(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + np->estats.tx_bytes += readl(base + NvRegTxCnt); + np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); + np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); + np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); + np->estats.tx_late_collision += readl(base + NvRegTxLateCol); + np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); + np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); + np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); + np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); + np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); + np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); + np->estats.rx_late_collision += readl(base + NvRegRxLateCol); + np->estats.rx_runt += readl(base + NvRegRxRunt); + np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); + np->estats.rx_over_errors += readl(base + NvRegRxOverflow); + np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); + np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); + np->estats.rx_length_error += readl(base + NvRegRxLenErr); + np->estats.rx_unicast += readl(base + NvRegRxUnicast); + np->estats.rx_multicast += readl(base + NvRegRxMulticast); + np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); + np->estats.rx_packets = + np->estats.rx_unicast + + np->estats.rx_multicast + + np->estats.rx_broadcast; + np->estats.rx_errors_total = + np->estats.rx_crc_errors + + np->estats.rx_over_errors + + np->estats.rx_frame_error + + (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + + np->estats.rx_late_collision + + np->estats.rx_runt + + np->estats.rx_frame_too_long; + np->estats.tx_errors_total = + np->estats.tx_late_collision + + np->estats.tx_fifo_errors + + np->estats.tx_carrier_errors + + np->estats.tx_excess_deferral + + np->estats.tx_retry_error; + + if (np->driver_data & DEV_HAS_STATISTICS_V2) { + np->estats.tx_deferral += readl(base + NvRegTxDef); + np->estats.tx_packets += readl(base + NvRegTxFrame); + np->estats.rx_bytes += readl(base + NvRegRxCnt); + np->estats.tx_pause += readl(base + NvRegTxPause); + np->estats.rx_pause += readl(base + NvRegRxPause); + np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); + } + + if (np->driver_data & DEV_HAS_STATISTICS_V3) { + np->estats.tx_unicast += readl(base + 
NvRegTxUnicast); + np->estats.tx_multicast += readl(base + NvRegTxMulticast); + np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); + } +} + +/* + * nv_get_stats: dev->get_stats function + * Get latest stats value from the nic. + * Called with read_lock(&dev_base_lock) held for read - + * only synchronized against unregister_netdevice. + */ +static struct net_device_stats *nv_get_stats(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + + /* If the nic supports hw counters then retrieve latest values */ + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { + nv_get_hw_stats(dev); + + /* copy to net_device stats */ + dev->stats.tx_bytes = np->estats.tx_bytes; + dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; + dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; + dev->stats.rx_crc_errors = np->estats.rx_crc_errors; + dev->stats.rx_over_errors = np->estats.rx_over_errors; + dev->stats.rx_errors = np->estats.rx_errors_total; + dev->stats.tx_errors = np->estats.tx_errors_total; + } + + return &dev->stats; +} + +/* + * nv_alloc_rx: fill rx ring entries. + * Return 1 if the allocations for the skbs failed and the + * rx engine is without Available descriptors + */ +static int nv_alloc_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + struct ring_desc *less_rx; + + less_rx = np->get_rx.orig; + if (less_rx-- == np->first_rx.orig) + less_rx = np->last_rx.orig; + + while (np->put_rx.orig != less_rx) { + struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); + if (skb) { + np->put_rx_ctx->skb = skb; + np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + skb->data, + skb_tailroom(skb), + PCI_DMA_FROMDEVICE); + np->put_rx_ctx->dma_len = skb_tailroom(skb); + np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); + wmb(); + np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); + if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) + np->put_rx.orig = np->first_rx.orig; + if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) + np->put_rx_ctx = np->first_rx_ctx; + } else + return 1; + } + return 0; +} + +static int nv_alloc_rx_optimized(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + struct ring_desc_ex *less_rx; + + less_rx = np->get_rx.ex; + if (less_rx-- == np->first_rx.ex) + less_rx = np->last_rx.ex; + + while (np->put_rx.ex != less_rx) { + struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); + if (skb) { + np->put_rx_ctx->skb = skb; + np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + skb->data, + skb_tailroom(skb), + PCI_DMA_FROMDEVICE); + np->put_rx_ctx->dma_len = skb_tailroom(skb); + np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); + np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); + wmb(); + np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); + if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) + np->put_rx.ex = np->first_rx.ex; + if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) + np->put_rx_ctx = np->first_rx_ctx; + } else + return 1; + } + return 0; +} + +/* If rx bufs are exhausted called after 50ms to attempt to refresh */ +static void nv_do_rx_refill(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + + /* Just reschedule NAPI rx processing */ + napi_schedule(&np->napi); +} + +static void nv_init_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int i; + + np->get_rx = np->put_rx 
= np->first_rx = np->rx_ring; + + if (!nv_optimized(np)) + np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; + else + np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; + np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; + np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; + + for (i = 0; i < np->rx_ring_size; i++) { + if (!nv_optimized(np)) { + np->rx_ring.orig[i].flaglen = 0; + np->rx_ring.orig[i].buf = 0; + } else { + np->rx_ring.ex[i].flaglen = 0; + np->rx_ring.ex[i].txvlan = 0; + np->rx_ring.ex[i].bufhigh = 0; + np->rx_ring.ex[i].buflow = 0; + } + np->rx_skb[i].skb = NULL; + np->rx_skb[i].dma = 0; + } +} + +static void nv_init_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int i; + + np->get_tx = np->put_tx = np->first_tx = np->tx_ring; + + if (!nv_optimized(np)) + np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; + else + np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; + np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; + np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; + np->tx_pkts_in_progress = 0; + np->tx_change_owner = NULL; + np->tx_end_flip = NULL; + np->tx_stop = 0; + + for (i = 0; i < np->tx_ring_size; i++) { + if (!nv_optimized(np)) { + np->tx_ring.orig[i].flaglen = 0; + np->tx_ring.orig[i].buf = 0; + } else { + np->tx_ring.ex[i].flaglen = 0; + np->tx_ring.ex[i].txvlan = 0; + np->tx_ring.ex[i].bufhigh = 0; + np->tx_ring.ex[i].buflow = 0; + } + np->tx_skb[i].skb = NULL; + np->tx_skb[i].dma = 0; + np->tx_skb[i].dma_len = 0; + np->tx_skb[i].dma_single = 0; + np->tx_skb[i].first_tx_desc = NULL; + np->tx_skb[i].next_tx_ctx = NULL; + } +} + +static int nv_init_ring(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + + nv_init_tx(dev); + nv_init_rx(dev); + + if (!nv_optimized(np)) + return nv_alloc_rx(dev); + else + return nv_alloc_rx_optimized(dev); +} + +static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) +{ + if (tx_skb->dma) { + if (tx_skb->dma_single) + pci_unmap_single(np->pci_dev, tx_skb->dma, + tx_skb->dma_len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(np->pci_dev, tx_skb->dma, + tx_skb->dma_len, + PCI_DMA_TODEVICE); + tx_skb->dma = 0; + } +} + +static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) +{ + nv_unmap_txskb(np, tx_skb); + if (tx_skb->skb) { + dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + return 1; + } + return 0; +} + +static void nv_drain_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + unsigned int i; + + for (i = 0; i < np->tx_ring_size; i++) { + if (!nv_optimized(np)) { + np->tx_ring.orig[i].flaglen = 0; + np->tx_ring.orig[i].buf = 0; + } else { + np->tx_ring.ex[i].flaglen = 0; + np->tx_ring.ex[i].txvlan = 0; + np->tx_ring.ex[i].bufhigh = 0; + np->tx_ring.ex[i].buflow = 0; + } + if (nv_release_txskb(np, &np->tx_skb[i])) + dev->stats.tx_dropped++; + np->tx_skb[i].dma = 0; + np->tx_skb[i].dma_len = 0; + np->tx_skb[i].dma_single = 0; + np->tx_skb[i].first_tx_desc = NULL; + np->tx_skb[i].next_tx_ctx = NULL; + } + np->tx_pkts_in_progress = 0; + np->tx_change_owner = NULL; + np->tx_end_flip = NULL; +} + +static void nv_drain_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int i; + + for (i = 0; i < np->rx_ring_size; i++) { + if (!nv_optimized(np)) { + np->rx_ring.orig[i].flaglen = 0; + np->rx_ring.orig[i].buf = 0; + } else { + np->rx_ring.ex[i].flaglen = 0; + np->rx_ring.ex[i].txvlan = 0; + np->rx_ring.ex[i].bufhigh = 0; + np->rx_ring.ex[i].buflow = 0; + } + 
wmb(); + if (np->rx_skb[i].skb) { + pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, + (skb_end_pointer(np->rx_skb[i].skb) - + np->rx_skb[i].skb->data), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(np->rx_skb[i].skb); + np->rx_skb[i].skb = NULL; + } + } +} + +static void nv_drain_rxtx(struct net_device *dev) +{ + nv_drain_tx(dev); + nv_drain_rx(dev); +} + +static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) +{ + return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); +} + +static void nv_legacybackoff_reseed(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 reg; + u32 low; + int tx_status = 0; + + reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; + get_random_bytes(&low, sizeof(low)); + reg |= low & NVREG_SLOTTIME_MASK; + + /* Need to stop tx before change takes effect. + * Caller has already gained np->lock. + */ + tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; + if (tx_status) + nv_stop_tx(dev); + nv_stop_rx(dev); + writel(reg, base + NvRegSlotTime); + if (tx_status) + nv_start_tx(dev); + nv_start_rx(dev); +} + +/* Gear Backoff Seeds */ +#define BACKOFF_SEEDSET_ROWS 8 +#define BACKOFF_SEEDSET_LFSRS 15 + +/* Known Good seed sets */ +static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { + {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, + {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, + {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, + {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, + {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, + {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, + {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, + {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} }; + +static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { + {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, + {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, + {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} }; + +static void nv_gear_backoff_reseed(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; + u32 temp, seedset, combinedSeed; + int i; + + /* Setup seed for free running LFSR */ + /* We are going to read the time stamp counter 3 times + and swizzle bits around to increase randomness */ + get_random_bytes(&miniseed1, sizeof(miniseed1)); + miniseed1 &= 0x0fff; + if (miniseed1 == 0) + miniseed1 = 0xabc; + + get_random_bytes(&miniseed2, sizeof(miniseed2)); + miniseed2 &= 0x0fff; + if (miniseed2 == 0) + miniseed2 = 0xabc; + miniseed2_reversed = + ((miniseed2 & 0xF00) >> 8) | + (miniseed2 & 0x0F0) | + ((miniseed2 & 0x00F) << 8); + + get_random_bytes(&miniseed3, sizeof(miniseed3)); + miniseed3 &= 0x0fff; + if (miniseed3 == 0) + miniseed3 = 0xabc; + miniseed3_reversed = + 
((miniseed3 & 0xF00) >> 8) | + (miniseed3 & 0x0F0) | + ((miniseed3 & 0x00F) << 8); + + combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | + (miniseed2 ^ miniseed3_reversed); + + /* Seeds can not be zero */ + if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) + combinedSeed |= 0x08; + if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) + combinedSeed |= 0x8000; + + /* No need to disable tx here */ + temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); + temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; + temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; + writel(temp, base + NvRegBackOffControl); + + /* Setup seeds for all gear LFSRs. */ + get_random_bytes(&seedset, sizeof(seedset)); + seedset = seedset % BACKOFF_SEEDSET_ROWS; + for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) { + temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); + temp |= main_seedset[seedset][i-1] & 0x3ff; + temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); + writel(temp, base + NvRegBackOffControl); + } +} + +/* + * nv_start_xmit: dev->hard_start_xmit function + * Called with netif_tx_lock held. + */ +static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u32 tx_flags = 0; + u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); + unsigned int fragments = skb_shinfo(skb)->nr_frags; + unsigned int i; + u32 offset = 0; + u32 bcnt; + u32 size = skb_headlen(skb); + u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + u32 empty_slots; + struct ring_desc *put_tx; + struct ring_desc *start_tx; + struct ring_desc *prev_tx; + struct nv_skb_map *prev_tx_ctx; + unsigned long flags; + + /* add fragments to entries count */ + for (i = 0; i < fragments; i++) { + entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + + ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + } + + spin_lock_irqsave(&np->lock, flags); + empty_slots = nv_get_empty_tx_slots(np); + if (unlikely(empty_slots <= entries)) { + netif_stop_queue(dev); + np->tx_stop = 1; + spin_unlock_irqrestore(&np->lock, flags); + return NETDEV_TX_BUSY; + } + spin_unlock_irqrestore(&np->lock, flags); + + start_tx = put_tx = np->put_tx.orig; + + /* setup the header buffer */ + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; + np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, + PCI_DMA_TODEVICE); + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 1; + put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + tx_flags = np->tx_flags; + offset += bcnt; + size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.orig)) + put_tx = np->first_tx.orig; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (size); + + /* setup the fragments */ + for (i = 0; i < fragments; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 size = frag->size; + offset = 0; + + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; + np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, + PCI_DMA_TODEVICE); + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 0; + put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + offset += bcnt; + size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.orig)) + put_tx = np->first_tx.orig; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (size); + } + + /* set last fragment flag */ + prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); + + /* save skb in this slot's context area */ + prev_tx_ctx->skb = skb; + + if (skb_is_gso(skb)) + tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); + else + tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? + NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; + + spin_lock_irqsave(&np->lock, flags); + + /* set tx flags */ + start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); + np->put_tx.orig = put_tx; + + spin_unlock_irqrestore(&np->lock, flags); + + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + return NETDEV_TX_OK; +} + +static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, + struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u32 tx_flags = 0; + u32 tx_flags_extra; + unsigned int fragments = skb_shinfo(skb)->nr_frags; + unsigned int i; + u32 offset = 0; + u32 bcnt; + u32 size = skb_headlen(skb); + u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + u32 empty_slots; + struct ring_desc_ex *put_tx; + struct ring_desc_ex *start_tx; + struct ring_desc_ex *prev_tx; + struct nv_skb_map *prev_tx_ctx; + struct nv_skb_map *start_tx_ctx; + unsigned long flags; + + /* add fragments to entries count */ + for (i = 0; i < fragments; i++) { + entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + + ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + } + + spin_lock_irqsave(&np->lock, flags); + empty_slots = nv_get_empty_tx_slots(np); + if (unlikely(empty_slots <= entries)) { + netif_stop_queue(dev); + np->tx_stop = 1; + spin_unlock_irqrestore(&np->lock, flags); + return NETDEV_TX_BUSY; + } + spin_unlock_irqrestore(&np->lock, flags); + + start_tx = put_tx = np->put_tx.ex; + start_tx_ctx = np->put_tx_ctx; + + /* setup the header buffer */ + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; + np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, + PCI_DMA_TODEVICE); + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 1; + put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); + put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + tx_flags = NV_TX2_VALID; + offset += bcnt; + size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.ex)) + put_tx = np->first_tx.ex; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (size); + + /* setup the fragments */ + for (i = 0; i < fragments; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 size = frag->size; + offset = 0; + + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; + np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, + PCI_DMA_TODEVICE); + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 0; + put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); + put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + offset += bcnt; + size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.ex)) + put_tx = np->first_tx.ex; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (size); + } + + /* set last fragment flag */ + prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); + + /* save skb in this slot's context area */ + prev_tx_ctx->skb = skb; + + if (skb_is_gso(skb)) + tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); + else + tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? + NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; + + /* vlan tag */ + if (vlan_tx_tag_present(skb)) + start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | + vlan_tx_tag_get(skb)); + else + start_tx->txvlan = 0; + + spin_lock_irqsave(&np->lock, flags); + + if (np->tx_limit) { + /* Limit the number of outstanding tx. Setup all fragments, but + * do not set the VALID bit on the first descriptor. Save a pointer + * to that descriptor and also for next skb_map element. + */ + + if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { + if (!np->tx_change_owner) + np->tx_change_owner = start_tx_ctx; + + /* remove VALID bit */ + tx_flags &= ~NV_TX2_VALID; + start_tx_ctx->first_tx_desc = start_tx; + start_tx_ctx->next_tx_ctx = np->put_tx_ctx; + np->tx_end_flip = np->put_tx_ctx; + } else { + np->tx_pkts_in_progress++; + } + } + + /* set tx flags */ + start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); + np->put_tx.ex = put_tx; + + spin_unlock_irqrestore(&np->lock, flags); + + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + return NETDEV_TX_OK; +} + +static inline void nv_tx_flip_ownership(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + + np->tx_pkts_in_progress--; + if (np->tx_change_owner) { + np->tx_change_owner->first_tx_desc->flaglen |= + cpu_to_le32(NV_TX2_VALID); + np->tx_pkts_in_progress++; + + np->tx_change_owner = np->tx_change_owner->next_tx_ctx; + if (np->tx_change_owner == np->tx_end_flip) + np->tx_change_owner = NULL; + + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + } +} + +/* + * nv_tx_done: check for completed packets, release the skbs. + * + * Caller must own np->lock. 
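+ * Returns the number of completed tx packets reaped, at most @limit.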
+ */ +static int nv_tx_done(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + int tx_work = 0; + struct ring_desc *orig_get_tx = np->get_tx.orig; + + while ((np->get_tx.orig != np->put_tx.orig) && + !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && + (tx_work < limit)) { + + nv_unmap_txskb(np, np->get_tx_ctx); + + if (np->desc_ver == DESC_VER_1) { + if (flags & NV_TX_LASTPACKET) { + if (flags & NV_TX_ERROR) { + if (flags & NV_TX_UNDERFLOW) + dev->stats.tx_fifo_errors++; + if (flags & NV_TX_CARRIERLOST) + dev->stats.tx_carrier_errors++; + if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) + nv_legacybackoff_reseed(dev); + dev->stats.tx_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; + } + dev_kfree_skb_any(np->get_tx_ctx->skb); + np->get_tx_ctx->skb = NULL; + tx_work++; + } + } else { + if (flags & NV_TX2_LASTPACKET) { + if (flags & NV_TX2_ERROR) { + if (flags & NV_TX2_UNDERFLOW) + dev->stats.tx_fifo_errors++; + if (flags & NV_TX2_CARRIERLOST) + dev->stats.tx_carrier_errors++; + if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) + nv_legacybackoff_reseed(dev); + dev->stats.tx_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; + } + dev_kfree_skb_any(np->get_tx_ctx->skb); + np->get_tx_ctx->skb = NULL; + tx_work++; + } + } + if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) + np->get_tx.orig = np->first_tx.orig; + if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) + np->get_tx_ctx = np->first_tx_ctx; + } + if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { + np->tx_stop = 0; + netif_wake_queue(dev); + } + return tx_work; +} + +static int nv_tx_done_optimized(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + int tx_work = 0; + struct ring_desc_ex *orig_get_tx = np->get_tx.ex; + + while ((np->get_tx.ex != np->put_tx.ex) && + !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && + (tx_work < limit)) { + + nv_unmap_txskb(np, np->get_tx_ctx); + + if (flags & NV_TX2_LASTPACKET) { + if (!(flags & NV_TX2_ERROR)) + dev->stats.tx_packets++; + else { + if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { + if (np->driver_data & DEV_HAS_GEAR_MODE) + nv_gear_backoff_reseed(dev); + else + nv_legacybackoff_reseed(dev); + } + } + + dev_kfree_skb_any(np->get_tx_ctx->skb); + np->get_tx_ctx->skb = NULL; + tx_work++; + + if (np->tx_limit) + nv_tx_flip_ownership(dev); + } + if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) + np->get_tx.ex = np->first_tx.ex; + if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) + np->get_tx_ctx = np->first_tx_ctx; + } + if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { + np->tx_stop = 0; + netif_wake_queue(dev); + } + return tx_work; +} + +/* + * nv_tx_timeout: dev->tx_timeout function + * Called with netif_tx_lock held. + */ +static void nv_tx_timeout(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 status; + union ring_type put_tx; + int saved_tx_limit; + int i; + + if (np->msi_flags & NV_MSI_X_ENABLED) + status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; + else + status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; + + netdev_info(dev, "Got tx_timeout. 
irq: %08x\n", status); + + netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); + netdev_info(dev, "Dumping tx registers\n"); + for (i = 0; i <= np->register_size; i += 32) { + netdev_info(dev, + "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", + i, + readl(base + i + 0), readl(base + i + 4), + readl(base + i + 8), readl(base + i + 12), + readl(base + i + 16), readl(base + i + 20), + readl(base + i + 24), readl(base + i + 28)); + } + netdev_info(dev, "Dumping tx ring\n"); + for (i = 0; i < np->tx_ring_size; i += 4) { + if (!nv_optimized(np)) { + netdev_info(dev, + "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", + i, + le32_to_cpu(np->tx_ring.orig[i].buf), + le32_to_cpu(np->tx_ring.orig[i].flaglen), + le32_to_cpu(np->tx_ring.orig[i+1].buf), + le32_to_cpu(np->tx_ring.orig[i+1].flaglen), + le32_to_cpu(np->tx_ring.orig[i+2].buf), + le32_to_cpu(np->tx_ring.orig[i+2].flaglen), + le32_to_cpu(np->tx_ring.orig[i+3].buf), + le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); + } else { + netdev_info(dev, + "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", + i, + le32_to_cpu(np->tx_ring.ex[i].bufhigh), + le32_to_cpu(np->tx_ring.ex[i].buflow), + le32_to_cpu(np->tx_ring.ex[i].flaglen), + le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+1].buflow), + le32_to_cpu(np->tx_ring.ex[i+1].flaglen), + le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+2].buflow), + le32_to_cpu(np->tx_ring.ex[i+2].flaglen), + le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+3].buflow), + le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); + } + } + + spin_lock_irq(&np->lock); + + /* 1) stop tx engine */ + nv_stop_tx(dev); + + /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ + saved_tx_limit = np->tx_limit; + np->tx_limit = 0; /* prevent giving HW any limited pkts */ + np->tx_stop = 0; /* prevent waking tx queue */ + if (!nv_optimized(np)) + nv_tx_done(dev, np->tx_ring_size); + else + nv_tx_done_optimized(dev, np->tx_ring_size); + + /* save current HW position */ + if (np->tx_change_owner) + put_tx.ex = np->tx_change_owner->first_tx_desc; + else + put_tx = np->put_tx; + + /* 3) clear all tx state */ + nv_drain_tx(dev); + nv_init_tx(dev); + + /* 4) restore state to current HW position */ + np->get_tx = np->put_tx = put_tx; + np->tx_limit = saved_tx_limit; + + /* 5) restart tx engine */ + nv_start_tx(dev); + netif_wake_queue(dev); + spin_unlock_irq(&np->lock); +} + +/* + * Called when the nic notices a mismatch between the actual data len on the + * wire and the len indicated in the 802 header + */ +static int nv_getlen(struct net_device *dev, void *packet, int datalen) +{ + int hdrlen; /* length of the 802 header */ + int protolen; /* length as stored in the proto field */ + + /* 1) calculate len according to header */ + if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { + protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto); + hdrlen = VLAN_HLEN; + } else { + protolen = ntohs(((struct ethhdr *)packet)->h_proto); + hdrlen = ETH_HLEN; + } + if (protolen > ETH_DATA_LEN) + return datalen; /* Value in proto field not a len, no checks possible */ + + protolen += hdrlen; + /* consistency checks: */ + if (datalen > ETH_ZLEN) { + if (datalen >= protolen) { + /* more data on wire than in 802 header, trim of + * additional data. + */ + return protolen; + } else { + /* less data on wire than mentioned in header. + * Discard the packet. 
+ */ + return -1; + } + } else { + /* short packet. Accept only if 802 values are also short */ + if (protolen > ETH_ZLEN) { + return -1; + } + return datalen; + } +} + +static int nv_rx_process(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + int rx_work = 0; + struct sk_buff *skb; + int len; + + while ((np->get_rx.orig != np->put_rx.orig) && + !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && + (rx_work < limit)) { + + /* + * the packet is for us - immediately tear down the pci mapping. + * TODO: check if a prefetch of the first cacheline improves + * the performance. + */ + pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + PCI_DMA_FROMDEVICE); + skb = np->get_rx_ctx->skb; + np->get_rx_ctx->skb = NULL; + + /* look at what we actually got: */ + if (np->desc_ver == DESC_VER_1) { + if (likely(flags & NV_RX_DESCRIPTORVALID)) { + len = flags & LEN_MASK_V1; + if (unlikely(flags & NV_RX_ERROR)) { + if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { + len = nv_getlen(dev, skb->data, len); + if (len < 0) { + dev->stats.rx_errors++; + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* framing errors are soft errors */ + else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { + if (flags & NV_RX_SUBSTRACT1) + len--; + } + /* the rest are hard errors */ + else { + if (flags & NV_RX_MISSEDFRAME) + dev->stats.rx_missed_errors++; + if (flags & NV_RX_CRCERR) + dev->stats.rx_crc_errors++; + if (flags & NV_RX_OVERFLOW) + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + dev_kfree_skb(skb); + goto next_pkt; + } + } + } else { + dev_kfree_skb(skb); + goto next_pkt; + } + } else { + if (likely(flags & NV_RX2_DESCRIPTORVALID)) { + len = flags & LEN_MASK_V2; + if (unlikely(flags & NV_RX2_ERROR)) { + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { + len = nv_getlen(dev, skb->data, len); + if (len < 0) { + dev->stats.rx_errors++; + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* framing errors are soft errors */ + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { + if (flags & NV_RX2_SUBSTRACT1) + len--; + } + /* the rest are hard errors */ + else { + if (flags & NV_RX2_CRCERR) + dev->stats.rx_crc_errors++; + if (flags & NV_RX2_OVERFLOW) + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + dev_kfree_skb(skb); + goto next_pkt; + } + } + if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ + ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* got a valid packet - forward it to the network core */ + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(&np->napi, skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; +next_pkt: + if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) + np->get_rx.orig = np->first_rx.orig; + if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) + np->get_rx_ctx = np->first_rx_ctx; + + rx_work++; + } + + return rx_work; +} + +static int nv_rx_process_optimized(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + u32 vlanflags = 0; + int rx_work = 0; + struct sk_buff *skb; + int len; + + while ((np->get_rx.ex != np->put_rx.ex) && + !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && + (rx_work < limit)) { + + /* + * the packet is for us - immediately tear down the pci mapping. 
+ * TODO: check if a prefetch of the first cacheline improves + * the performance. + */ + pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + PCI_DMA_FROMDEVICE); + skb = np->get_rx_ctx->skb; + np->get_rx_ctx->skb = NULL; + + /* look at what we actually got: */ + if (likely(flags & NV_RX2_DESCRIPTORVALID)) { + len = flags & LEN_MASK_V2; + if (unlikely(flags & NV_RX2_ERROR)) { + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { + len = nv_getlen(dev, skb->data, len); + if (len < 0) { + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* framing errors are soft errors */ + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { + if (flags & NV_RX2_SUBSTRACT1) + len--; + } + /* the rest are hard errors */ + else { + dev_kfree_skb(skb); + goto next_pkt; + } + } + + if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ + ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* got a valid packet - forward it to the network core */ + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + prefetch(skb->data); + + vlanflags = le32_to_cpu(np->get_rx.ex->buflow); + + /* + * There's need to check for NETIF_F_HW_VLAN_RX here. + * Even if vlan rx accel is disabled, + * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set. + */ + if (dev->features & NETIF_F_HW_VLAN_RX && + vlanflags & NV_RX3_VLAN_TAG_PRESENT) { + u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; + + __vlan_hwaccel_put_tag(skb, vid); + } + napi_gro_receive(&np->napi, skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + } else { + dev_kfree_skb(skb); + } +next_pkt: + if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) + np->get_rx.ex = np->first_rx.ex; + if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) + np->get_rx_ctx = np->first_rx_ctx; + + rx_work++; + } + + return rx_work; +} + +static void set_bufsize(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + + if (dev->mtu <= ETH_DATA_LEN) + np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; + else + np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; +} + +/* + * nv_change_mtu: dev->change_mtu function + * Called with dev_base_lock held for read. + */ +static int nv_change_mtu(struct net_device *dev, int new_mtu) +{ + struct fe_priv *np = netdev_priv(dev); + int old_mtu; + + if (new_mtu < 64 || new_mtu > np->pkt_limit) + return -EINVAL; + + old_mtu = dev->mtu; + dev->mtu = new_mtu; + + /* return early if the buffer sizes will not change */ + if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) + return 0; + if (old_mtu == new_mtu) + return 0; + + /* synchronized against open : rtnl_lock() held by caller */ + if (netif_running(dev)) { + u8 __iomem *base = get_hwbase(dev); + /* + * It seems that the nic preloads valid ring entries into an + * internal buffer. The procedure for flushing everything is + * guessed, there is probably a simpler approach. + * Changing the MTU is a rare event, it shouldn't matter. 
+ */ + nv_disable_irq(dev); + nv_napi_disable(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + /* reinit driver view of the rx queue */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(base); + + /* restart rx engine */ + nv_start_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + nv_napi_enable(dev); + nv_enable_irq(dev); + } + return 0; +} + +static void nv_copy_mac_to_hw(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 mac[2]; + + mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + + (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); + mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); + + writel(mac[0], base + NvRegMacAddrA); + writel(mac[1], base + NvRegMacAddrB); +} + +/* + * nv_set_mac_address: dev->set_mac_address function + * Called with rtnl_lock() held. + */ +static int nv_set_mac_address(struct net_device *dev, void *addr) +{ + struct fe_priv *np = netdev_priv(dev); + struct sockaddr *macaddr = (struct sockaddr *)addr; + + if (!is_valid_ether_addr(macaddr->sa_data)) + return -EADDRNOTAVAIL; + + /* synchronized against open : rtnl_lock() held by caller */ + memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); + + if (netif_running(dev)) { + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock_irq(&np->lock); + + /* stop rx engine */ + nv_stop_rx(dev); + + /* set mac address */ + nv_copy_mac_to_hw(dev); + + /* restart rx engine */ + nv_start_rx(dev); + spin_unlock_irq(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } else { + nv_copy_mac_to_hw(dev); + } + return 0; +} + +/* + * nv_set_multicast: dev->set_multicast function + * Called with netif_tx_lock held. 
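+ * Rebuilds the multicast address/mask registers from the device's mc list, or selects promiscuous/all-multicast filtering when requested.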
+ */ +static void nv_set_multicast(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 addr[2]; + u32 mask[2]; + u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; + + memset(addr, 0, sizeof(addr)); + memset(mask, 0, sizeof(mask)); + + if (dev->flags & IFF_PROMISC) { + pff |= NVREG_PFF_PROMISC; + } else { + pff |= NVREG_PFF_MYADDR; + + if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { + u32 alwaysOff[2]; + u32 alwaysOn[2]; + + alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; + if (dev->flags & IFF_ALLMULTI) { + alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; + } else { + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + unsigned char *addr = ha->addr; + u32 a, b; + + a = le32_to_cpu(*(__le32 *) addr); + b = le16_to_cpu(*(__le16 *) (&addr[4])); + alwaysOn[0] &= a; + alwaysOff[0] &= ~a; + alwaysOn[1] &= b; + alwaysOff[1] &= ~b; + } + } + addr[0] = alwaysOn[0]; + addr[1] = alwaysOn[1]; + mask[0] = alwaysOn[0] | alwaysOff[0]; + mask[1] = alwaysOn[1] | alwaysOff[1]; + } else { + mask[0] = NVREG_MCASTMASKA_NONE; + mask[1] = NVREG_MCASTMASKB_NONE; + } + } + addr[0] |= NVREG_MCASTADDRA_FORCE; + pff |= NVREG_PFF_ALWAYS; + spin_lock_irq(&np->lock); + nv_stop_rx(dev); + writel(addr[0], base + NvRegMulticastAddrA); + writel(addr[1], base + NvRegMulticastAddrB); + writel(mask[0], base + NvRegMulticastMaskA); + writel(mask[1], base + NvRegMulticastMaskB); + writel(pff, base + NvRegPacketFilterFlags); + nv_start_rx(dev); + spin_unlock_irq(&np->lock); +} + +static void nv_update_pause(struct net_device *dev, u32 pause_flags) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); + + if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { + u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; + if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { + writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); + np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + } else { + writel(pff, base + NvRegPacketFilterFlags); + } + } + if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { + u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; + if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { + u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) + pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { + pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; + /* limit the number of tx pause frames to a default of 8 */ + writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); + } + writel(pause_enable, base + NvRegTxPauseFrame); + writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); + np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } else { + writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); + writel(regmisc, base + NvRegMisc1); + } + } +} + +/** + * nv_update_linkspeed: Setup the MAC according to the link partner + * @dev: Network device to be configured + * + * The function queries the PHY and checks if there is a link partner. + * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is + * set to 10 MBit HD. + * + * The function returns 0 if there is no link partner and 1 if there is + * a good link partner. 
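+ * With autoneg enabled, speed and duplex are derived from the intersection of the local advertisement (MII_ADVERTISE) and the link partner ability (MII_LPA).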
+ */ +static int nv_update_linkspeed(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int adv = 0; + int lpa = 0; + int adv_lpa, adv_pause, lpa_pause; + int newls = np->linkspeed; + int newdup = np->duplex; + int mii_status; + int retval = 0; + u32 control_1000, status_1000, phyreg, pause_flags, txreg; + u32 txrxFlags = 0; + u32 phy_exp; + + /* BMSR_LSTATUS is latched, read it twice: + * we want the current value. + */ + mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + + if (!(mii_status & BMSR_LSTATUS)) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + retval = 0; + goto set_speed; + } + + if (np->autoneg == 0) { + if (np->fixed_mode & LPA_100FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 1; + } else if (np->fixed_mode & LPA_100HALF) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 0; + } else if (np->fixed_mode & LPA_10FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 1; + } else { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + } + retval = 1; + goto set_speed; + } + /* check auto negotiation is complete */ + if (!(mii_status & BMSR_ANEGCOMPLETE)) { + /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + retval = 0; + goto set_speed; + } + + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); + + retval = 1; + if (np->gigabit == PHY_GIGABIT) { + control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); + + if ((control_1000 & ADVERTISE_1000FULL) && + (status_1000 & LPA_1000FULL)) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; + newdup = 1; + goto set_speed; + } + } + + /* FIXME: handle parallel detection properly */ + adv_lpa = lpa & adv; + if (adv_lpa & LPA_100FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 1; + } else if (adv_lpa & LPA_100HALF) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 0; + } else if (adv_lpa & LPA_10FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 1; + } else if (adv_lpa & LPA_10HALF) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + } else { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + } + +set_speed: + if (np->duplex == newdup && np->linkspeed == newls) + return retval; + + np->duplex = newdup; + np->linkspeed = newls; + + /* The transmitter and receiver must be restarted for safe update */ + if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { + txrxFlags |= NV_RESTART_TX; + nv_stop_tx(dev); + } + if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { + txrxFlags |= NV_RESTART_RX; + nv_stop_rx(dev); + } + + if (np->gigabit == PHY_GIGABIT) { + phyreg = readl(base + NvRegSlotTime); + phyreg &= ~(0x3FF00); + if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || + ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) + phyreg |= NVREG_SLOTTIME_10_100_FULL; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) + phyreg |= NVREG_SLOTTIME_1000_FULL; + writel(phyreg, base + NvRegSlotTime); + } + + phyreg = readl(base + NvRegPhyInterface); + phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); + if (np->duplex == 0) + phyreg |= PHY_HALF; + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) + 
phyreg |= PHY_100; + else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) + phyreg |= PHY_1000; + writel(phyreg, base + NvRegPhyInterface); + + phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ + if (phyreg & PHY_RGMII) { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { + txreg = NVREG_TX_DEFERRAL_RGMII_1000; + } else { + if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) + txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; + else + txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; + } else { + txreg = NVREG_TX_DEFERRAL_RGMII_10_100; + } + } + } else { + if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) + txreg = NVREG_TX_DEFERRAL_MII_STRETCH; + else + txreg = NVREG_TX_DEFERRAL_DEFAULT; + } + writel(txreg, base + NvRegTxDeferral); + + if (np->desc_ver == DESC_VER_1) { + txreg = NVREG_TX_WM_DESC1_DEFAULT; + } else { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) + txreg = NVREG_TX_WM_DESC2_3_1000; + else + txreg = NVREG_TX_WM_DESC2_3_DEFAULT; + } + writel(txreg, base + NvRegTxWatermark); + + writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), + base + NvRegMisc1); + pci_push(base); + writel(np->linkspeed, base + NvRegLinkSpeed); + pci_push(base); + + pause_flags = 0; + /* setup pause frame */ + if (np->duplex != 0) { + if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { + adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); + + switch (adv_pause) { + case ADVERTISE_PAUSE_CAP: + if (lpa_pause & LPA_PAUSE_CAP) { + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + break; + case ADVERTISE_PAUSE_ASYM: + if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + break; + case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM: + if (lpa_pause & LPA_PAUSE_CAP) { + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + if (lpa_pause == LPA_PAUSE_ASYM) + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + break; + } + } else { + pause_flags = np->pause_flags; + } + } + nv_update_pause(dev, pause_flags); + + if (txrxFlags & NV_RESTART_TX) + nv_start_tx(dev); + if (txrxFlags & NV_RESTART_RX) + nv_start_rx(dev); + + return retval; +} + +static void nv_linkchange(struct net_device *dev) +{ + if (nv_update_linkspeed(dev)) { + if (!netif_carrier_ok(dev)) { + netif_carrier_on(dev); + netdev_info(dev, "link up\n"); + nv_txrx_gate(dev, false); + nv_start_rx(dev); + } + } else { + if (netif_carrier_ok(dev)) { + netif_carrier_off(dev); + netdev_info(dev, "link down\n"); + nv_txrx_gate(dev, true); + nv_stop_rx(dev); + } + } +} + +static void nv_link_irq(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 miistat; + + miistat = readl(base + NvRegMIIStatus); + writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); + + if (miistat & (NVREG_MIISTAT_LINKCHANGE)) + nv_linkchange(dev); +} + +static void nv_msi_workaround(struct fe_priv *np) +{ + + /* Need to toggle the msi irq mask within the ethernet device, + * otherwise, future interrupts will not be detected. 
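+ * The toggle is applied only when plain MSI (NV_MSI_ENABLED) is active.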
+ */ + if (np->msi_flags & NV_MSI_ENABLED) { + u8 __iomem *base = np->base; + + writel(0, base + NvRegMSIIrqMask); + writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); + } +} + +static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) +{ + struct fe_priv *np = netdev_priv(dev); + + if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { + if (total_work > NV_DYNAMIC_THRESHOLD) { + /* transition to poll based interrupts */ + np->quiet_count = 0; + if (np->irqmask != NVREG_IRQMASK_CPU) { + np->irqmask = NVREG_IRQMASK_CPU; + return 1; + } + } else { + if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { + np->quiet_count++; + } else { + /* reached a period of low activity, switch + to per tx/rx packet interrupts */ + if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { + np->irqmask = NVREG_IRQMASK_THROUGHPUT; + return 1; + } + } + } + } + return 0; +} + +static irqreturn_t nv_nic_irq(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (!(np->msi_flags & NV_MSI_X_ENABLED)) { + np->events = readl(base + NvRegIrqStatus); + writel(np->events, base + NvRegIrqStatus); + } else { + np->events = readl(base + NvRegMSIXIrqStatus); + writel(np->events, base + NvRegMSIXIrqStatus); + } + if (!(np->events & np->irqmask)) + return IRQ_NONE; + + nv_msi_workaround(np); + + if (napi_schedule_prep(&np->napi)) { + /* + * Disable further irq's (msix not enabled with napi) + */ + writel(0, base + NvRegIrqMask); + __napi_schedule(&np->napi); + } + + return IRQ_HANDLED; +} + +/** + * All _optimized functions are used to help increase performance + * (reduce CPU and increase throughput). They use descripter version 3, + * compiler directives, and reduce memory accesses. 
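+ * The optimized paths operate on struct ring_desc_ex with split 64-bit buffer addresses (bufhigh/buflow).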
+ */ +static irqreturn_t nv_nic_irq_optimized(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (!(np->msi_flags & NV_MSI_X_ENABLED)) { + np->events = readl(base + NvRegIrqStatus); + writel(np->events, base + NvRegIrqStatus); + } else { + np->events = readl(base + NvRegMSIXIrqStatus); + writel(np->events, base + NvRegMSIXIrqStatus); + } + if (!(np->events & np->irqmask)) + return IRQ_NONE; + + nv_msi_workaround(np); + + if (napi_schedule_prep(&np->napi)) { + /* + * Disable further irq's (msix not enabled with napi) + */ + writel(0, base + NvRegIrqMask); + __napi_schedule(&np->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t nv_nic_irq_tx(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + int i; + unsigned long flags; + + for (i = 0;; i++) { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; + writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); + if (!(events & np->irqmask)) + break; + + spin_lock_irqsave(&np->lock, flags); + nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); + spin_unlock_irqrestore(&np->lock, flags); + + if (unlikely(i > max_interrupt_work)) { + spin_lock_irqsave(&np->lock, flags); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_TX_ALL; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_dbg(dev, "%s: too many iterations (%d)\n", + __func__, i); + break; + } + + } + + return IRQ_RETVAL(i); +} + +static int nv_napi_poll(struct napi_struct *napi, int budget) +{ + struct fe_priv *np = container_of(napi, struct fe_priv, napi); + struct net_device *dev = np->dev; + u8 __iomem *base = get_hwbase(dev); + unsigned long flags; + int retcode; + int rx_count, tx_work = 0, rx_work = 0; + + do { + if (!nv_optimized(np)) { + spin_lock_irqsave(&np->lock, flags); + tx_work += nv_tx_done(dev, np->tx_ring_size); + spin_unlock_irqrestore(&np->lock, flags); + + rx_count = nv_rx_process(dev, budget - rx_work); + retcode = nv_alloc_rx(dev); + } else { + spin_lock_irqsave(&np->lock, flags); + tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); + spin_unlock_irqrestore(&np->lock, flags); + + rx_count = nv_rx_process_optimized(dev, + budget - rx_work); + retcode = nv_alloc_rx_optimized(dev); + } + } while (retcode == 0 && + rx_count > 0 && (rx_work += rx_count) < budget); + + if (retcode) { + spin_lock_irqsave(&np->lock, flags); + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + spin_unlock_irqrestore(&np->lock, flags); + } + + nv_change_interrupt_mode(dev, tx_work + rx_work); + + if (unlikely(np->events & NVREG_IRQ_LINK)) { + spin_lock_irqsave(&np->lock, flags); + nv_link_irq(dev); + spin_unlock_irqrestore(&np->lock, flags); + } + if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { + spin_lock_irqsave(&np->lock, flags); + nv_linkchange(dev); + spin_unlock_irqrestore(&np->lock, flags); + np->link_timeout = jiffies + LINK_TIMEOUT; + } + if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { + spin_lock_irqsave(&np->lock, flags); + if (!np->in_shutdown) { + np->nic_poll_irq = np->irqmask; + np->recover_error = 1; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + 
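/* recoverable error: the nic_poll timer armed above performs the reset, so complete this poll without re-enabling the irq mask */ +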
napi_complete(napi); + return rx_work; + } + + if (rx_work < budget) { + /* re-enable interrupts + (msix not enabled in napi) */ + napi_complete(napi); + + writel(np->irqmask, base + NvRegIrqMask); + } + return rx_work; +} + +static irqreturn_t nv_nic_irq_rx(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + int i; + unsigned long flags; + + for (i = 0;; i++) { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; + writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); + if (!(events & np->irqmask)) + break; + + if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { + if (unlikely(nv_alloc_rx_optimized(dev))) { + spin_lock_irqsave(&np->lock, flags); + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + spin_unlock_irqrestore(&np->lock, flags); + } + } + + if (unlikely(i > max_interrupt_work)) { + spin_lock_irqsave(&np->lock, flags); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_RX_ALL; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_dbg(dev, "%s: too many iterations (%d)\n", + __func__, i); + break; + } + } + + return IRQ_RETVAL(i); +} + +static irqreturn_t nv_nic_irq_other(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + int i; + unsigned long flags; + + for (i = 0;; i++) { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; + writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); + if (!(events & np->irqmask)) + break; + + /* check tx in case we reached max loop limit in tx isr */ + spin_lock_irqsave(&np->lock, flags); + nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); + spin_unlock_irqrestore(&np->lock, flags); + + if (events & NVREG_IRQ_LINK) { + spin_lock_irqsave(&np->lock, flags); + nv_link_irq(dev); + spin_unlock_irqrestore(&np->lock, flags); + } + if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { + spin_lock_irqsave(&np->lock, flags); + nv_linkchange(dev); + spin_unlock_irqrestore(&np->lock, flags); + np->link_timeout = jiffies + LINK_TIMEOUT; + } + if (events & NVREG_IRQ_RECOVER_ERROR) { + spin_lock_irq(&np->lock); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_OTHER; + np->recover_error = 1; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irq(&np->lock); + break; + } + if (unlikely(i > max_interrupt_work)) { + spin_lock_irqsave(&np->lock, flags); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_OTHER; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_dbg(dev, "%s: too many iterations (%d)\n", + __func__, i); + break; + } + + } + + return IRQ_RETVAL(i); +} + +static irqreturn_t nv_nic_irq_test(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + + if (!(np->msi_flags & NV_MSI_X_ENABLED)) { + events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; + writel(NVREG_IRQ_TIMER, base + 
NvRegIrqStatus); + } else { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; + writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); + } + pci_push(base); + if (!(events & NVREG_IRQ_TIMER)) + return IRQ_RETVAL(0); + + nv_msi_workaround(np); + + spin_lock(&np->lock); + np->intr_test = 1; + spin_unlock(&np->lock); + + return IRQ_RETVAL(1); +} + +static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) +{ + u8 __iomem *base = get_hwbase(dev); + int i; + u32 msixmap = 0; + + /* Each interrupt bit can be mapped to a MSIX vector (4 bits). + * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents + * the remaining 8 interrupts. + */ + for (i = 0; i < 8; i++) { + if ((irqmask >> i) & 0x1) + msixmap |= vector << (i << 2); + } + writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); + + msixmap = 0; + for (i = 0; i < 8; i++) { + if ((irqmask >> (i + 8)) & 0x1) + msixmap |= vector << (i << 2); + } + writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); +} + +static int nv_request_irq(struct net_device *dev, int intr_test) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + int ret = 1; + int i; + irqreturn_t (*handler)(int foo, void *data); + + if (intr_test) { + handler = nv_nic_irq_test; + } else { + if (nv_optimized(np)) + handler = nv_nic_irq_optimized; + else + handler = nv_nic_irq; + } + + if (np->msi_flags & NV_MSI_X_CAPABLE) { + for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) + np->msi_x_entry[i].entry = i; + ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK)); + if (ret == 0) { + np->msi_flags |= NV_MSI_X_ENABLED; + if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { + /* Request irq for rx handling */ + sprintf(np->name_rx, "%s-rx", dev->name); + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, + nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { + netdev_info(dev, + "request_irq failed for rx %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_err; + } + /* Request irq for tx handling */ + sprintf(np->name_tx, "%s-tx", dev->name); + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, + nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { + netdev_info(dev, + "request_irq failed for tx %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_free_rx; + } + /* Request irq for link and timer handling */ + sprintf(np->name_other, "%s-other", dev->name); + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, + nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { + netdev_info(dev, + "request_irq failed for link %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_free_tx; + } + /* map interrupts to their respective vector */ + writel(0, base + NvRegMSIXMap0); + writel(0, base + NvRegMSIXMap1); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); + } else { + /* Request irq for all interrupts */ + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { + netdev_info(dev, + "request_irq failed %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_err; + } + + /* map interrupts to vector 0 */ + writel(0, base 
+ NvRegMSIXMap0); + writel(0, base + NvRegMSIXMap1); + } + } + } + if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { + ret = pci_enable_msi(np->pci_dev); + if (ret == 0) { + np->msi_flags |= NV_MSI_ENABLED; + dev->irq = np->pci_dev->irq; + if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { + netdev_info(dev, "request_irq failed %d\n", + ret); + pci_disable_msi(np->pci_dev); + np->msi_flags &= ~NV_MSI_ENABLED; + dev->irq = np->pci_dev->irq; + goto out_err; + } + + /* map interrupts to vector 0 */ + writel(0, base + NvRegMSIMap0); + writel(0, base + NvRegMSIMap1); + /* enable msi vector 0 */ + writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); + } + } + if (ret != 0) { + if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) + goto out_err; + + } + + return 0; +out_free_tx: + free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); +out_free_rx: + free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); +out_err: + return 1; +} + +static void nv_free_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + int i; + + if (np->msi_flags & NV_MSI_X_ENABLED) { + for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) + free_irq(np->msi_x_entry[i].vector, dev); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + } else { + free_irq(np->pci_dev->irq, dev); + if (np->msi_flags & NV_MSI_ENABLED) { + pci_disable_msi(np->pci_dev); + np->msi_flags &= ~NV_MSI_ENABLED; + } + } +} + +static void nv_do_nic_poll(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 mask = 0; + + /* + * First disable irq(s) and then + * reenable interrupts on the nic, we have to do this before calling + * nv_nic_irq because that may decide to do otherwise + */ + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + disable_irq_lockdep(np->pci_dev->irq); + mask = np->irqmask; + } else { + if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + mask |= NVREG_IRQ_RX_ALL; + } + if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + mask |= NVREG_IRQ_TX_ALL; + } + if (np->nic_poll_irq & NVREG_IRQ_OTHER) { + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + mask |= NVREG_IRQ_OTHER; + } + } + /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ + + if (np->recover_error) { + np->recover_error = 0; + netdev_info(dev, "MAC in recoverable error state\n"); + if (netif_running(dev)) { + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + if (np->driver_data & DEV_HAS_POWER_CNTRL) + nv_mac_reset(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + /* reinit driver view of the rx queue */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + 
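/* force out the posted writes before clearing stale interrupt status */ +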
pci_push(base); + /* clear interrupts */ + if (!(np->msi_flags & NV_MSI_X_ENABLED)) + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + else + writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); + + /* restart rx engine */ + nv_start_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + } + + writel(mask, base + NvRegIrqMask); + pci_push(base); + + if (!using_multi_irqs(dev)) { + np->nic_poll_irq = 0; + if (nv_optimized(np)) + nv_nic_irq_optimized(0, dev); + else + nv_nic_irq(0, dev); + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + enable_irq_lockdep(np->pci_dev->irq); + } else { + if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { + np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; + nv_nic_irq_rx(0, dev); + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + } + if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { + np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; + nv_nic_irq_tx(0, dev); + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + } + if (np->nic_poll_irq & NVREG_IRQ_OTHER) { + np->nic_poll_irq &= ~NVREG_IRQ_OTHER; + nv_nic_irq_other(0, dev); + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } + } + +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void nv_poll_controller(struct net_device *dev) +{ + nv_do_nic_poll((unsigned long) dev); +} +#endif + +static void nv_do_stats_poll(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + + nv_get_hw_stats(dev); + + if (!np->in_shutdown) + mod_timer(&np->stats_poll, + round_jiffies(jiffies + STATS_INTERVAL)); +} + +static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct fe_priv *np = netdev_priv(dev); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, FORCEDETH_VERSION); + strcpy(info->bus_info, pci_name(np->pci_dev)); +} + +static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) +{ + struct fe_priv *np = netdev_priv(dev); + wolinfo->supported = WAKE_MAGIC; + + spin_lock_irq(&np->lock); + if (np->wolenabled) + wolinfo->wolopts = WAKE_MAGIC; + spin_unlock_irq(&np->lock); +} + +static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 flags = 0; + + if (wolinfo->wolopts == 0) { + np->wolenabled = 0; + } else if (wolinfo->wolopts & WAKE_MAGIC) { + np->wolenabled = 1; + flags = NVREG_WAKEUPFLAGS_ENABLE; + } + if (netif_running(dev)) { + spin_lock_irq(&np->lock); + writel(flags, base + NvRegWakeUpFlags); + spin_unlock_irq(&np->lock); + } + device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); + return 0; +} + +static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct fe_priv *np = netdev_priv(dev); + u32 speed; + int adv; + + spin_lock_irq(&np->lock); + ecmd->port = PORT_MII; + if (!netif_running(dev)) { + /* We do not track link speed / duplex setting if the + * interface is disabled. 
Force a link check */ + if (nv_update_linkspeed(dev)) { + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } else { + if (netif_carrier_ok(dev)) + netif_carrier_off(dev); + } + } + + if (netif_carrier_ok(dev)) { + switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { + case NVREG_LINKSPEED_10: + speed = SPEED_10; + break; + case NVREG_LINKSPEED_100: + speed = SPEED_100; + break; + case NVREG_LINKSPEED_1000: + speed = SPEED_1000; + break; + default: + speed = -1; + break; + } + ecmd->duplex = DUPLEX_HALF; + if (np->duplex) + ecmd->duplex = DUPLEX_FULL; + } else { + speed = -1; + ecmd->duplex = -1; + } + ethtool_cmd_speed_set(ecmd, speed); + ecmd->autoneg = np->autoneg; + + ecmd->advertising = ADVERTISED_MII; + if (np->autoneg) { + ecmd->advertising |= ADVERTISED_Autoneg; + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + if (adv & ADVERTISE_10HALF) + ecmd->advertising |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + ecmd->advertising |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (np->gigabit == PHY_GIGABIT) { + adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + if (adv & ADVERTISE_1000FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + } + ecmd->supported = (SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_MII); + if (np->gigabit == PHY_GIGABIT) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->phy_address = np->phyaddr; + ecmd->transceiver = XCVR_EXTERNAL; + + /* ignore maxtxpkt, maxrxpkt for now */ + spin_unlock_irq(&np->lock); + return 0; +} + +static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct fe_priv *np = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(ecmd); + + if (ecmd->port != PORT_MII) + return -EINVAL; + if (ecmd->transceiver != XCVR_EXTERNAL) + return -EINVAL; + if (ecmd->phy_address != np->phyaddr) { + /* TODO: support switching between multiple phys. Should be + * trivial, but not enabled due to lack of test hardware. */ + return -EINVAL; + } + if (ecmd->autoneg == AUTONEG_ENABLE) { + u32 mask; + + mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + if (np->gigabit == PHY_GIGABIT) + mask |= ADVERTISED_1000baseT_Full; + + if ((ecmd->advertising & mask) == 0) + return -EINVAL; + + } else if (ecmd->autoneg == AUTONEG_DISABLE) { + /* Note: autonegotiation disable, speed 1000 intentionally + * forbidden - no one should need that. */ + + if (speed != SPEED_10 && speed != SPEED_100) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + return -EINVAL; + } + + netif_carrier_off(dev); + if (netif_running(dev)) { + unsigned long flags; + + nv_disable_irq(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + /* with plain spinlock lockdep complains */ + spin_lock_irqsave(&np->lock, flags); + /* stop engines */ + /* FIXME: + * this can take some time, and interrupts are disabled + * due to spin_lock_irqsave, but let's hope no daemon + * is going to change the settings very often... 
+ * Worst case: + * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX + * + some minor delays, which is up to a second approximately + */ + nv_stop_rxtx(dev); + spin_unlock_irqrestore(&np->lock, flags); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + + if (ecmd->autoneg == AUTONEG_ENABLE) { + int adv, bmcr; + + np->autoneg = 1; + + /* advertise only what has been requested */ + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + if (ecmd->advertising & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + adv |= ADVERTISE_PAUSE_ASYM; + mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); + + if (np->gigabit == PHY_GIGABIT) { + adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + adv &= ~ADVERTISE_1000FULL; + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + adv |= ADVERTISE_1000FULL; + mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); + } + + if (netif_running(dev)) + netdev_info(dev, "link down\n"); + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (np->phy_model == PHY_MODEL_MARVELL_E3016) { + bmcr |= BMCR_ANENABLE; + /* reset the phy in order for settings to stick, + * and cause autoneg to start */ + if (phy_reset(dev, bmcr)) { + netdev_info(dev, "phy reset failed\n"); + return -EINVAL; + } + } else { + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + } + } else { + int adv, bmcr; + + np->autoneg = 0; + + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) + adv |= ADVERTISE_10HALF; + if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) + adv |= ADVERTISE_10FULL; + if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) + adv |= ADVERTISE_100HALF; + if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) + adv |= ADVERTISE_100FULL; + np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); + if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + } + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { + adv |= ADVERTISE_PAUSE_ASYM; + np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); + np->fixed_mode = adv; + + if (np->gigabit == PHY_GIGABIT) { + adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + adv &= ~ADVERTISE_1000FULL; + mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); + } + + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); + if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) + bmcr |= BMCR_FULLDPLX; + if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) + bmcr |= BMCR_SPEED100; + if (np->phy_oui == PHY_OUI_MARVELL) { + /* reset the phy in order for forced mode settings to stick */ + if (phy_reset(dev, bmcr)) { + netdev_info(dev, "phy 
reset failed\n"); + return -EINVAL; + } + } else { + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + if (netif_running(dev)) { + /* Wait a bit and then reconfigure the nic. */ + udelay(10); + nv_linkchange(dev); + } + } + } + + if (netif_running(dev)) { + nv_start_rxtx(dev); + nv_enable_irq(dev); + } + + return 0; +} + +#define FORCEDETH_REGS_VER 1 + +static int nv_get_regs_len(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + return np->register_size; +} + +static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 *rbuf = buf; + int i; + + regs->version = FORCEDETH_REGS_VER; + spin_lock_irq(&np->lock); + for (i = 0; i <= np->register_size/sizeof(u32); i++) + rbuf[i] = readl(base + i*sizeof(u32)); + spin_unlock_irq(&np->lock); +} + +static int nv_nway_reset(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int ret; + + if (np->autoneg) { + int bmcr; + + netif_carrier_off(dev); + if (netif_running(dev)) { + nv_disable_irq(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + netdev_info(dev, "link down\n"); + } + + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (np->phy_model == PHY_MODEL_MARVELL_E3016) { + bmcr |= BMCR_ANENABLE; + /* reset the phy in order for settings to stick*/ + if (phy_reset(dev, bmcr)) { + netdev_info(dev, "phy reset failed\n"); + return -EINVAL; + } + } else { + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + } + + if (netif_running(dev)) { + nv_start_rxtx(dev); + nv_enable_irq(dev); + } + ret = 0; + } else { + ret = -EINVAL; + } + + return ret; +} + +static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +{ + struct fe_priv *np = netdev_priv(dev); + + ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? 
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; + + ring->rx_pending = np->rx_ring_size; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; + ring->tx_pending = np->tx_ring_size; +} + +static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; + dma_addr_t ring_addr; + + if (ring->rx_pending < RX_RING_MIN || + ring->tx_pending < TX_RING_MIN || + ring->rx_mini_pending != 0 || + ring->rx_jumbo_pending != 0 || + (np->desc_ver == DESC_VER_1 && + (ring->rx_pending > RING_MAX_DESC_VER_1 || + ring->tx_pending > RING_MAX_DESC_VER_1)) || + (np->desc_ver != DESC_VER_1 && + (ring->rx_pending > RING_MAX_DESC_VER_2_3 || + ring->tx_pending > RING_MAX_DESC_VER_2_3))) { + return -EINVAL; + } + + /* allocate new rings */ + if (!nv_optimized(np)) { + rxtx_ring = pci_alloc_consistent(np->pci_dev, + sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), + &ring_addr); + } else { + rxtx_ring = pci_alloc_consistent(np->pci_dev, + sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), + &ring_addr); + } + rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); + tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); + if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { + /* fall back to old rings */ + if (!nv_optimized(np)) { + if (rxtx_ring) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), + rxtx_ring, ring_addr); + } else { + if (rxtx_ring) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), + rxtx_ring, ring_addr); + } + + kfree(rx_skbuff); + kfree(tx_skbuff); + goto exit; + } + + if (netif_running(dev)) { + nv_disable_irq(dev); + nv_napi_disable(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain queues */ + nv_drain_rxtx(dev); + /* delete queues */ + free_rings(dev); + } + + /* set new values */ + np->rx_ring_size = ring->rx_pending; + np->tx_ring_size = ring->tx_pending; + + if (!nv_optimized(np)) { + np->rx_ring.orig = (struct ring_desc *)rxtx_ring; + np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; + } else { + np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; + np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; + } + np->rx_skb = (struct nv_skb_map *)rx_skbuff; + np->tx_skb = (struct nv_skb_map *)tx_skbuff; + np->ring_addr = ring_addr; + + memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); + memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); + + if (netif_running(dev)) { + /* reinit driver view of the queues */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + + /* reinit nic view of the queues */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(base); + + /* restart engines */ + nv_start_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + nv_napi_enable(dev); + nv_enable_irq(dev); + } + return 0; +exit: + return -ENOMEM; +} + 
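nv_get_ringparam()/nv_set_ringparam() above are reached through the standard ethtool ring ioctls. Below is a minimal user-space sketch of that path, illustrative only and not part of the driver patch; "eth0" is an assumed interface name, and only the stock ETHTOOL_GRINGPARAM/ETHTOOL_SRINGPARAM ioctl interface is used.

/*
 * Illustrative only: exercise nv_get_ringparam()/nv_set_ringparam()
 * from user space via the ethtool ioctl.  "eth0" is an assumed name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ring;

	/* ETHTOOL_GRINGPARAM -> nv_get_ringparam() */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	printf("rx %u/%u, tx %u/%u\n", ring.rx_pending, ring.rx_max_pending,
	       ring.tx_pending, ring.tx_max_pending);

	/* ETHTOOL_SRINGPARAM -> nv_set_ringparam(); the driver rejects
	 * sizes outside RX/TX_RING_MIN..RING_MAX_DESC_VER_* with -EINVAL. */
	ring.cmd = ETHTOOL_SRINGPARAM;
	if (ring.rx_pending * 2 <= ring.rx_max_pending)
		ring.rx_pending *= 2;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRINGPARAM");

	close(fd);
	return 0;
}

The same path is normally driven by "ethtool -g eth0" and "ethtool -G eth0 rx N tx M", which issue exactly these ioctls.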
+static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) +{ + struct fe_priv *np = netdev_priv(dev); + + pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; + pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; + pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; +} + +static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) +{ + struct fe_priv *np = netdev_priv(dev); + int adv, bmcr; + + if ((!np->autoneg && np->duplex == 0) || + (np->autoneg && !pause->autoneg && np->duplex == 0)) { + netdev_info(dev, "can not set pause settings when forced link is in half duplex\n"); + return -EINVAL; + } + if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { + netdev_info(dev, "hardware does not support tx pause frames\n"); + return -EINVAL; + } + + netif_carrier_off(dev); + if (netif_running(dev)) { + nv_disable_irq(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + + np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); + if (pause->rx_pause) + np->pause_flags |= NV_PAUSEFRAME_RX_REQ; + if (pause->tx_pause) + np->pause_flags |= NV_PAUSEFRAME_TX_REQ; + + if (np->autoneg && pause->autoneg) { + np->pause_flags |= NV_PAUSEFRAME_AUTONEG; + + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + adv |= ADVERTISE_PAUSE_ASYM; + mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); + + if (netif_running(dev)) + netdev_info(dev, "link down\n"); + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + } else { + np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); + if (pause->rx_pause) + np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + if (pause->tx_pause) + np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + + if (!netif_running(dev)) + nv_update_linkspeed(dev); + else + nv_update_pause(dev, np->pause_flags); + } + + if (netif_running(dev)) { + nv_start_rxtx(dev); + nv_enable_irq(dev); + } + return 0; +} + +static u32 nv_fix_features(struct net_device *dev, u32 features) +{ + /* vlan is dependent on rx checksum offload */ + if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) + features |= NETIF_F_RXCSUM; + + return features; +} + +static void nv_vlan_mode(struct net_device *dev, u32 features) +{ + struct fe_priv *np = get_nvpriv(dev); + + spin_lock_irq(&np->lock); + + if (features & NETIF_F_HW_VLAN_RX) + np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; + else + np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; + + if (features & NETIF_F_HW_VLAN_TX) + np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; + else + np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; + + writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + + spin_unlock_irq(&np->lock); +} + +static int nv_set_features(struct net_device *dev, u32 features) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 changed = dev->features ^ features; + + if (changed & NETIF_F_RXCSUM) { + spin_lock_irq(&np->lock); + + if (features & NETIF_F_RXCSUM) + np->txrxctl_bits |= 
NVREG_TXRXCTL_RXCHECK; + else + np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; + + if (netif_running(dev)) + writel(np->txrxctl_bits, base + NvRegTxRxControl); + + spin_unlock_irq(&np->lock); + } + + if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)) + nv_vlan_mode(dev, features); + + return 0; +} + +static int nv_get_sset_count(struct net_device *dev, int sset) +{ + struct fe_priv *np = netdev_priv(dev); + + switch (sset) { + case ETH_SS_TEST: + if (np->driver_data & DEV_HAS_TEST_EXTENDED) + return NV_TEST_COUNT_EXTENDED; + else + return NV_TEST_COUNT_BASE; + case ETH_SS_STATS: + if (np->driver_data & DEV_HAS_STATISTICS_V3) + return NV_DEV_STATISTICS_V3_COUNT; + else if (np->driver_data & DEV_HAS_STATISTICS_V2) + return NV_DEV_STATISTICS_V2_COUNT; + else if (np->driver_data & DEV_HAS_STATISTICS_V1) + return NV_DEV_STATISTICS_V1_COUNT; + else + return 0; + default: + return -EOPNOTSUPP; + } +} + +static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) +{ + struct fe_priv *np = netdev_priv(dev); + + /* update stats */ + nv_do_stats_poll((unsigned long)dev); + + memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); +} + +static int nv_link_test(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int mii_status; + + mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + + /* check phy link status */ + if (!(mii_status & BMSR_LSTATUS)) + return 0; + else + return 1; +} + +static int nv_register_test(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + int i = 0; + u32 orig_read, new_read; + + do { + orig_read = readl(base + nv_registers_test[i].reg); + + /* xor with mask to toggle bits */ + orig_read ^= nv_registers_test[i].mask; + + writel(orig_read, base + nv_registers_test[i].reg); + + new_read = readl(base + nv_registers_test[i].reg); + + if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) + return 0; + + /* restore original value */ + orig_read ^= nv_registers_test[i].mask; + writel(orig_read, base + nv_registers_test[i].reg); + + } while (nv_registers_test[++i].reg != 0); + + return 1; +} + +static int nv_interrupt_test(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int ret = 1; + int testcnt; + u32 save_msi_flags, save_poll_interval = 0; + + if (netif_running(dev)) { + /* free current irq */ + nv_free_irq(dev); + save_poll_interval = readl(base+NvRegPollingInterval); + } + + /* flag to test interrupt handler */ + np->intr_test = 0; + + /* setup test irq */ + save_msi_flags = np->msi_flags; + np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; + np->msi_flags |= 0x001; /* setup 1 vector */ + if (nv_request_irq(dev, 1)) + return 0; + + /* setup timer interrupt */ + writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); + writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); + + nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); + + /* wait for at least one interrupt */ + msleep(100); + + spin_lock_irq(&np->lock); + + /* flag should be set within ISR */ + testcnt = np->intr_test; + if (!testcnt) + ret = 2; + + nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); + if (!(np->msi_flags & NV_MSI_X_ENABLED)) + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + else + writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); + + spin_unlock_irq(&np->lock); + + nv_free_irq(dev); + + np->msi_flags = save_msi_flags; + + if (netif_running(dev)) { + 
writel(save_poll_interval, base + NvRegPollingInterval); + writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); + /* restore original irq */ + if (nv_request_irq(dev, 0)) + return 0; + } + + return ret; +} + +static int nv_loopback_test(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + struct sk_buff *tx_skb, *rx_skb; + dma_addr_t test_dma_addr; + u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); + u32 flags; + int len, i, pkt_len; + u8 *pkt_data; + u32 filter_flags = 0; + u32 misc1_flags = 0; + int ret = 1; + + if (netif_running(dev)) { + nv_disable_irq(dev); + filter_flags = readl(base + NvRegPacketFilterFlags); + misc1_flags = readl(base + NvRegMisc1); + } else { + nv_txrx_reset(dev); + } + + /* reinit driver view of the rx queue */ + set_bufsize(dev); + nv_init_ring(dev); + + /* setup hardware for loopback */ + writel(NVREG_MISC1_FORCE, base + NvRegMisc1); + writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); + + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + + /* restart rx engine */ + nv_start_rxtx(dev); + + /* setup packet for tx */ + pkt_len = ETH_DATA_LEN; + tx_skb = dev_alloc_skb(pkt_len); + if (!tx_skb) { + netdev_err(dev, "dev_alloc_skb() failed during loopback test\n"); + ret = 0; + goto out; + } + test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, + skb_tailroom(tx_skb), + PCI_DMA_FROMDEVICE); + pkt_data = skb_put(tx_skb, pkt_len); + for (i = 0; i < pkt_len; i++) + pkt_data[i] = (u8)(i & 0xff); + + if (!nv_optimized(np)) { + np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); + np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); + } else { + np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); + np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); + np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); + } + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(get_hwbase(dev)); + + msleep(500); + + /* check for rx of the packet */ + if (!nv_optimized(np)) { + flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); + len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); + + } else { + flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); + len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); + } + + if (flags & NV_RX_AVAIL) { + ret = 0; + } else if (np->desc_ver == DESC_VER_1) { + if (flags & NV_RX_ERROR) + ret = 0; + } else { + if (flags & NV_RX2_ERROR) + ret = 0; + } + + if (ret) { + if (len != pkt_len) { + ret = 0; + } else { + rx_skb = np->rx_skb[0].skb; + for (i = 0; i < pkt_len; i++) { + if (rx_skb->data[i] != (u8)(i & 0xff)) { + ret = 0; + break; + } + } + } + } + + pci_unmap_single(np->pci_dev, test_dma_addr, + (skb_end_pointer(tx_skb) - tx_skb->data), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(tx_skb); + out: + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + + if (netif_running(dev)) { + writel(misc1_flags, base + NvRegMisc1); + writel(filter_flags, base + NvRegPacketFilterFlags); + nv_enable_irq(dev); + } + + return ret; +} + +static void nv_self_test(struct net_device *dev, struct 
ethtool_test *test, u64 *buffer) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int result; + memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); + + if (!nv_link_test(dev)) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[0] = 1; + } + + if (test->flags & ETH_TEST_FL_OFFLINE) { + if (netif_running(dev)) { + netif_stop_queue(dev); + nv_napi_disable(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock_irq(&np->lock); + nv_disable_hw_interrupts(dev, np->irqmask); + if (!(np->msi_flags & NV_MSI_X_ENABLED)) + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + else + writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + spin_unlock_irq(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + + if (!nv_register_test(dev)) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[1] = 1; + } + + result = nv_interrupt_test(dev); + if (result != 1) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[2] = 1; + } + if (result == 0) { + /* bail out */ + return; + } + + if (!nv_loopback_test(dev)) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[3] = 1; + } + + if (netif_running(dev)) { + /* reinit driver view of the rx queue */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(base); + /* restart rx engine */ + nv_start_rxtx(dev); + netif_start_queue(dev); + nv_napi_enable(dev); + nv_enable_hw_interrupts(dev, np->irqmask); + } + } +} + +static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); + break; + case ETH_SS_TEST: + memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); + break; + } +} + +static const struct ethtool_ops ops = { + .get_drvinfo = nv_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_wol = nv_get_wol, + .set_wol = nv_set_wol, + .get_settings = nv_get_settings, + .set_settings = nv_set_settings, + .get_regs_len = nv_get_regs_len, + .get_regs = nv_get_regs, + .nway_reset = nv_nway_reset, + .get_ringparam = nv_get_ringparam, + .set_ringparam = nv_set_ringparam, + .get_pauseparam = nv_get_pauseparam, + .set_pauseparam = nv_set_pauseparam, + .get_strings = nv_get_strings, + .get_ethtool_stats = nv_get_ethtool_stats, + .get_sset_count = nv_get_sset_count, + .self_test = nv_self_test, +}; + +/* The mgmt unit and driver use a semaphore to access the phy during init */ +static int nv_mgmt_acquire_sema(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int i; + u32 tx_ctrl, mgmt_sema; + + for (i = 0; i < 10; i++) { + mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; + if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) + break; + msleep(500); + } + + if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) + return 0; + + for (i = 0; i < 2; i++) { + tx_ctrl = 
readl(base + NvRegTransmitterControl); + tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; + writel(tx_ctrl, base + NvRegTransmitterControl); + + /* verify that semaphore was acquired */ + tx_ctrl = readl(base + NvRegTransmitterControl); + if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && + ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { + np->mgmt_sema = 1; + return 1; + } else + udelay(50); + } + + return 0; +} + +static void nv_mgmt_release_sema(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 tx_ctrl; + + if (np->driver_data & DEV_HAS_MGMT_UNIT) { + if (np->mgmt_sema) { + tx_ctrl = readl(base + NvRegTransmitterControl); + tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; + writel(tx_ctrl, base + NvRegTransmitterControl); + } + } +} + + +static int nv_mgmt_get_version(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 data_ready = readl(base + NvRegTransmitterControl); + u32 data_ready2 = 0; + unsigned long start; + int ready = 0; + + writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); + writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); + start = jiffies; + while (time_before(jiffies, start + 5*HZ)) { + data_ready2 = readl(base + NvRegTransmitterControl); + if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { + ready = 1; + break; + } + schedule_timeout_uninterruptible(1); + } + + if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) + return 0; + + np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; + + return 1; +} + +static int nv_open(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int ret = 1; + int oom, i; + u32 low; + + /* power up phy */ + mii_rw(dev, np->phyaddr, MII_BMCR, + mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); + + nv_txrx_gate(dev, false); + /* erase previous misconfiguration */ + if (np->driver_data & DEV_HAS_POWER_CNTRL) + nv_mac_reset(dev); + writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); + writel(0, base + NvRegMulticastAddrB); + writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); + writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); + writel(0, base + NvRegPacketFilterFlags); + + writel(0, base + NvRegTransmitterControl); + writel(0, base + NvRegReceiverControl); + + writel(0, base + NvRegAdapterControl); + + if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) + writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); + + /* initialize descriptor rings */ + set_bufsize(dev); + oom = nv_init_ring(dev); + + writel(0, base + NvRegLinkSpeed); + writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); + nv_txrx_reset(dev); + writel(0, base + NvRegUnknownSetupReg6); + + np->in_shutdown = 0; + + /* give hw rings */ + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + + writel(np->linkspeed, base + NvRegLinkSpeed); + if (np->desc_ver == DESC_VER_1) + writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); + else + writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); + writel(np->txrxctl_bits, base + NvRegTxRxControl); + writel(np->vlanctl_bits, base + NvRegVlanControl); + pci_push(base); + 
writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); + if (reg_delay(dev, NvRegUnknownSetupReg5, + NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, + NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX)) + netdev_info(dev, + "%s: SetupReg5, Bit 31 remained off\n", __func__); + + writel(0, base + NvRegMIIMask); + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + + writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); + writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); + writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + + writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); + + get_random_bytes(&low, sizeof(low)); + low &= NVREG_SLOTTIME_MASK; + if (np->desc_ver == DESC_VER_1) { + writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); + } else { + if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { + /* setup legacy backoff */ + writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); + } else { + writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); + nv_gear_backoff_reseed(dev); + } + } + writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); + writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); + if (poll_interval == -1) { + if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) + writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); + else + writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); + } else + writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); + writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); + writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, + base + NvRegAdapterControl); + writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); + writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); + if (np->wolenabled) + writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); + + i = readl(base + NvRegPowerState); + if ((i & NVREG_POWERSTATE_POWEREDUP) == 0) + writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); + + pci_push(base); + udelay(10); + writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); + + nv_disable_hw_interrupts(dev, np->irqmask); + pci_push(base); + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + pci_push(base); + + if (nv_request_irq(dev, 0)) + goto out_drain; + + /* ask for interrupts */ + nv_enable_hw_interrupts(dev, np->irqmask); + + spin_lock_irq(&np->lock); + writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); + writel(0, base + NvRegMulticastAddrB); + writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); + writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); + writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); + /* One manual link speed update: Interrupts are enabled, future link + * speed changes cause interrupts and are handled by nv_link_irq(). 
+ */ + { + u32 miistat; + miistat = readl(base + NvRegMIIStatus); + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + } + /* set linkspeed to invalid value, thus force nv_update_linkspeed + * to init hw */ + np->linkspeed = 0; + ret = nv_update_linkspeed(dev); + nv_start_rxtx(dev); + netif_start_queue(dev); + nv_napi_enable(dev); + + if (ret) { + netif_carrier_on(dev); + } else { + netdev_info(dev, "no link during initialization\n"); + netif_carrier_off(dev); + } + if (oom) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + + /* start statistics timer */ + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) + mod_timer(&np->stats_poll, + round_jiffies(jiffies + STATS_INTERVAL)); + + spin_unlock_irq(&np->lock); + + return 0; +out_drain: + nv_drain_rxtx(dev); + return ret; +} + +static int nv_close(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base; + + spin_lock_irq(&np->lock); + np->in_shutdown = 1; + spin_unlock_irq(&np->lock); + nv_napi_disable(dev); + synchronize_irq(np->pci_dev->irq); + + del_timer_sync(&np->oom_kick); + del_timer_sync(&np->nic_poll); + del_timer_sync(&np->stats_poll); + + netif_stop_queue(dev); + spin_lock_irq(&np->lock); + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + + /* disable interrupts on the nic or we will lock up */ + base = get_hwbase(dev); + nv_disable_hw_interrupts(dev, np->irqmask); + pci_push(base); + + spin_unlock_irq(&np->lock); + + nv_free_irq(dev); + + nv_drain_rxtx(dev); + + if (np->wolenabled || !phy_power_down) { + nv_txrx_gate(dev, false); + writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); + nv_start_rx(dev); + } else { + /* power down phy */ + mii_rw(dev, np->phyaddr, MII_BMCR, + mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); + nv_txrx_gate(dev, true); + } + + /* FIXME: power down nic */ + + return 0; +} + +static const struct net_device_ops nv_netdev_ops = { + .ndo_open = nv_open, + .ndo_stop = nv_close, + .ndo_get_stats = nv_get_stats, + .ndo_start_xmit = nv_start_xmit, + .ndo_tx_timeout = nv_tx_timeout, + .ndo_change_mtu = nv_change_mtu, + .ndo_fix_features = nv_fix_features, + .ndo_set_features = nv_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = nv_set_mac_address, + .ndo_set_rx_mode = nv_set_multicast, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nv_poll_controller, +#endif +}; + +static const struct net_device_ops nv_netdev_ops_optimized = { + .ndo_open = nv_open, + .ndo_stop = nv_close, + .ndo_get_stats = nv_get_stats, + .ndo_start_xmit = nv_start_xmit_optimized, + .ndo_tx_timeout = nv_tx_timeout, + .ndo_change_mtu = nv_change_mtu, + .ndo_fix_features = nv_fix_features, + .ndo_set_features = nv_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = nv_set_mac_address, + .ndo_set_rx_mode = nv_set_multicast, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nv_poll_controller, +#endif +}; + +static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + struct net_device *dev; + struct fe_priv *np; + unsigned long addr; + u8 __iomem *base; + int err, i; + u32 powerstate, txreg; + u32 phystate_orig = 0, phystate; + int phyinitialized = 0; + static int printed_version; + + if (!printed_version++) + pr_info("Reverse Engineered nForce ethernet driver. 
Version %s.\n", + FORCEDETH_VERSION); + + dev = alloc_etherdev(sizeof(struct fe_priv)); + err = -ENOMEM; + if (!dev) + goto out; + + np = netdev_priv(dev); + np->dev = dev; + np->pci_dev = pci_dev; + spin_lock_init(&np->lock); + SET_NETDEV_DEV(dev, &pci_dev->dev); + + init_timer(&np->oom_kick); + np->oom_kick.data = (unsigned long) dev; + np->oom_kick.function = nv_do_rx_refill; /* timer handler */ + init_timer(&np->nic_poll); + np->nic_poll.data = (unsigned long) dev; + np->nic_poll.function = nv_do_nic_poll; /* timer handler */ + init_timer(&np->stats_poll); + np->stats_poll.data = (unsigned long) dev; + np->stats_poll.function = nv_do_stats_poll; /* timer handler */ + + err = pci_enable_device(pci_dev); + if (err) + goto out_free; + + pci_set_master(pci_dev); + + err = pci_request_regions(pci_dev, DRV_NAME); + if (err < 0) + goto out_disable; + + if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) + np->register_size = NV_PCI_REGSZ_VER3; + else if (id->driver_data & DEV_HAS_STATISTICS_V1) + np->register_size = NV_PCI_REGSZ_VER2; + else + np->register_size = NV_PCI_REGSZ_VER1; + + err = -EINVAL; + addr = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && + pci_resource_len(pci_dev, i) >= np->register_size) { + addr = pci_resource_start(pci_dev, i); + break; + } + } + if (i == DEVICE_COUNT_RESOURCE) { + dev_info(&pci_dev->dev, "Couldn't find register window\n"); + goto out_relreg; + } + + /* copy of driver data */ + np->driver_data = id->driver_data; + /* copy of device id */ + np->device_id = id->device; + + /* handle different descriptor versions */ + if (id->driver_data & DEV_HAS_HIGH_DMA) { + /* packet format 3: supports 40-bit addressing */ + np->desc_ver = DESC_VER_3; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; + if (dma_64bit) { + if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) + dev_info(&pci_dev->dev, + "64-bit DMA failed, using 32-bit addressing\n"); + else + dev->features |= NETIF_F_HIGHDMA; + if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { + dev_info(&pci_dev->dev, + "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); + } + } + } else if (id->driver_data & DEV_HAS_LARGEDESC) { + /* packet format 2: supports jumbo frames */ + np->desc_ver = DESC_VER_2; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; + } else { + /* original packet format */ + np->desc_ver = DESC_VER_1; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; + } + + np->pkt_limit = NV_PKTLIMIT_1; + if (id->driver_data & DEV_HAS_LARGEDESC) + np->pkt_limit = NV_PKTLIMIT_2; + + if (id->driver_data & DEV_HAS_CHECKSUM) { + np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_RXCSUM; + } + + np->vlanctl_bits = 0; + if (id->driver_data & DEV_HAS_VLAN) { + np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; + dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; + } + + dev->features |= dev->hw_features; + + np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; + if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || + (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || + (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { + np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; + } + + err = -ENOMEM; + np->base = ioremap(addr, np->register_size); + if (!np->base) + goto out_relreg; + dev->base_addr = (unsigned long)np->base; + + dev->irq = pci_dev->irq; + + np->rx_ring_size = 
RX_RING_DEFAULT; + np->tx_ring_size = TX_RING_DEFAULT; + + if (!nv_optimized(np)) { + np->rx_ring.orig = pci_alloc_consistent(pci_dev, + sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), + &np->ring_addr); + if (!np->rx_ring.orig) + goto out_unmap; + np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; + } else { + np->rx_ring.ex = pci_alloc_consistent(pci_dev, + sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), + &np->ring_addr); + if (!np->rx_ring.ex) + goto out_unmap; + np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; + } + np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); + np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); + if (!np->rx_skb || !np->tx_skb) + goto out_freering; + + if (!nv_optimized(np)) + dev->netdev_ops = &nv_netdev_ops; + else + dev->netdev_ops = &nv_netdev_ops_optimized; + + netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); + SET_ETHTOOL_OPS(dev, &ops); + dev->watchdog_timeo = NV_WATCHDOG_TIMEO; + + pci_set_drvdata(pci_dev, dev); + + /* read the mac address */ + base = get_hwbase(dev); + np->orig_mac[0] = readl(base + NvRegMacAddrA); + np->orig_mac[1] = readl(base + NvRegMacAddrB); + + /* check the workaround bit for correct mac address order */ + txreg = readl(base + NvRegTransmitPoll); + if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { + /* mac address is already in correct order */ + dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; + dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; + dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; + dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; + dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; + dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { + /* mac address is already in correct order */ + dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; + dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; + dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; + dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; + dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; + dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + /* + * Set orig mac address back to the reversed version. + * This flag will be cleared during low power transition. + * Therefore, we should always put back the reversed address. + */ + np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + + (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24); + np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); + } else { + /* need to reverse mac address to correct order */ + dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; + dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; + dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; + dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; + dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; + dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; + writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); + dev_dbg(&pci_dev->dev, + "%s: set workaround bit for reversed mac addr\n", + __func__); + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + if (!is_valid_ether_addr(dev->perm_addr)) { + /* + * Bad mac address. 
At least one bios sets the mac address + * to 01:23:45:67:89:ab + */ + dev_err(&pci_dev->dev, + "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n", + dev->dev_addr); + random_ether_addr(dev->dev_addr); + dev_err(&pci_dev->dev, + "Using random MAC address: %pM\n", dev->dev_addr); + } + + /* set mac address */ + nv_copy_mac_to_hw(dev); + + /* disable WOL */ + writel(0, base + NvRegWakeUpFlags); + np->wolenabled = 0; + device_set_wakeup_enable(&pci_dev->dev, false); + + if (id->driver_data & DEV_HAS_POWER_CNTRL) { + + /* take phy and nic out of low power mode */ + powerstate = readl(base + NvRegPowerState2); + powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; + if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) && + pci_dev->revision >= 0xA3) + powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; + writel(powerstate, base + NvRegPowerState2); + } + + if (np->desc_ver == DESC_VER_1) + np->tx_flags = NV_TX_VALID; + else + np->tx_flags = NV_TX2_VALID; + + np->msi_flags = 0; + if ((id->driver_data & DEV_HAS_MSI) && msi) + np->msi_flags |= NV_MSI_CAPABLE; + + if ((id->driver_data & DEV_HAS_MSI_X) && msix) { + /* msix has had reported issues when modifying irqmask + as in the case of napi, therefore, disable for now + */ +#if 0 + np->msi_flags |= NV_MSI_X_CAPABLE; +#endif + } + + if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) { + np->irqmask = NVREG_IRQMASK_CPU; + if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ + np->msi_flags |= 0x0001; + } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC && + !(id->driver_data & DEV_NEED_TIMERIRQ)) { + /* start off in throughput mode */ + np->irqmask = NVREG_IRQMASK_THROUGHPUT; + /* remove support for msix mode */ + np->msi_flags &= ~NV_MSI_X_CAPABLE; + } else { + optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; + np->irqmask = NVREG_IRQMASK_THROUGHPUT; + if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ + np->msi_flags |= 0x0003; + } + + if (id->driver_data & DEV_NEED_TIMERIRQ) + np->irqmask |= NVREG_IRQ_TIMER; + if (id->driver_data & DEV_NEED_LINKTIMER) { + np->need_linktimer = 1; + np->link_timeout = jiffies + LINK_TIMEOUT; + } else { + np->need_linktimer = 0; + } + + /* Limit the number of tx's outstanding for hw bug */ + if (id->driver_data & DEV_NEED_TX_LIMIT) { + np->tx_limit = 1; + if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && + pci_dev->revision >= 0xA2) + np->tx_limit = 0; + } + + /* clear phy state and temporarily halt phy interrupts */ + writel(0, base + NvRegMIIMask); + phystate = readl(base + NvRegAdapterControl); + if (phystate & NVREG_ADAPTCTL_RUNNING) { + phystate_orig = 1; + phystate &= ~NVREG_ADAPTCTL_RUNNING; + writel(phystate, base + NvRegAdapterControl); + } + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + + if (id->driver_data & DEV_HAS_MGMT_UNIT) { + /* management unit running on the mac? */ + if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) && + (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) && + nv_mgmt_acquire_sema(dev) && + nv_mgmt_get_version(dev)) { + np->mac_in_use = 1; + if (np->mgmt_version > 0) + np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; + /* management unit setup the phy already? 
*/ + if (np->mac_in_use && + ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == + NVREG_XMITCTL_SYNC_PHY_INIT)) { + /* phy is inited by mgmt unit */ + phyinitialized = 1; + } else { + /* we need to init the phy */ + } + } + } + + /* find a suitable phy */ + for (i = 1; i <= 32; i++) { + int id1, id2; + int phyaddr = i & 0x1F; + + spin_lock_irq(&np->lock); + id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); + spin_unlock_irq(&np->lock); + if (id1 < 0 || id1 == 0xffff) + continue; + spin_lock_irq(&np->lock); + id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); + spin_unlock_irq(&np->lock); + if (id2 < 0 || id2 == 0xffff) + continue; + + np->phy_model = id2 & PHYID2_MODEL_MASK; + id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; + id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; + np->phyaddr = phyaddr; + np->phy_oui = id1 | id2; + + /* Realtek hardcoded phy id1 to all zero's on certain phys */ + if (np->phy_oui == PHY_OUI_REALTEK2) + np->phy_oui = PHY_OUI_REALTEK; + /* Setup phy revision for Realtek */ + if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) + np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; + + break; + } + if (i == 33) { + dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n"); + goto out_error; + } + + if (!phyinitialized) { + /* reset it */ + phy_init(dev); + } else { + /* see if it is a gigabit phy */ + u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + if (mii_status & PHY_GIGABIT) + np->gigabit = PHY_GIGABIT; + } + + /* set default link speed settings */ + np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + np->duplex = 0; + np->autoneg = 1; + + err = register_netdev(dev); + if (err) { + dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err); + goto out_error; + } + - nv_vlan_mode(dev, dev->features); ++ if (id->driver_data & DEV_HAS_VLAN) ++ nv_vlan_mode(dev, dev->features); + + netif_carrier_off(dev); + + dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", + dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); + + dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", + dev->features & NETIF_F_HIGHDMA ? "highdma " : "", + dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? + "csum " : "", + dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? + "vlan " : "", + id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", + id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", + id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", + np->gigabit == PHY_GIGABIT ? "gbit " : "", + np->need_linktimer ? "lnktim " : "", + np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", + np->msi_flags & NV_MSI_X_CAPABLE ? 
"msi-x " : "", + np->desc_ver); + + return 0; + +out_error: + if (phystate_orig) + writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); + pci_set_drvdata(pci_dev, NULL); +out_freering: + free_rings(dev); +out_unmap: + iounmap(get_hwbase(dev)); +out_relreg: + pci_release_regions(pci_dev); +out_disable: + pci_disable_device(pci_dev); +out_free: + free_netdev(dev); +out: + return err; +} + +static void nv_restore_phy(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u16 phy_reserved, mii_control; + + if (np->phy_oui == PHY_OUI_REALTEK && + np->phy_model == PHY_MODEL_REALTEK_8201 && + phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { + mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); + phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); + phy_reserved &= ~PHY_REALTEK_INIT_MSK1; + phy_reserved |= PHY_REALTEK_INIT8; + mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); + mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); + + /* restart auto negotiation */ + mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); + mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); + } +} + +static void nv_restore_mac_addr(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + /* special op: write back the misordered MAC address - otherwise + * the next nv_probe would see a wrong address. + */ + writel(np->orig_mac[0], base + NvRegMacAddrA); + writel(np->orig_mac[1], base + NvRegMacAddrB); + writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV, + base + NvRegTransmitPoll); +} + +static void __devexit nv_remove(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + + unregister_netdev(dev); + + nv_restore_mac_addr(pci_dev); + + /* restore any phy related changes */ + nv_restore_phy(dev); + + nv_mgmt_release_sema(dev); + + /* free all structures */ + free_rings(dev); + iounmap(get_hwbase(dev)); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + free_netdev(dev); + pci_set_drvdata(pci_dev, NULL); +} + +#ifdef CONFIG_PM_SLEEP +static int nv_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int i; + + if (netif_running(dev)) { + /* Gross. 
*/ + nv_close(dev); + } + netif_device_detach(dev); + + /* save non-pci configuration space */ + for (i = 0; i <= np->register_size/sizeof(u32); i++) + np->saved_config_space[i] = readl(base + i*sizeof(u32)); + + return 0; +} + +static int nv_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int i, rc = 0; + + /* restore non-pci configuration space */ + for (i = 0; i <= np->register_size/sizeof(u32); i++) + writel(np->saved_config_space[i], base+i*sizeof(u32)); + + if (np->driver_data & DEV_NEED_MSI_FIX) + pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE); + + /* restore phy state, including autoneg */ + phy_init(dev); + + netif_device_attach(dev); + if (netif_running(dev)) { + rc = nv_open(dev); + nv_set_multicast(dev); + } + return rc; +} + +static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); +#define NV_PM_OPS (&nv_pm_ops) + +#else +#define NV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static void nv_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct fe_priv *np = netdev_priv(dev); + + if (netif_running(dev)) + nv_close(dev); + + /* + * Restore the MAC so a kernel started by kexec won't get confused. + * If we really go for poweroff, we must not restore the MAC, + * otherwise the MAC for WOL will be reversed at least on some boards. + */ + if (system_state != SYSTEM_POWER_OFF) + nv_restore_mac_addr(pdev); + + pci_disable_device(pdev); + /* + * Apparently it is not possible to reinitialise from D3 hot, + * only put the device into D3 if we really go for poweroff. + */ + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, np->wolenabled); + pci_set_power_state(pdev, PCI_D3hot); + } +} +#else +#define nv_shutdown NULL +#endif /* CONFIG_PM */ + +static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { + { /* nForce Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x01C3), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, + }, + { /* nForce2 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0066), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x00D6), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0086), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x008C), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x00E6), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x00DF), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* CK804 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0056), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* CK804 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0057), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* MCP04 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0037), + .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* MCP04 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0038), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* MCP51 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0268), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, + }, + { /* MCP51 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0269), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, + }, + { /* MCP55 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0372), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, + }, + { /* MCP55 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0373), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03E5), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03E6), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03EE), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03EF), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0450), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0451), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0452), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* 
MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0453), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054C), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054D), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054E), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054F), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DC), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DD), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DE), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DF), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0760), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0761), + .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0762), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0763), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB0), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB1), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB2), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB3), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP89 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0D7D), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, + }, + {0,}, +}; + +static struct pci_driver driver = { + .name = DRV_NAME, + .id_table = pci_tbl, + .probe = nv_probe, + .remove = __devexit_p(nv_remove), + .shutdown = nv_shutdown, + .driver.pm = NV_PM_OPS, +}; + +static int __init init_nic(void) +{ + return pci_register_driver(&driver); +} + +static void __exit exit_nic(void) +{ + pci_unregister_driver(&driver); +} + +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); +module_param(optimization_mode, int, 0); +MODULE_PARM_DESC(optimization_mode, "In throughput 
mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load."); +module_param(poll_interval, int, 0); +MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); +module_param(msi, int, 0); +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); +module_param(msix, int, 0); +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); +module_param(dma_64bit, int, 0); +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); +module_param(phy_cross, int, 0); +MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); +module_param(phy_power_down, int, 0); +MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); + +MODULE_AUTHOR("Manfred Spraul "); +MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); +MODULE_LICENSE("GPL"); + +MODULE_DEVICE_TABLE(pci, pci_tbl); + +module_init(init_nic); +module_exit(exit_nic); diff --cc drivers/net/ethernet/renesas/sh_eth.c index ef3a3521b835,000000000000..bf2404ae3b87 mode 100644,000000..100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@@ -1,1959 -1,0 +1,1960 @@@ +/* + * SuperH Ethernet device driver + * + * Copyright (C) 2006-2008 Nobuhiro Iwamatsu + * Copyright (C) 2008-2009 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + +#include ++#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sh_eth.h" + +#define SH_ETH_DEF_MSG_ENABLE \ + (NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_RX_ERR| \ + NETIF_MSG_TX_ERR) + +/* There is CPU dependent code */ +#if defined(CONFIG_CPU_SUBTYPE_SH7724) +#define SH_ETH_RESET_DEFAULT 1 +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); + break; + default: + break; + } +} + +/* SH7724 */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ +}; +#elif defined(CONFIG_CPU_SUBTYPE_SH7757) +#define SH_ETH_HAS_BOTH_MODULES 1 +#define SH_ETH_HAS_TSU 1 +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0, RTRATE); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 1, RTRATE); + break; + default: + break; + } +} + +/* SH7757 */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate, + + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .rmcr_value = 0x00000001, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_ade = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, +}; + +#define SH_GIGA_ETH_BASE 0xfee00000 +#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) +#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) +static void sh_eth_chip_reset_giga(struct net_device *ndev) +{ + int i; + unsigned long mahr[2], malr[2]; + + /* save MAHR and MALR */ + for (i = 0; i < 2; i++) { + malr[i] = readl(GIGA_MALR(i)); + mahr[i] = readl(GIGA_MAHR(i)); + } + + /* reset device */ + writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800); + mdelay(1); + + /* restore MAHR and 
MALR */ + for (i = 0; i < 2; i++) { + writel(malr[i], GIGA_MALR(i)); + writel(mahr[i], GIGA_MAHR(i)); + } +} + +static int sh_eth_is_gether(struct sh_eth_private *mdp); +static void sh_eth_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int cnt = 100; + + if (sh_eth_is_gether(mdp)) { + sh_eth_write(ndev, 0x03, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, + EDMR); + while (cnt > 0) { + if (!(sh_eth_read(ndev, EDMR) & 0x3)) + break; + mdelay(1); + cnt--; + } + if (cnt == 0) + printk(KERN_ERR "Device reset fail\n"); + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); + } else { + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, + EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, + EDMR); + } +} + +static void sh_eth_set_duplex_giga(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate_giga(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0x00000000, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 0x00000010, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, 0x00000020, GECMR); + break; + default: + break; + } +} + +/* SH7757(GETHERC) */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { + .chip_reset = sh_eth_chip_reset_giga, + .set_duplex = sh_eth_set_duplex_giga, + .set_rate = sh_eth_set_rate_giga, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, + .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ + EESR_TFE, + .fdr_value = 0x0000072f, + .rmcr_value = 0x00000001, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, +}; + +static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) +{ + if (sh_eth_is_gether(mdp)) + return &sh_eth_my_cpu_data_giga; + else + return &sh_eth_my_cpu_data; +} + +#elif defined(CONFIG_CPU_SUBTYPE_SH7763) +#define SH_ETH_HAS_TSU 1 +static void sh_eth_chip_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); +} + +static void sh_eth_reset(struct net_device *ndev) +{ + int cnt = 100; + + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); + while (cnt > 0) { + if (!(sh_eth_read(ndev, EDMR) & 0x3)) + break; + mdelay(1); + cnt--; + } + if (cnt == 0) + printk(KERN_ERR "Device reset fail\n"); + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); +
sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); +} + +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, GECMR_10, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, GECMR_100, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, GECMR_1000, GECMR); + break; + default: + break; + } +} + +/* sh7763 */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, + .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ + EESR_TFE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, +}; + +#elif defined(CONFIG_CPU_SUBTYPE_SH7619) +#define SH_ETH_RESET_DEFAULT 1 +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, +}; +#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) +#define SH_ETH_RESET_DEFAULT 1 +#define SH_ETH_HAS_TSU 1 +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .tsu = 1, +}; +#endif + +static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) +{ + if (!cd->ecsr_value) + cd->ecsr_value = DEFAULT_ECSR_INIT; + + if (!cd->ecsipr_value) + cd->ecsipr_value = DEFAULT_ECSIPR_INIT; + + if (!cd->fcftr_value) + cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \ + DEFAULT_FIFO_F_D_RFD; + + if (!cd->fdr_value) + cd->fdr_value = DEFAULT_FDR_INIT; + + if (!cd->rmcr_value) + cd->rmcr_value = DEFAULT_RMCR_VALUE; + + if (!cd->tx_check) + cd->tx_check = DEFAULT_TX_CHECK; + + if (!cd->eesr_err_check) + cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; + + if (!cd->tx_error_check) + cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; +} + +#if defined(SH_ETH_RESET_DEFAULT) +/* Chip Reset */ +static void sh_eth_reset(struct net_device *ndev) +{ + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); +} +#endif + +#if defined(CONFIG_CPU_SH4) +static void sh_eth_set_receive_align(struct sk_buff *skb) +{ + int reserve; + + reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); + if (reserve) + skb_reserve(skb, reserve); +} +#else +static void sh_eth_set_receive_align(struct sk_buff *skb) +{ + skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); +} +#endif + + +/* CPU <-> EDMAC endian convert */ +static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) +{ + switch (mdp->edmac_endian) { + case EDMAC_LITTLE_ENDIAN: + return cpu_to_le32(x); + case EDMAC_BIG_ENDIAN: + return cpu_to_be32(x); + } + return x; +} + +static inline 
__u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) +{ + switch (mdp->edmac_endian) { + case EDMAC_LITTLE_ENDIAN: + return le32_to_cpu(x); + case EDMAC_BIG_ENDIAN: + return be32_to_cpu(x); + } + return x; +} + +/* + * Program the hardware MAC address from dev->dev_addr. + */ +static void update_mac_address(struct net_device *ndev) +{ + sh_eth_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + sh_eth_write(ndev, + (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); +} + +/* + * Get MAC address from SuperH MAC address register + * + * SuperH's Ethernet device doesn't have 'ROM' to MAC address. + * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). + * When you want use this device, you must set MAC address in bootloader. + * + */ +static void read_mac_address(struct net_device *ndev, unsigned char *mac) +{ + if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { + memcpy(ndev->dev_addr, mac, 6); + } else { + ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); + ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; + ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; + ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); + ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; + ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); + } +} + +static int sh_eth_is_gether(struct sh_eth_private *mdp) +{ + if (mdp->reg_offset == sh_eth_offset_gigabit) + return 1; + else + return 0; +} + +static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) +{ + if (sh_eth_is_gether(mdp)) + return EDTRR_TRNS_GETHER; + else + return EDTRR_TRNS_ETHER; +} + +struct bb_info { + void (*set_gate)(unsigned long addr); + struct mdiobb_ctrl ctrl; + u32 addr; + u32 mmd_msk;/* MMD */ + u32 mdo_msk; + u32 mdi_msk; + u32 mdc_msk; +}; + +/* PHY bit set */ +static void bb_set(u32 addr, u32 msk) +{ + writel(readl(addr) | msk, addr); +} + +/* PHY bit clear */ +static void bb_clr(u32 addr, u32 msk) +{ + writel((readl(addr) & ~msk), addr); +} + +/* PHY bit read */ +static int bb_read(u32 addr, u32 msk) +{ + return (readl(addr) & msk) != 0; +} + +/* Data I/O pin control */ +static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + if (bit) + bb_set(bitbang->addr, bitbang->mmd_msk); + else + bb_clr(bitbang->addr, bitbang->mmd_msk); +} + +/* Set bit data*/ +static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + if (bit) + bb_set(bitbang->addr, bitbang->mdo_msk); + else + bb_clr(bitbang->addr, bitbang->mdo_msk); +} + +/* Get bit data*/ +static int sh_get_mdio(struct mdiobb_ctrl *ctrl) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + return bb_read(bitbang->addr, bitbang->mdi_msk); +} + +/* MDC pin control */ +static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + if (bit) + bb_set(bitbang->addr, bitbang->mdc_msk); + else + bb_clr(bitbang->addr, bitbang->mdc_msk); +} + +/* mdio bus control struct */ +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = 
sh_mdc_ctrl, + .set_mdio_dir = sh_mmd_ctrl, + .set_mdio_data = sh_set_mdio, + .get_mdio_data = sh_get_mdio, +}; + +/* free skb and descriptor buffer */ +static void sh_eth_ring_free(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + + /* Free Rx skb ringbuffer */ + if (mdp->rx_skbuff) { + for (i = 0; i < RX_RING_SIZE; i++) { + if (mdp->rx_skbuff[i]) + dev_kfree_skb(mdp->rx_skbuff[i]); + } + } + kfree(mdp->rx_skbuff); + + /* Free Tx skb ringbuffer */ + if (mdp->tx_skbuff) { + for (i = 0; i < TX_RING_SIZE; i++) { + if (mdp->tx_skbuff[i]) + dev_kfree_skb(mdp->tx_skbuff[i]); + } + } + kfree(mdp->tx_skbuff); +} + +/* format skb and descriptor buffer */ +static void sh_eth_ring_format(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + struct sk_buff *skb; + struct sh_eth_rxdesc *rxdesc = NULL; + struct sh_eth_txdesc *txdesc = NULL; + int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE; + int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE; + + mdp->cur_rx = mdp->cur_tx = 0; + mdp->dirty_rx = mdp->dirty_tx = 0; + + memset(mdp->rx_ring, 0, rx_ringsize); + + /* build Rx ring buffer */ + for (i = 0; i < RX_RING_SIZE; i++) { + /* skb */ + mdp->rx_skbuff[i] = NULL; + skb = dev_alloc_skb(mdp->rx_buf_sz); + mdp->rx_skbuff[i] = skb; + if (skb == NULL) + break; + dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz, + DMA_FROM_DEVICE); + skb->dev = ndev; /* Mark as being used by this device. */ + sh_eth_set_receive_align(skb); + + /* RX descriptor */ + rxdesc = &mdp->rx_ring[i]; + rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); + rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); + + /* The size of the buffer is 16 byte boundary. */ + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); + /* Rx descriptor address set */ + if (i == 0) { + sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); + if (sh_eth_is_gether(mdp)) + sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); + } + } + + mdp->dirty_rx = (u32) (i - RX_RING_SIZE); + + /* Mark the last entry as wrapping the ring. */ + rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); + + memset(mdp->tx_ring, 0, tx_ringsize); + + /* build Tx ring buffer */ + for (i = 0; i < TX_RING_SIZE; i++) { + mdp->tx_skbuff[i] = NULL; + txdesc = &mdp->tx_ring[i]; + txdesc->status = cpu_to_edmac(mdp, TD_TFP); + txdesc->buffer_length = 0; + if (i == 0) { + /* Tx descriptor address set */ + sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); + if (sh_eth_is_gether(mdp)) + sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); + } + } + + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); +} + +/* Get skb and descriptor buffer */ +static int sh_eth_ring_init(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int rx_ringsize, tx_ringsize, ret = 0; + + /* + * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the + * card needs room to do 8 byte alignment, +2 so we can reserve + * the first 2 bytes, and +16 gets room for the status word from the + * card. + */ + mdp->rx_buf_sz = (ndev->mtu <= 1492 ? 
PKT_BUF_SZ : + (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); + if (mdp->cd->rpadir) + mdp->rx_buf_sz += NET_IP_ALIGN; + + /* Allocate RX and TX skb rings */ + mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, + GFP_KERNEL); + if (!mdp->rx_skbuff) { + dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); + ret = -ENOMEM; + return ret; + } + + mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE, + GFP_KERNEL); + if (!mdp->tx_skbuff) { + dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); + ret = -ENOMEM; + goto skb_ring_free; + } + + /* Allocate all Rx descriptors. */ + rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; + mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, + GFP_KERNEL); + + if (!mdp->rx_ring) { + dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n", + rx_ringsize); + ret = -ENOMEM; + goto desc_ring_free; + } + + mdp->dirty_rx = 0; + + /* Allocate all Tx descriptors. */ + tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; + mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, + GFP_KERNEL); + if (!mdp->tx_ring) { + dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n", + tx_ringsize); + ret = -ENOMEM; + goto desc_ring_free; + } + return ret; + +desc_ring_free: + /* free DMA buffer */ + dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); + +skb_ring_free: + /* Free Rx and Tx skb ring buffer */ + sh_eth_ring_free(ndev); + + return ret; +} + +static int sh_eth_dev_init(struct net_device *ndev) +{ + int ret = 0; + struct sh_eth_private *mdp = netdev_priv(ndev); + u_int32_t rx_int_var, tx_int_var; + u32 val; + + /* Soft Reset */ + sh_eth_reset(ndev); + + /* Descriptor format */ + sh_eth_ring_format(ndev); + if (mdp->cd->rpadir) + sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); + + /* all sh_eth int mask */ + sh_eth_write(ndev, 0, EESIPR); + +#if defined(__LITTLE_ENDIAN__) + if (mdp->cd->hw_swap) + sh_eth_write(ndev, EDMR_EL, EDMR); + else +#endif + sh_eth_write(ndev, 0, EDMR); + + /* FIFO size set */ + sh_eth_write(ndev, mdp->cd->fdr_value, FDR); + sh_eth_write(ndev, 0, TFTR); + + /* Frame recv control */ + sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); + + rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; + tx_int_var = mdp->tx_int_var = DESC_I_TINT2; + sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER); + + if (mdp->cd->bculr) + sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ + + sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); + + if (!mdp->cd->no_trimd) + sh_eth_write(ndev, 0, TRIMD); + + /* Recv frame limit set register */ + sh_eth_write(ndev, RFLR_VALUE, RFLR); + + sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + + /* PAUSE Prohibition */ + val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | + ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; + + sh_eth_write(ndev, val, ECMR); + + if (mdp->cd->set_rate) + mdp->cd->set_rate(ndev); + + /* E-MAC Status Register clear */ + sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); + + /* E-MAC Interrupt Enable register */ + sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); + + /* Set MAC address */ + update_mac_address(ndev); + + /* mask reset */ + if (mdp->cd->apr) + sh_eth_write(ndev, APR_AP, APR); + if (mdp->cd->mpr) + sh_eth_write(ndev, MPR_MP, MPR); + if (mdp->cd->tpauser) + sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); + + /* Setting the Rx mode will start the Rx process. 
*/ + sh_eth_write(ndev, EDRRR_R, EDRRR); + + netif_start_queue(ndev); + + return ret; +} + +/* free Tx skb function */ +static int sh_eth_txfree(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_txdesc *txdesc; + int freeNum = 0; + int entry = 0; + + for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { + entry = mdp->dirty_tx % TX_RING_SIZE; + txdesc = &mdp->tx_ring[entry]; + if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) + break; + /* Free the original skb. */ + if (mdp->tx_skbuff[entry]) { + dma_unmap_single(&ndev->dev, txdesc->addr, + txdesc->buffer_length, DMA_TO_DEVICE); + dev_kfree_skb_irq(mdp->tx_skbuff[entry]); + mdp->tx_skbuff[entry] = NULL; + freeNum++; + } + txdesc->status = cpu_to_edmac(mdp, TD_TFP); + if (entry >= TX_RING_SIZE - 1) + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); + + mdp->stats.tx_packets++; + mdp->stats.tx_bytes += txdesc->buffer_length; + } + return freeNum; +} + +/* Packet receive function */ +static int sh_eth_rx(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + + int entry = mdp->cur_rx % RX_RING_SIZE; + int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; + struct sk_buff *skb; + u16 pkt_len = 0; + u32 desc_status; + + rxdesc = &mdp->rx_ring[entry]; + while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + desc_status = edmac_to_cpu(mdp, rxdesc->status); + pkt_len = rxdesc->frame_length; + + if (--boguscnt < 0) + break; + + if (!(desc_status & RDFEND)) + mdp->stats.rx_length_errors++; + + if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | + RD_RFS5 | RD_RFS6 | RD_RFS10)) { + mdp->stats.rx_errors++; + if (desc_status & RD_RFS1) + mdp->stats.rx_crc_errors++; + if (desc_status & RD_RFS2) + mdp->stats.rx_frame_errors++; + if (desc_status & RD_RFS3) + mdp->stats.rx_length_errors++; + if (desc_status & RD_RFS4) + mdp->stats.rx_length_errors++; + if (desc_status & RD_RFS6) + mdp->stats.rx_missed_errors++; + if (desc_status & RD_RFS10) + mdp->stats.rx_over_errors++; + } else { + if (!mdp->cd->hw_swap) + sh_eth_soft_swap( + phys_to_virt(ALIGN(rxdesc->addr, 4)), + pkt_len + 2); + skb = mdp->rx_skbuff[entry]; + mdp->rx_skbuff[entry] = NULL; + if (mdp->cd->rpadir) + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + netif_rx(skb); + mdp->stats.rx_packets++; + mdp->stats.rx_bytes += pkt_len; + } + rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); + entry = (++mdp->cur_rx) % RX_RING_SIZE; + rxdesc = &mdp->rx_ring[entry]; + } + + /* Refill the Rx ring buffers. */ + for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { + entry = mdp->dirty_rx % RX_RING_SIZE; + rxdesc = &mdp->rx_ring[entry]; + /* The size of the buffer is 16 byte boundary. */ + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); + + if (mdp->rx_skbuff[entry] == NULL) { + skb = dev_alloc_skb(mdp->rx_buf_sz); + mdp->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. */ + dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz, + DMA_FROM_DEVICE); + skb->dev = ndev; + sh_eth_set_receive_align(skb); + + skb_checksum_none_assert(skb); + rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); + } + if (entry >= RX_RING_SIZE - 1) + rxdesc->status |= + cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); + else + rxdesc->status |= + cpu_to_edmac(mdp, RD_RACT | RD_RFP); + } + + /* Restart Rx engine if stopped. */ + /* If we don't need to check status, don't. 
-KDU */ + if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) + sh_eth_write(ndev, EDRRR_R, EDRRR); + + return 0; +} + +static void sh_eth_rcv_snd_disable(struct net_device *ndev) +{ + /* disable tx and rx */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & + ~(ECMR_RE | ECMR_TE), ECMR); +} + +static void sh_eth_rcv_snd_enable(struct net_device *ndev) +{ + /* enable tx and rx */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | + (ECMR_RE | ECMR_TE), ECMR); +} + +/* error control function */ +static void sh_eth_error(struct net_device *ndev, int intr_status) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 felic_stat; + u32 link_stat; + u32 mask; + + if (intr_status & EESR_ECI) { + felic_stat = sh_eth_read(ndev, ECSR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ + if (felic_stat & ECSR_ICD) + mdp->stats.tx_carrier_errors++; + if (felic_stat & ECSR_LCHNG) { + /* Link Changed */ + if (mdp->cd->no_psr || mdp->no_ether_link) { + if (mdp->link == PHY_DOWN) + link_stat = 0; + else + link_stat = PHY_ST_LINK; + } else { + link_stat = (sh_eth_read(ndev, PSR)); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; + } + if (!(link_stat & PHY_ST_LINK)) + sh_eth_rcv_snd_disable(ndev); + else { + /* Link Up */ + sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & + ~DMAC_M_ECI, EESIPR); + /*clear int */ + sh_eth_write(ndev, sh_eth_read(ndev, ECSR), + ECSR); + sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | + DMAC_M_ECI, EESIPR); + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); + } + } + } + + if (intr_status & EESR_TWB) { + /* Write buck end. unused write back interrupt */ + if (intr_status & EESR_TABT) /* Transmit Abort int */ + mdp->stats.tx_aborted_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Transmit Abort\n"); + } + + if (intr_status & EESR_RABT) { + /* Receive Abort int */ + if (intr_status & EESR_RFRMER) { + /* Receive Frame Overflow int */ + mdp->stats.rx_frame_errors++; + if (netif_msg_rx_err(mdp)) + dev_err(&ndev->dev, "Receive Abort\n"); + } + } + + if (intr_status & EESR_TDE) { + /* Transmit Descriptor Empty int */ + mdp->stats.tx_fifo_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); + } + + if (intr_status & EESR_TFE) { + /* FIFO under flow */ + mdp->stats.tx_fifo_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); + } + + if (intr_status & EESR_RDE) { + /* Receive Descriptor Empty int */ + mdp->stats.rx_over_errors++; + + if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R) + sh_eth_write(ndev, EDRRR_R, EDRRR); + if (netif_msg_rx_err(mdp)) + dev_err(&ndev->dev, "Receive Descriptor Empty\n"); + } + + if (intr_status & EESR_RFE) { + /* Receive FIFO Overflow int */ + mdp->stats.rx_fifo_errors++; + if (netif_msg_rx_err(mdp)) + dev_err(&ndev->dev, "Receive FIFO Overflow\n"); + } + + if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { + /* Address Error */ + mdp->stats.tx_fifo_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Address Error\n"); + } + + mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; + if (mdp->cd->no_ade) + mask &= ~EESR_ADE; + if (intr_status & mask) { + /* Tx error */ + u32 edtrr = sh_eth_read(ndev, EDTRR); + /* dmesg */ + dev_err(&ndev->dev, "TX error. 
status=%8.8x cur_tx=%8.8x ", + intr_status, mdp->cur_tx); + dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", + mdp->dirty_tx, (u32) ndev->state, edtrr); + /* dirty buffer free */ + sh_eth_txfree(ndev); + + /* SH7712 BUG */ + if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { + /* tx dma start */ + sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); + } + /* wakeup */ + netif_wake_queue(ndev); + } +} + +static irqreturn_t sh_eth_interrupt(int irq, void *netdev) +{ + struct net_device *ndev = netdev; + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + irqreturn_t ret = IRQ_NONE; + u32 intr_status = 0; + + spin_lock(&mdp->lock); + + /* Get interrpt stat */ + intr_status = sh_eth_read(ndev, EESR); + /* Clear interrupt */ + if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | + EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | + cd->tx_check | cd->eesr_err_check)) { + sh_eth_write(ndev, intr_status, EESR); + ret = IRQ_HANDLED; + } else + goto other_irq; + + if (intr_status & (EESR_FRC | /* Frame recv*/ + EESR_RMAF | /* Multi cast address recv*/ + EESR_RRF | /* Bit frame recv */ + EESR_RTLF | /* Long frame recv*/ + EESR_RTSF | /* short frame recv */ + EESR_PRE | /* PHY-LSI recv error */ + EESR_CERF)){ /* recv frame CRC error */ + sh_eth_rx(ndev); + } + + /* Tx Check */ + if (intr_status & cd->tx_check) { + sh_eth_txfree(ndev); + netif_wake_queue(ndev); + } + + if (intr_status & cd->eesr_err_check) + sh_eth_error(ndev, intr_status); + +other_irq: + spin_unlock(&mdp->lock); + + return ret; +} + +static void sh_eth_timer(unsigned long data) +{ + struct net_device *ndev = (struct net_device *)data; + struct sh_eth_private *mdp = netdev_priv(ndev); + + mod_timer(&mdp->timer, jiffies + (10 * HZ)); +} + +/* PHY state control function */ +static void sh_eth_adjust_link(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev = mdp->phydev; + int new_state = 0; + + if (phydev->link != PHY_DOWN) { + if (phydev->duplex != mdp->duplex) { + new_state = 1; + mdp->duplex = phydev->duplex; + if (mdp->cd->set_duplex) + mdp->cd->set_duplex(ndev); + } + + if (phydev->speed != mdp->speed) { + new_state = 1; + mdp->speed = phydev->speed; + if (mdp->cd->set_rate) + mdp->cd->set_rate(ndev); + } + if (mdp->link == PHY_DOWN) { + sh_eth_write(ndev, + (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); + new_state = 1; + mdp->link = phydev->link; + } + } else if (mdp->link) { + new_state = 1; + mdp->link = PHY_DOWN; + mdp->speed = 0; + mdp->duplex = -1; + } + + if (new_state && netif_msg_link(mdp)) + phy_print_status(phydev); +} + +/* PHY init function */ +static int sh_eth_phy_init(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + char phy_id[MII_BUS_ID_SIZE + 3]; + struct phy_device *phydev = NULL; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + mdp->mii_bus->id , mdp->phy_id); + + mdp->link = PHY_DOWN; + mdp->speed = 0; + mdp->duplex = -1; + + /* Try connect to PHY */ + phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, + 0, mdp->phy_interface); + if (IS_ERR(phydev)) { + dev_err(&ndev->dev, "phy_connect failed\n"); + return PTR_ERR(phydev); + } + + dev_info(&ndev->dev, "attached phy %i to driver %s\n", + phydev->addr, phydev->drv->name); + + mdp->phydev = phydev; + + return 0; +} + +/* PHY control start function */ +static int sh_eth_phy_start(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + ret = sh_eth_phy_init(ndev); + if (ret) + return ret; + + /* 
reset phy - this also wakes it from PDOWN */ + phy_write(mdp->phydev, MII_BMCR, BMCR_RESET); + phy_start(mdp->phydev); + + return 0; +} + +static int sh_eth_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdp->lock, flags); + ret = phy_ethtool_gset(mdp->phydev, ecmd); + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static int sh_eth_set_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdp->lock, flags); + + /* disable tx and rx */ + sh_eth_rcv_snd_disable(ndev); + + ret = phy_ethtool_sset(mdp->phydev, ecmd); + if (ret) + goto error_exit; + + if (ecmd->duplex == DUPLEX_FULL) + mdp->duplex = 1; + else + mdp->duplex = 0; + + if (mdp->cd->set_duplex) + mdp->cd->set_duplex(ndev); + +error_exit: + mdelay(1); + + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static int sh_eth_nway_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdp->lock, flags); + ret = phy_start_aneg(mdp->phydev); + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static u32 sh_eth_get_msglevel(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + return mdp->msg_enable; +} + +static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + mdp->msg_enable = value; +} + +static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_current", "tx_current", + "rx_dirty", "tx_dirty", +}; +#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) + +static int sh_eth_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return SH_ETH_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void sh_eth_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i = 0; + + /* device-specific stats */ + data[i++] = mdp->cur_rx; + data[i++] = mdp->cur_tx; + data[i++] = mdp->dirty_rx; + data[i++] = mdp->dirty_tx; +} + +static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *sh_eth_gstrings_stats, + sizeof(sh_eth_gstrings_stats)); + break; + } +} + +static struct ethtool_ops sh_eth_ethtool_ops = { + .get_settings = sh_eth_get_settings, + .set_settings = sh_eth_set_settings, + .nway_reset = sh_eth_nway_reset, + .get_msglevel = sh_eth_get_msglevel, + .set_msglevel = sh_eth_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = sh_eth_get_strings, + .get_ethtool_stats = sh_eth_get_ethtool_stats, + .get_sset_count = sh_eth_get_sset_count, +}; + +/* network device open function */ +static int sh_eth_open(struct net_device *ndev) +{ + int ret = 0; + struct sh_eth_private *mdp = netdev_priv(ndev); + + pm_runtime_get_sync(&mdp->pdev->dev); + + ret = request_irq(ndev->irq, sh_eth_interrupt, +#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7764) || \ + defined(CONFIG_CPU_SUBTYPE_SH7757) + IRQF_SHARED, +#else + 0, +#endif + ndev->name, ndev); + if (ret) { + dev_err(&ndev->dev, "Can not assign IRQ number\n"); + return ret; + } + + /* Descriptor set */ + ret = 
sh_eth_ring_init(ndev); + if (ret) + goto out_free_irq; + + /* device init */ + ret = sh_eth_dev_init(ndev); + if (ret) + goto out_free_irq; + + /* PHY control start*/ + ret = sh_eth_phy_start(ndev); + if (ret) + goto out_free_irq; + + /* Set the timer to check for link beat. */ + init_timer(&mdp->timer); + mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ + setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev); + + return ret; + +out_free_irq: + free_irq(ndev->irq, ndev); + pm_runtime_put_sync(&mdp->pdev->dev); + return ret; +} + +/* Timeout function */ +static void sh_eth_tx_timeout(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + int i; + + netif_stop_queue(ndev); + + if (netif_msg_timer(mdp)) + dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," + " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); + + /* tx_errors count up */ + mdp->stats.tx_errors++; + + /* timer off */ + del_timer_sync(&mdp->timer); + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + rxdesc = &mdp->rx_ring[i]; + rxdesc->status = 0; + rxdesc->addr = 0xBADF00D0; + if (mdp->rx_skbuff[i]) + dev_kfree_skb(mdp->rx_skbuff[i]); + mdp->rx_skbuff[i] = NULL; + } + for (i = 0; i < TX_RING_SIZE; i++) { + if (mdp->tx_skbuff[i]) + dev_kfree_skb(mdp->tx_skbuff[i]); + mdp->tx_skbuff[i] = NULL; + } + + /* device init */ + sh_eth_dev_init(ndev); + + /* timer on */ + mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ + add_timer(&mdp->timer); +} + +/* Packet transmit function */ +static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_txdesc *txdesc; + u32 entry; + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { + if (!sh_eth_txfree(ndev)) { + if (netif_msg_tx_queued(mdp)) + dev_warn(&ndev->dev, "TxFD exhausted.\n"); + netif_stop_queue(ndev); + spin_unlock_irqrestore(&mdp->lock, flags); + return NETDEV_TX_BUSY; + } + } + spin_unlock_irqrestore(&mdp->lock, flags); + + entry = mdp->cur_tx % TX_RING_SIZE; + mdp->tx_skbuff[entry] = skb; + txdesc = &mdp->tx_ring[entry]; + /* soft swap. */ + if (!mdp->cd->hw_swap) + sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), + skb->len + 2); + txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (skb->len < ETHERSMALL) + txdesc->buffer_length = ETHERSMALL; + else + txdesc->buffer_length = skb->len; + + if (entry >= TX_RING_SIZE - 1) + txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); + else + txdesc->status |= cpu_to_edmac(mdp, TD_TACT); + + mdp->cur_tx++; + + if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) + sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); + + return NETDEV_TX_OK; +} + +/* device close function */ +static int sh_eth_close(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ringsize; + + netif_stop_queue(ndev); + + /* Disable interrupts by clearing the interrupt mask. */ + sh_eth_write(ndev, 0x0000, EESIPR); + + /* Stop the chip's Tx and Rx processes. */ + sh_eth_write(ndev, 0, EDTRR); + sh_eth_write(ndev, 0, EDRRR); + + /* PHY Disconnect */ + if (mdp->phydev) { + phy_stop(mdp->phydev); + phy_disconnect(mdp->phydev); + } + + free_irq(ndev->irq, ndev); + + del_timer_sync(&mdp->timer); + + /* Free all the skbuffs in the Rx queue. 
*/ + sh_eth_ring_free(ndev); + + /* free DMA buffer */ + ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; + dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma); + + /* free DMA buffer */ + ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; + dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma); + + pm_runtime_put_sync(&mdp->pdev->dev); + + return 0; +} + +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + pm_runtime_get_sync(&mdp->pdev->dev); + + mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR); + sh_eth_write(ndev, 0, TROCR); /* (write clear) */ + mdp->stats.collisions += sh_eth_read(ndev, CDCR); + sh_eth_write(ndev, 0, CDCR); /* (write clear) */ + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); + sh_eth_write(ndev, 0, LCCR); /* (write clear) */ + if (sh_eth_is_gether(mdp)) { + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); + sh_eth_write(ndev, 0, CERCR); /* (write clear) */ + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); + sh_eth_write(ndev, 0, CEECR); /* (write clear) */ + } else { + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); + sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ + } + pm_runtime_put_sync(&mdp->pdev->dev); + + return &mdp->stats; +} + +/* ioctl to device funciotn*/ +static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, + int cmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev = mdp->phydev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +#if defined(SH_ETH_HAS_TSU) +/* Multicast reception directions set */ +static void sh_eth_set_multicast_list(struct net_device *ndev) +{ + if (ndev->flags & IFF_PROMISC) { + /* Set promiscuous. */ + sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) | + ECMR_PRM, ECMR); + } else { + /* Normal, unicast/broadcast-only mode. 
*/ + sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | + ECMR_MCT, ECMR); + } +} +#endif /* SH_ETH_HAS_TSU */ + +/* SuperH's TSU register init function */ +static void sh_eth_tsu_init(struct sh_eth_private *mdp) +{ + sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ + sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); + sh_eth_tsu_write(mdp, 0, TSU_PRISL0); + sh_eth_tsu_write(mdp, 0, TSU_PRISL1); + sh_eth_tsu_write(mdp, 0, TSU_FWSL0); + sh_eth_tsu_write(mdp, 0, TSU_FWSL1); + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); + if (sh_eth_is_gether(mdp)) { + sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ + } else { + sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ + } + sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ + sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ + sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ + sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ + sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ +} + +/* MDIO bus release function */ +static int sh_mdio_release(struct net_device *ndev) +{ + struct mii_bus *bus = dev_get_drvdata(&ndev->dev); + + /* unregister mdio bus */ + mdiobus_unregister(bus); + + /* remove mdio bus info from net_device */ + dev_set_drvdata(&ndev->dev, NULL); + + /* free interrupts memory */ + kfree(bus->irq); + + /* free bitbang info */ + free_mdio_bitbang(bus); + + return 0; +} + +/* MDIO bus init function */ +static int sh_mdio_init(struct net_device *ndev, int id, + struct sh_eth_plat_data *pd) +{ + int ret, i; + struct bb_info *bitbang; + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* create bit control struct for PHY */ + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) { + ret = -ENOMEM; + goto out; + } + + /* bitbang init */ + bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR]; + bitbang->set_gate = pd->set_mdio_gate; + bitbang->mdi_msk = 0x08; + bitbang->mdo_msk = 0x04; + bitbang->mmd_msk = 0x02;/* MMD */ + bitbang->mdc_msk = 0x01; + bitbang->ctrl.ops = &bb_ops; + + /* MII controller setting */ + mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!mdp->mii_bus) { + ret = -ENOMEM; + goto out_free_bitbang; + } + + /* Hook up MII support for ethtool */ + mdp->mii_bus->name = "sh_mii"; + mdp->mii_bus->parent = &ndev->dev; + snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id); + + /* PHY IRQ */ + mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (!mdp->mii_bus->irq) { + ret = -ENOMEM; + goto out_free_bus; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + mdp->mii_bus->irq[i] = PHY_POLL; + + /* regist mdio bus */ + ret = mdiobus_register(mdp->mii_bus); + if (ret) + goto out_free_irq; + + dev_set_drvdata(&ndev->dev, mdp->mii_bus); + + return 0; + +out_free_irq: + kfree(mdp->mii_bus->irq); + +out_free_bus: + free_mdio_bitbang(mdp->mii_bus); + +out_free_bitbang: + kfree(bitbang); + +out: + return ret; +} + +static const u16 *sh_eth_get_register_offset(int register_type) +{ + const u16 *reg_offset = NULL; 
+ + switch (register_type) { + case SH_ETH_REG_GIGABIT: + reg_offset = sh_eth_offset_gigabit; + break; + case SH_ETH_REG_FAST_SH4: + reg_offset = sh_eth_offset_fast_sh4; + break; + case SH_ETH_REG_FAST_SH3_SH2: + reg_offset = sh_eth_offset_fast_sh3_sh2; + break; + default: + printk(KERN_ERR "Unknown register type (%d)\n", register_type); + break; + } + + return reg_offset; +} + +static const struct net_device_ops sh_eth_netdev_ops = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, +#if defined(SH_ETH_HAS_TSU) + .ndo_set_rx_mode = sh_eth_set_multicast_list, +#endif + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int sh_eth_drv_probe(struct platform_device *pdev) +{ + int ret, devno = 0; + struct resource *res; + struct net_device *ndev = NULL; + struct sh_eth_private *mdp = NULL; + struct sh_eth_plat_data *pd; + + /* get base addr */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(res == NULL)) { + dev_err(&pdev->dev, "invalid resource\n"); + ret = -EINVAL; + goto out; + } + + ndev = alloc_etherdev(sizeof(struct sh_eth_private)); + if (!ndev) { + dev_err(&pdev->dev, "Could not allocate device.\n"); + ret = -ENOMEM; + goto out; + } + + /* The sh Ether-specific entries in the device structure. */ + ndev->base_addr = res->start; + devno = pdev->id; + if (devno < 0) + devno = 0; + + ndev->dma = -1; + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + ret = -ENODEV; + goto out_release; + } + ndev->irq = ret; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* Fill in the fields of the device structure with ethernet values. 
*/ + ether_setup(ndev); + + mdp = netdev_priv(ndev); + spin_lock_init(&mdp->lock); + mdp->pdev = pdev; + pm_runtime_enable(&pdev->dev); + pm_runtime_resume(&pdev->dev); + + pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); + /* get PHY ID */ + mdp->phy_id = pd->phy; + mdp->phy_interface = pd->phy_interface; + /* EDMAC endian */ + mdp->edmac_endian = pd->edmac_endian; + mdp->no_ether_link = pd->no_ether_link; + mdp->ether_link_active_low = pd->ether_link_active_low; + mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); + + /* set cpu data */ +#if defined(SH_ETH_HAS_BOTH_MODULES) + mdp->cd = sh_eth_get_cpu_data(mdp); +#else + mdp->cd = &sh_eth_my_cpu_data; +#endif + sh_eth_set_default_cpu_data(mdp->cd); + + /* set function */ + ndev->netdev_ops = &sh_eth_netdev_ops; + SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); + ndev->watchdog_timeo = TX_TIMEOUT; + + /* debug message level */ + mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; + mdp->post_rx = POST_RX >> (devno << 1); + mdp->post_fw = POST_FW >> (devno << 1); + + /* read and set MAC address */ + read_mac_address(ndev, pd->mac_addr); + + /* First device only init */ + if (!devno) { + if (mdp->cd->tsu) { + struct resource *rtsu; + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!rtsu) { + dev_err(&pdev->dev, "Not found TSU resource\n"); + goto out_release; + } + mdp->tsu_addr = ioremap(rtsu->start, + resource_size(rtsu)); + } + if (mdp->cd->chip_reset) + mdp->cd->chip_reset(ndev); + + if (mdp->cd->tsu) { + /* TSU init (Init only)*/ + sh_eth_tsu_init(mdp); + } + } + + /* network device register */ + ret = register_netdev(ndev); + if (ret) + goto out_release; + + /* mdio bus init */ + ret = sh_mdio_init(ndev, pdev->id, pd); + if (ret) + goto out_unregister; + + /* print device information */ + pr_info("Base address at 0x%x, %pM, IRQ %d.\n", + (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + + platform_set_drvdata(pdev, ndev); + + return ret; + +out_unregister: + unregister_netdev(ndev); + +out_release: + /* net_dev free */ + if (mdp && mdp->tsu_addr) + iounmap(mdp->tsu_addr); + if (ndev) + free_netdev(ndev); + +out: + return ret; +} + +static int sh_eth_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_eth_private *mdp = netdev_priv(ndev); + + iounmap(mdp->tsu_addr); + sh_mdio_release(ndev); + unregister_netdev(ndev); + pm_runtime_disable(&pdev->dev); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int sh_eth_runtime_nop(struct device *dev) +{ + /* + * Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here. 
+ */ + return 0; +} + +static struct dev_pm_ops sh_eth_dev_pm_ops = { + .runtime_suspend = sh_eth_runtime_nop, + .runtime_resume = sh_eth_runtime_nop, +}; + +static struct platform_driver sh_eth_driver = { + .probe = sh_eth_drv_probe, + .remove = sh_eth_drv_remove, + .driver = { + .name = CARDNAME, + .pm = &sh_eth_dev_pm_ops, + }, +}; + +static int __init sh_eth_init(void) +{ + return platform_driver_register(&sh_eth_driver); +} + +static void __exit sh_eth_cleanup(void) +{ + platform_driver_unregister(&sh_eth_driver); +} + +module_init(sh_eth_init); +module_exit(sh_eth_cleanup); + +MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); +MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); +MODULE_LICENSE("GPL v2"); diff --cc drivers/net/ethernet/via/via-velocity.c index 095ab566d082,000000000000..086463b141b6 mode 100644,000000..100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@@ -1,3592 -1,0 +1,3588 @@@ +/* + * This code is derived from the VIA reference driver (copyright message + * below) provided to Red Hat by VIA Networking Technologies, Inc. for + * addition to the Linux kernel. + * + * The code has been merged into one source file, cleaned up to follow + * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned + * for 64bit hardware platforms. + * + * TODO + * rx_copybreak/alignment + * More testing + * + * The changes are (c) Copyright 2004, Red Hat Inc. + * Additional fixes and clean up: Francois Romieu + * + * This source has not been verified for use in safety critical systems. + * + * Please direct queries about the revamped driver to the linux-kernel + * list not VIA. + * + * Original code: + * + * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. + * All rights reserved. + * + * This software may be redistributed and/or modified under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or + * any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Author: Chuang Liang-Shing, AJ Jiang + * + * Date: Jan 24, 2003 + * + * MODULE_LICENSE("GPL"); + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "via-velocity.h" + + +static int velocity_nics; +static int msglevel = MSG_LEVEL_INFO; + +/** + * mac_get_cam_mask - Read a CAM mask + * @regs: register block for this velocity + * @mask: buffer to store mask + * + * Fetch the mask bits of the selected CAM and store them into the + * provided mask buffer. 
+ */ +static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + writeb(0, &regs->CAMADDR); + + /* read mask */ + for (i = 0; i < 8; i++) + *mask++ = readb(&(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + +/** + * mac_set_cam_mask - Set a CAM mask + * @regs: register block for this velocity + * @mask: CAM mask to load + * + * Store a new mask into a CAM + */ +static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + writeb(CAMADDR_CAMEN, &regs->CAMADDR); + + for (i = 0; i < 8; i++) + writeb(*mask++, &(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + +static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR); + + for (i = 0; i < 8; i++) + writeb(*mask++, &(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + +/** + * mac_set_cam - set CAM data + * @regs: register block of this velocity + * @idx: Cam index + * @addr: 2 or 6 bytes of CAM data + * + * Load an address or vlan tag into a CAM + */ +static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR); + + for (i = 0; i < 6; i++) + writeb(*addr++, &(regs->MARCAM[i])); + + BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR); + + udelay(10); + + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + +static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx, + const u8 *addr) +{ + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR); + writew(*((u16 *) addr), &regs->MARCAM[0]); + + BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR); + + udelay(10); + + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + + +/** + * mac_wol_reset - reset WOL after exiting low power + * @regs: register block of this velocity + * + * Called after we drop out of wake on lan mode in order to + * reset the Wake on lan features.
This function doesn't restore + * the rest of the logic from the result of sleep/wakeup + */ +static void mac_wol_reset(struct mac_regs __iomem *regs) +{ + + /* Turn off SWPTAG right after leaving power mode */ + BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW); + /* clear sticky bits */ + BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW); + + BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR); + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR); + /* disable force PME-enable */ + writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr); + /* disable power-event config bit */ + writew(0xFFFF, &regs->WOLCRClr); + /* clear power status */ + writew(0xFFFF, &regs->WOLSRClr); +} + +static const struct ethtool_ops velocity_ethtool_ops; + +/* + Define module options +*/ + +MODULE_AUTHOR("VIA Networking Technologies, Inc."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"); + +#define VELOCITY_PARAM(N, D) \ + static int N[MAX_UNITS] = OPTION_DEFAULT;\ + module_param_array(N, int, NULL, 0); \ + MODULE_PARM_DESC(N, D); + +#define RX_DESC_MIN 64 +#define RX_DESC_MAX 255 +#define RX_DESC_DEF 64 +VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); + +#define TX_DESC_MIN 16 +#define TX_DESC_MAX 256 +#define TX_DESC_DEF 64 +VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); + +#define RX_THRESH_MIN 0 +#define RX_THRESH_MAX 3 +#define RX_THRESH_DEF 0 +/* rx_thresh[] is used for controlling the receive fifo threshold. + 0: indicate the rxfifo threshold is 128 bytes. + 1: indicate the rxfifo threshold is 512 bytes. + 2: indicate the rxfifo threshold is 1024 bytes. + 3: indicate the rxfifo threshold is store & forward. +*/ +VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); + +#define DMA_LENGTH_MIN 0 +#define DMA_LENGTH_MAX 7 +#define DMA_LENGTH_DEF 6 + +/* DMA_length[] is used for controlling the DMA length + 0: 8 DWORDs + 1: 16 DWORDs + 2: 32 DWORDs + 3: 64 DWORDs + 4: 128 DWORDs + 5: 256 DWORDs + 6: SF(flush till empty) + 7: SF(flush till empty) +*/ +VELOCITY_PARAM(DMA_length, "DMA length"); + +#define IP_ALIG_DEF 0 +/* IP_byte_align[] is used for IP header DWORD byte aligned + 0: indicate the IP header won't be DWORD byte aligned. (Default) + 1: indicate the IP header will be DWORD byte aligned. + In some environments, the IP header should be DWORD byte aligned, + or the packet will be dropped when we receive it. (eg: IPVS) +*/ +VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned"); + +#define FLOW_CNTL_DEF 1 +#define FLOW_CNTL_MIN 1 +#define FLOW_CNTL_MAX 5 + +/* flow_control[] is used for setting the flow control ability of NIC. + 1: hardware default - AUTO (default). Use Hardware default value in ANAR. + 2: enable TX flow control. + 3: enable RX flow control. + 4: enable RX/TX flow control. + 5: disable +*/ +VELOCITY_PARAM(flow_control, "Enable flow control ability"); + +#define MED_LNK_DEF 0 +#define MED_LNK_MIN 0 +#define MED_LNK_MAX 5 +/* speed_duplex[] is used for setting the speed and duplex mode of NIC. + 0: indicate autonegotiation for both speed and duplex mode + 1: indicate 100Mbps half duplex mode + 2: indicate 100Mbps full duplex mode + 3: indicate 10Mbps half duplex mode + 4: indicate 10Mbps full duplex mode + 5: indicate 1000Mbps full duplex mode + + Note: + if the EEPROM has been set to the force mode, this option is ignored + by driver. +*/ +VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); + +#define VAL_PKT_LEN_DEF 0 +/* ValPktLen[] is used for setting the checksum offload ability of NIC.
+ 0: Receive frame with invalid layer 2 length (Default) + 1: Drop frame with invalid layer 2 length +*/ +VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); + +#define WOL_OPT_DEF 0 +#define WOL_OPT_MIN 0 +#define WOL_OPT_MAX 7 +/* wol_opts[] is used for controlling wake on lan behavior. + 0: Wake up if recevied a magic packet. (Default) + 1: Wake up if link status is on/off. + 2: Wake up if recevied an arp packet. + 4: Wake up if recevied any unicast packet. + Those value can be sumed up to support more than one option. +*/ +VELOCITY_PARAM(wol_opts, "Wake On Lan options"); + +static int rx_copybreak = 200; +module_param(rx_copybreak, int, 0644); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); + +/* + * Internal board variants. At the moment we have only one + */ +static struct velocity_info_tbl chip_info_table[] = { + {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL}, + { } +}; + +/* + * Describe the PCI device identifiers that we support in this + * device driver. Used for hotplug autoloading. + */ +static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, + { } +}; + +MODULE_DEVICE_TABLE(pci, velocity_id_table); + +/** + * get_chip_name - identifier to name + * @id: chip identifier + * + * Given a chip identifier return a suitable description. Returns + * a pointer a static string valid while the driver is loaded. + */ +static const char __devinit *get_chip_name(enum chip_type chip_id) +{ + int i; + for (i = 0; chip_info_table[i].name != NULL; i++) + if (chip_info_table[i].chip_id == chip_id) + break; + return chip_info_table[i].name; +} + +/** + * velocity_remove1 - device unplug + * @pdev: PCI device being removed + * + * Device unload callback. Called on an unplug or on module + * unload for each active device that is present. Disconnects + * the device from the network layer and frees all the resources + */ +static void __devexit velocity_remove1(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct velocity_info *vptr = netdev_priv(dev); + + unregister_netdev(dev); + iounmap(vptr->mac_regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(dev); + + velocity_nics--; +} + +/** + * velocity_set_int_opt - parser for integer options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @min: lowest value allowed + * @max: highest value allowed + * @def: default value + * @name: property name + * @dev: device name + * + * Set an integer property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. + */ +static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname) +{ + if (val == -1) + *opt = def; + else if (val < min || val > max) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n", + devname, name, min, max); + *opt = def; + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n", + devname, name, val); + *opt = val; + } +} + +/** + * velocity_set_bool_opt - parser for boolean options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @def: default value (yes/no) + * @flag: numeric value to set for true. 
+ * @name: property name + * @dev: device name + * + * Set a boolean property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. + */ +static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname) +{ + (*opt) &= (~flag); + if (val == -1) + *opt |= (def ? flag : 0); + else if (val < 0 || val > 1) { + printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n", + devname, name); + *opt |= (def ? flag : 0); + } else { + printk(KERN_INFO "%s: set parameter %s to %s\n", + devname, name, val ? "TRUE" : "FALSE"); + *opt |= (val ? flag : 0); + } +} + +/** + * velocity_get_options - set options on device + * @opts: option structure for the device + * @index: index of option to use in module options array + * @devname: device name + * + * Turn the module and command options into a single structure + * for the current device + */ +static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname) +{ + + velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); + velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); + velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); + velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); + + velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); + velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); + velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); + velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); + velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); + opts->numrx = (opts->numrx & ~3); +} + +/** + * velocity_init_cam_filter - initialise CAM + * @vptr: velocity to program + * + * Initialize the content addressable memory used for filters. 
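A minimal sketch of the clamp-or-default rule that velocity_set_int_opt() applies to each module parameter: -1 selects the default, an out-of-range value falls back to the default with a notice, and anything else is taken as-is. clamp_opt() and the sample limits below are illustrative only; the real helper reports through VELOCITY_PRT rather than printf.

#include <stdio.h>

static int clamp_opt(int val, int min, int max, int def, const char *name)
{
        if (val == -1)
                return def;
        if (val < min || val > max) {
                printf("%s: %d is out of range (%d-%d), using default %d\n",
                       name, val, min, max, def);
                return def;
        }
        return val;
}

int main(void)
{
        printf("RxDescriptors = %d\n", clamp_opt(-1, 64, 255, 64, "RxDescriptors"));
        printf("DMA_length    = %d\n", clamp_opt(9, 0, 7, 6, "DMA_length"));
        printf("TxDescriptors = %d\n", clamp_opt(128, 16, 256, 64, "TxDescriptors"));
        return 0;
}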
Load + * appropriately according to the presence of VLAN + */ +static void velocity_init_cam_filter(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + unsigned int vid, i = 0; + + /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */ + WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, ®s->MCFG); + WORD_REG_BITS_ON(MCFG_VIDFR, ®s->MCFG); + + /* Disable all CAMs */ + memset(vptr->vCAMmask, 0, sizeof(u8) * 8); + memset(vptr->mCAMmask, 0, sizeof(u8) * 8); + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); + mac_set_cam_mask(regs, vptr->mCAMmask); + + /* Enable VCAMs */ - - if (test_bit(0, vptr->active_vlans)) - WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); - + for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { + mac_set_vlan_cam(regs, i, (u8 *) &vid); + vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); + if (++i >= VCAM_SIZE) + break; + } + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); +} + +static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + set_bit(vid, vptr->active_vlans); + velocity_init_cam_filter(vptr); + spin_unlock_irq(&vptr->lock); +} + +static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + clear_bit(vid, vptr->active_vlans); + velocity_init_cam_filter(vptr); + spin_unlock_irq(&vptr->lock); +} + +static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) +{ + vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; +} + +/** + * velocity_rx_reset - handle a receive reset + * @vptr: velocity we are resetting + * + * Reset the ownership and status for the receive ring side. + * Hand all the receive queue to the NIC. + */ +static void velocity_rx_reset(struct velocity_info *vptr) +{ + + struct mac_regs __iomem *regs = vptr->mac_regs; + int i; + + velocity_init_rx_ring_indexes(vptr); + + /* + * Init state, all RD entries belong to the NIC + */ + for (i = 0; i < vptr->options.numrx; ++i) + vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; + + writew(vptr->options.numrx, ®s->RBRDU); + writel(vptr->rx.pool_dma, ®s->RDBaseLo); + writew(0, ®s->RDIdx); + writew(vptr->options.numrx - 1, ®s->RDCSize); +} + +/** + * velocity_get_opt_media_mode - get media selection + * @vptr: velocity adapter + * + * Get the media mode stored in EEPROM or module options and load + * mii_status accordingly. The requested link state information + * is also returned. 
+ */ +static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) +{ + u32 status = 0; + + switch (vptr->options.spd_dpx) { + case SPD_DPX_AUTO: + status = VELOCITY_AUTONEG_ENABLE; + break; + case SPD_DPX_100_FULL: + status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_10_FULL: + status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_100_HALF: + status = VELOCITY_SPEED_100; + break; + case SPD_DPX_10_HALF: + status = VELOCITY_SPEED_10; + break; + case SPD_DPX_1000_FULL: + status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; + break; + } + vptr->mii_status = status; + return status; +} + +/** + * safe_disable_mii_autopoll - autopoll off + * @regs: velocity registers + * + * Turn off the autopoll and wait for it to disable on the chip + */ +static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs) +{ + u16 ww; + + /* turn off MAUTO */ + writeb(0, ®s->MIICR); + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } +} + +/** + * enable_mii_autopoll - turn on autopolling + * @regs: velocity registers + * + * Enable the MII link status autopoll feature on the Velocity + * hardware. Wait for it to enable. + */ +static void enable_mii_autopoll(struct mac_regs __iomem *regs) +{ + int ii; + + writeb(0, &(regs->MIICR)); + writeb(MIIADR_SWMPL, ®s->MIIADR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + + writeb(MIICR_MAUTO, ®s->MIICR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + +} + +/** + * velocity_mii_read - read MII data + * @regs: velocity registers + * @index: MII register index + * @data: buffer for received data + * + * Perform a single read of an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. 
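The MII helpers in this area all share one idiom: poll a status bit a bounded number of times and treat a loop that runs to completion as a timeout (the driver returns -ETIMEDOUT). A standalone approximation of that pattern, with demo_hw_ready() standing in for the real MIISR/MIICR reads and DEMO_MAX_TRIES for W_MAX_TIMEOUT:

#include <stdio.h>

#define DEMO_MAX_TRIES 100      /* stand-in for W_MAX_TIMEOUT */

/* Fake "hardware" that reports ready after a few polls; purely illustrative. */
static int demo_hw_ready(void)
{
        static int countdown = 5;

        return --countdown <= 0;
}

static int demo_wait_ready(void)
{
        int tries;

        for (tries = 0; tries < DEMO_MAX_TRIES; tries++) {
                /* the driver inserts udelay() here between polls */
                if (demo_hw_ready())
                        break;
        }
        return (tries == DEMO_MAX_TRIES) ? -1 : 0;      /* -1 ~ -ETIMEDOUT */
}

int main(void)
{
        printf("wait result: %d\n", demo_wait_ready());
        return 0;
}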
+ */ +static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + writeb(index, ®s->MIIADR); + + BYTE_REG_BITS_ON(MIICR_RCMD, ®s->MIICR); + + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + if (!(readb(®s->MIICR) & MIICR_RCMD)) + break; + } + + *data = readw(®s->MIIDATA); + + enable_mii_autopoll(regs); + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * mii_check_media_mode - check media state + * @regs: velocity registers + * + * Check the current MII status and determine the link status + * accordingly + */ +static u32 mii_check_media_mode(struct mac_regs __iomem *regs) +{ + u32 status = 0; + u16 ANAR; + + if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs)) + status |= VELOCITY_LINK_FAIL; + + if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs)) + status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; + else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs)) + status |= (VELOCITY_SPEED_1000); + else { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if (ANAR & ADVERTISE_100FULL) + status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); + else if (ANAR & ADVERTISE_100HALF) + status |= VELOCITY_SPEED_100; + else if (ANAR & ADVERTISE_10FULL) + status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); + else + status |= (VELOCITY_SPEED_10); + } + + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) + == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { + if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +/** + * velocity_mii_write - write MII data + * @regs: velocity registers + * @index: MII register index + * @data: 16bit data for the MII register + * + * Perform a single write to an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. + */ +static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + /* MII reg offset */ + writeb(mii_addr, ®s->MIIADR); + /* set MII data */ + writew(data, ®s->MIIDATA); + + /* turn on MIICR_WCMD */ + BYTE_REG_BITS_ON(MIICR_WCMD, ®s->MIICR); + + /* W_MAX_TIMEOUT is the timeout period */ + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(5); + if (!(readb(®s->MIICR) & MIICR_WCMD)) + break; + } + enable_mii_autopoll(regs); + + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * set_mii_flow_control - flow control setup + * @vptr: velocity interface + * + * Set up the flow control on this interface according to + * the supplied user/eeprom options. 
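mii_check_media_mode() folds the advertisement register into speed/duplex flags with a fixed priority: 100M before 10M, full duplex before half. The sketch below mirrors that ordering with local bit definitions; the ADV_*/SPEED_* values are stand-ins for this example, not the kernel constants.

#include <stdint.h>
#include <stdio.h>

#define ADV_100FULL 0x0100
#define ADV_100HALF 0x0080
#define ADV_10FULL  0x0040
#define ADV_10HALF  0x0020

#define SPEED_100   0x01
#define SPEED_10    0x02
#define DUPLEX_FULL 0x04

/* Same priority order as the ANAR decoding in mii_check_media_mode(). */
static unsigned int decode_anar(uint16_t anar)
{
        if (anar & ADV_100FULL)
                return SPEED_100 | DUPLEX_FULL;
        if (anar & ADV_100HALF)
                return SPEED_100;
        if (anar & ADV_10FULL)
                return SPEED_10 | DUPLEX_FULL;
        return SPEED_10;
}

int main(void)
{
        uint16_t anar = ADV_100HALF | ADV_10FULL;

        printf("ANAR 0x%04x -> status flags 0x%x\n", anar, decode_anar(anar));
        return 0;
}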
+ */ +static void set_mii_flow_control(struct velocity_info *vptr) +{ + /*Enable or Disable PAUSE in ANAR */ + switch (vptr->options.flow_cntl) { + case FLOW_CNTL_TX: + MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + + case FLOW_CNTL_RX: + MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + + case FLOW_CNTL_TX_RX: + MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + + case FLOW_CNTL_DISABLE: + MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + default: + break; + } +} + +/** + * mii_set_auto_on - autonegotiate on + * @vptr: velocity + * + * Enable autonegotation on this interface + */ +static void mii_set_auto_on(struct velocity_info *vptr) +{ + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs)) + MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); + else + MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); +} + +static u32 check_connection_type(struct mac_regs __iomem *regs) +{ + u32 status = 0; + u8 PHYSR0; + u16 ANAR; + PHYSR0 = readb(®s->PHYSR0); + + /* + if (!(PHYSR0 & PHYSR0_LINKGD)) + status|=VELOCITY_LINK_FAIL; + */ + + if (PHYSR0 & PHYSR0_FDPX) + status |= VELOCITY_DUPLEX_FULL; + + if (PHYSR0 & PHYSR0_SPDG) + status |= VELOCITY_SPEED_1000; + else if (PHYSR0 & PHYSR0_SPD10) + status |= VELOCITY_SPEED_10; + else + status |= VELOCITY_SPEED_100; + + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) + == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { + if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +/** + * velocity_set_media_mode - set media mode + * @mii_status: old MII link state + * + * Check the media link state and configure the flow control + * PHY and also velocity hardware setup accordingly. In particular + * we need to set up CD polling and frame bursting. 
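set_mii_flow_control() reduces to a small table from the flow_control module option to the two MII pause-advertisement bits. A self-contained rendering of that mapping, using local stand-ins for the ADVERTISE_PAUSE_* bits and the option values documented earlier (2 = TX, 3 = RX, 4 = TX/RX, 5 = disable):

#include <stdint.h>
#include <stdio.h>

#define PAUSE_CAP  0x0400
#define PAUSE_ASYM 0x0800

enum { FC_TX = 2, FC_RX = 3, FC_TX_RX = 4, FC_DISABLE = 5 };

static uint16_t apply_flow_control(uint16_t anar, int opt)
{
        anar &= ~(PAUSE_CAP | PAUSE_ASYM);
        switch (opt) {
        case FC_TX:
                anar |= PAUSE_ASYM;             /* asymmetric: advertise TX pause only */
                break;
        case FC_RX:
                anar |= PAUSE_CAP | PAUSE_ASYM;
                break;
        case FC_TX_RX:
                anar |= PAUSE_CAP;              /* symmetric pause */
                break;
        case FC_DISABLE:
        default:
                break;                          /* both bits stay clear */
        }
        return anar;
}

int main(void)
{
        printf("TX only -> ANAR pause bits 0x%04x\n", apply_flow_control(0, FC_TX));
        printf("TX/RX   -> ANAR pause bits 0x%04x\n", apply_flow_control(0, FC_TX_RX));
        return 0;
}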
+ */ +static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) +{ + u32 curr_status; + struct mac_regs __iomem *regs = vptr->mac_regs; + + vptr->mii_status = mii_check_media_mode(vptr->mac_regs); + curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); + + /* Set mii link status */ + set_mii_flow_control(vptr); + + /* + Check if new status is consistent with current status + if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) || + (mii_status==curr_status)) { + vptr->mii_status=mii_check_media_mode(vptr->mac_regs); + vptr->mii_status=check_connection_type(vptr->mac_regs); + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n"); + return 0; + } + */ + + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) + MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); + + /* + * If connection type is AUTO + */ + if (mii_status & VELOCITY_AUTONEG_ENABLE) { + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n"); + /* clear force MAC mode bit */ + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); + /* set duplex mode of MAC according to duplex mode of MII */ + MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); + MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); + + /* enable AUTO-NEGO mode */ + mii_set_auto_on(vptr); + } else { + u16 CTRL1000; + u16 ANAR; + u8 CHIPGCR; + + /* + * 1. if it's 3119, disable frame bursting in halfduplex mode + * and enable it in fullduplex mode + * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR + * 3. only enable CD heart beat counter in 10HD mode + */ + + /* set force MAC mode bit */ + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + CHIPGCR = readb(®s->CHIPGCR); + + if (mii_status & VELOCITY_SPEED_1000) + CHIPGCR |= CHIPGCR_FCGMII; + else + CHIPGCR &= ~CHIPGCR_FCGMII; + + if (mii_status & VELOCITY_DUPLEX_FULL) { + CHIPGCR |= CHIPGCR_FCFDX; + writeb(CHIPGCR, ®s->CHIPGCR); + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n"); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } else { + CHIPGCR &= ~CHIPGCR_FCFDX; + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n"); + writeb(CHIPGCR, ®s->CHIPGCR); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + } + + velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000); + CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + if ((mii_status & VELOCITY_SPEED_1000) && + (mii_status & VELOCITY_DUPLEX_FULL)) { + CTRL1000 |= ADVERTISE_1000FULL; + } + velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000); + + if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + else + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + + /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */ + velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR); + ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)); + if (mii_status & VELOCITY_SPEED_100) { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ADVERTISE_100FULL; + else + ANAR |= ADVERTISE_100HALF; + } else if (mii_status & VELOCITY_SPEED_10) { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ADVERTISE_10FULL; + else + ANAR |= ADVERTISE_10HALF; + } + velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR); + /* enable AUTO-NEGO mode */ + 
mii_set_auto_on(vptr); + /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */ + } + /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ + /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ + return VELOCITY_LINK_CHANGE; +} + +/** + * velocity_print_link_status - link status reporting + * @vptr: velocity to report on + * + * Turn the link status of the velocity card into a kernel log + * description of the new link state, detailing speed and duplex + * status + */ +static void velocity_print_link_status(struct velocity_info *vptr) +{ + + if (vptr->mii_status & VELOCITY_LINK_FAIL) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); + } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name); + + if (vptr->mii_status & VELOCITY_SPEED_1000) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); + else if (vptr->mii_status & VELOCITY_SPEED_100) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps"); + + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); + switch (vptr->options.spd_dpx) { + case SPD_DPX_1000_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); + break; + case SPD_DPX_100_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); + break; + case SPD_DPX_100_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n"); + break; + case SPD_DPX_10_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n"); + break; + case SPD_DPX_10_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n"); + break; + default: + break; + } + } +} + +/** + * enable_flow_control_ability - flow control + * @vptr: veloity to configure + * + * Set up flow control according to the flow control options + * determined by the eeprom/configuration. + */ +static void enable_flow_control_ability(struct velocity_info *vptr) +{ + + struct mac_regs __iomem *regs = vptr->mac_regs; + + switch (vptr->options.flow_cntl) { + + case FLOW_CNTL_DEFAULT: + if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0)) + writel(CR0_FDXRFCEN, ®s->CR0Set); + else + writel(CR0_FDXRFCEN, ®s->CR0Clr); + + if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0)) + writel(CR0_FDXTFCEN, ®s->CR0Set); + else + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_RX: + writel(CR0_FDXRFCEN, ®s->CR0Set); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX_RX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Set); + break; + + case FLOW_CNTL_DISABLE: + writel(CR0_FDXRFCEN, ®s->CR0Clr); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + default: + break; + } + +} + +/** + * velocity_soft_reset - soft reset + * @vptr: velocity to reset + * + * Kick off a soft reset of the velocity adapter and then poll + * until the reset sequence has completed before returning. 
+ */ +static int velocity_soft_reset(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + int i = 0; + + writel(CR0_SFRST, ®s->CR0Set); + + for (i = 0; i < W_MAX_TIMEOUT; i++) { + udelay(5); + if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set)) + break; + } + + if (i == W_MAX_TIMEOUT) { + writel(CR0_FORSRST, ®s->CR0Set); + /* FIXME: PCI POSTING */ + /* delay 2ms */ + mdelay(2); + } + return 0; +} + +/** + * velocity_set_multi - filter list change callback + * @dev: network device + * + * Called by the network layer when the filter lists need to change + * for a velocity adapter. Reload the CAMs with the new address + * filter ruleset. + */ +static void velocity_set_multi(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + u8 rx_mode; + int i; + struct netdev_hw_addr *ha; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB | RCR_PROM); + } else if ((netdev_mc_count(dev) > vptr->multicast_limit) || + (dev->flags & IFF_ALLMULTI)) { + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB); + } else { + int offset = MCAM_SIZE - vptr->multicast_limit; + mac_get_cam_mask(regs, vptr->mCAMmask); + + i = 0; + netdev_for_each_mc_addr(ha, dev) { + mac_set_cam(regs, i + offset, ha->addr); + vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); + i++; + } + + mac_set_cam_mask(regs, vptr->mCAMmask); + rx_mode = RCR_AM | RCR_AB | RCR_AP; + } + if (dev->mtu > 1500) + rx_mode |= RCR_AL; + + BYTE_REG_BITS_ON(rx_mode, ®s->RCR); + +} + +/* + * MII access , media link mode setting functions + */ + +/** + * mii_init - set up MII + * @vptr: velocity adapter + * @mii_status: links tatus + * + * Set up the PHY for the current link state. + */ +static void mii_init(struct velocity_info *vptr, u32 mii_status) +{ + u16 BMCR; + + switch (PHYID_GET_PHY_ID(vptr->phy_id)) { + case PHYID_CICADA_CS8201: + /* + * Reset to hardware default + */ + MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. + * legacy-forced issue. + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + /* + * Turn on Link/Activity LED enable bit for CIS8201 + */ + MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); + break; + case PHYID_VT3216_32BIT: + case PHYID_VT3216_64BIT: + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. 
+ * legacy-forced issue + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + break; + + case PHYID_MARVELL_1000: + case PHYID_MARVELL_1000S: + /* + * Assert CRS on Transmit + */ + MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); + break; + default: + ; + } + velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR); + if (BMCR & BMCR_ISOLATE) { + BMCR &= ~BMCR_ISOLATE; + velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR); + } +} + +/** + * setup_queue_timers - Setup interrupt timers + * + * Setup interrupt frequency during suppression (timeout if the frame + * count isn't filled). + */ +static void setup_queue_timers(struct velocity_info *vptr) +{ + /* Only for newer revisions */ + if (vptr->rev_id >= REV_ID_VT3216_A0) { + u8 txqueue_timer = 0; + u8 rxqueue_timer = 0; + + if (vptr->mii_status & (VELOCITY_SPEED_1000 | + VELOCITY_SPEED_100)) { + txqueue_timer = vptr->options.txqueue_timer; + rxqueue_timer = vptr->options.rxqueue_timer; + } + + writeb(txqueue_timer, &vptr->mac_regs->TQETMR); + writeb(rxqueue_timer, &vptr->mac_regs->RQETMR); + } +} + +/** + * setup_adaptive_interrupts - Setup interrupt suppression + * + * @vptr velocity adapter + * + * The velocity is able to suppress interrupt during high interrupt load. + * This function turns on that feature. + */ +static void setup_adaptive_interrupts(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + u16 tx_intsup = vptr->options.tx_intsup; + u16 rx_intsup = vptr->options.rx_intsup; + + /* Setup default interrupt mask (will be changed below) */ + vptr->int_mask = INT_MASK_DEF; + + /* Set Tx Interrupt Suppression Threshold */ + writeb(CAMCR_PS0, ®s->CAMCR); + if (tx_intsup != 0) { + vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I | + ISR_PTX2I | ISR_PTX3I); + writew(tx_intsup, ®s->ISRCTL); + } else + writew(ISRCTL_TSUPDIS, ®s->ISRCTL); + + /* Set Rx Interrupt Suppression Threshold */ + writeb(CAMCR_PS1, ®s->CAMCR); + if (rx_intsup != 0) { + vptr->int_mask &= ~ISR_PRXI; + writew(rx_intsup, ®s->ISRCTL); + } else + writew(ISRCTL_RSUPDIS, ®s->ISRCTL); + + /* Select page to interrupt hold timer */ + writeb(0, ®s->CAMCR); +} + +/** + * velocity_init_registers - initialise MAC registers + * @vptr: velocity to init + * @type: type of initialisation (hot or cold) + * + * Initialise the MAC on a reset or on first set up on the + * hardware. 
+ */ +static void velocity_init_registers(struct velocity_info *vptr, + enum velocity_init_type type) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + int i, mii_status; + + mac_wol_reset(regs); + + switch (type) { + case VELOCITY_INIT_RESET: + case VELOCITY_INIT_WOL: + + netif_stop_queue(vptr->dev); + + /* + * Reset RX to prevent RX pointer not on the 4X location + */ + velocity_rx_reset(vptr); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + mii_status = velocity_get_opt_media_mode(vptr); + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(vptr->dev); + } + + enable_flow_control_ability(vptr); + + mac_clear_isr(regs); + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), + ®s->CR0Set); + + break; + + case VELOCITY_INIT_COLD: + default: + /* + * Do reset + */ + velocity_soft_reset(vptr); + mdelay(5); + + mac_eeprom_reload(regs); + for (i = 0; i < 6; i++) + writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); + + /* + * clear Pre_ACPI bit. + */ + BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA)); + mac_set_rx_thresh(regs, vptr->options.rx_thresh); + mac_set_dma_length(regs, vptr->options.DMA_length); + + writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet); + /* + * Back off algorithm use original IEEE standard + */ + BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB); + + /* + * Init CAM filter + */ + velocity_init_cam_filter(vptr); + + /* + * Set packet filter: Receive directed and broadcast address + */ + velocity_set_multi(vptr->dev); + + /* + * Enable MII auto-polling + */ + enable_mii_autopoll(regs); + + setup_adaptive_interrupts(vptr); + + writel(vptr->rx.pool_dma, ®s->RDBaseLo); + writew(vptr->options.numrx - 1, ®s->RDCSize); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + writew(vptr->options.numtx - 1, ®s->TDCSize); + + for (i = 0; i < vptr->tx.numq; i++) { + writel(vptr->tx.pool_dma[i], ®s->TDBaseLo[i]); + mac_tx_queue_run(regs, i); + } + + init_flow_control_register(vptr); + + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set); + + mii_status = velocity_get_opt_media_mode(vptr); + netif_stop_queue(vptr->dev); + + mii_init(vptr, mii_status); + + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(vptr->dev); + } + + enable_flow_control_ability(vptr); + mac_hw_mibs_init(regs); + mac_write_int_mask(vptr->int_mask, regs); + mac_clear_isr(regs); + + } +} + +static void velocity_give_many_rx_descs(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + int avail, dirty, unusable; + + /* + * RD number must be equal to 4X per hardware spec + * (programming guide rev 1.20, p.13) + */ + if (vptr->rx.filled < 4) + return; + + wmb(); + + unusable = vptr->rx.filled & 0x0003; + dirty = vptr->rx.dirty - unusable; + for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { + dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; + vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; + } + + writew(vptr->rx.filled & 0xfffc, ®s->RBRDU); + vptr->rx.filled = unusable; +} + +/** + * velocity_init_dma_rings - set up DMA rings + * @vptr: Velocity to set up + * + * Allocate PCI mapped DMA rings for the receive and transmit layer + * to use. 
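velocity_give_many_rx_descs() above may only return receive descriptors to the NIC in multiples of four (per the programming guide note in its comment), so it hands back filled & ~3 and carries the remainder over to the next call. The arithmetic on its own, as a small standalone demo:

#include <stdio.h>

int main(void)
{
        unsigned int filled;

        for (filled = 0; filled <= 10; filled++) {
                unsigned int handed_back = filled & ~3u;        /* filled & 0xfffc */
                unsigned int kept = filled & 3u;                /* filled & 0x0003 */

                printf("filled=%2u -> give %2u to NIC, keep %u\n",
                       filled, handed_back, kept);
        }
        return 0;
}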
+ */ +static int velocity_init_dma_rings(struct velocity_info *vptr) +{ + struct velocity_opt *opt = &vptr->options; + const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); + const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); + struct pci_dev *pdev = vptr->pdev; + dma_addr_t pool_dma; + void *pool; + unsigned int i; + + /* + * Allocate all RD/TD rings a single pool. + * + * pci_alloc_consistent() fulfills the requirement for 64 bytes + * alignment + */ + pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + + rx_ring_size, &pool_dma); + if (!pool) { + dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", + vptr->dev->name); + return -ENOMEM; + } + + vptr->rx.ring = pool; + vptr->rx.pool_dma = pool_dma; + + pool += rx_ring_size; + pool_dma += rx_ring_size; + + for (i = 0; i < vptr->tx.numq; i++) { + vptr->tx.rings[i] = pool; + vptr->tx.pool_dma[i] = pool_dma; + pool += tx_ring_size; + pool_dma += tx_ring_size; + } + + return 0; +} + +static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) +{ + vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; +} + +/** + * velocity_alloc_rx_buf - allocate aligned receive buffer + * @vptr: velocity + * @idx: ring index + * + * Allocate a new full sized buffer for the reception of a frame and + * map it into PCI space for the hardware to use. The hardware + * requires *64* byte alignment of the buffer which makes life + * less fun than would be ideal. + */ +static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) +{ + struct rx_desc *rd = &(vptr->rx.ring[idx]); + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); + + rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64); + if (rd_info->skb == NULL) + return -ENOMEM; + + /* + * Do the gymnastics to get the buffer head for data at + * 64byte alignment. + */ + skb_reserve(rd_info->skb, + 64 - ((unsigned long) rd_info->skb->data & 63)); + rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, + vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); + + /* + * Fill in the descriptor to match + */ + + *((u32 *) & (rd->rdesc0)) = 0; + rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; + rd->pa_low = cpu_to_le32(rd_info->skb_dma); + rd->pa_high = 0; + return 0; +} + + +static int velocity_rx_refill(struct velocity_info *vptr) +{ + int dirty = vptr->rx.dirty, done = 0; + + do { + struct rx_desc *rd = vptr->rx.ring + dirty; + + /* Fine for an all zero Rx desc at init time as well */ + if (rd->rdesc0.len & OWNED_BY_NIC) + break; + + if (!vptr->rx.info[dirty].skb) { + if (velocity_alloc_rx_buf(vptr, dirty) < 0) + break; + } + done++; + dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; + } while (dirty != vptr->rx.curr); + + if (done) { + vptr->rx.dirty = dirty; + vptr->rx.filled += done; + } + + return done; +} + +/** + * velocity_free_rd_ring - free receive ring + * @vptr: velocity to clean up + * + * Free the receive buffers for each ring slot and any + * attached socket buffers that need to go away. 
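velocity_alloc_rx_buf() above over-allocates each receive buffer by 64 bytes and then skips 64 - (addr & 63) bytes so the packet data starts on a 64-byte boundary, as the hardware requires. The same arithmetic applied to an ordinary malloc() buffer in user space:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t want = 1536;
        unsigned char *raw = malloc(want + 64);
        unsigned char *aligned;

        if (!raw)
                return 1;

        /* skips between 1 and 64 bytes, exactly like the skb_reserve() call */
        aligned = raw + (64 - ((uintptr_t)raw & 63));

        printf("raw=%p aligned=%p skipped=%td bytes\n",
               (void *)raw, (void *)aligned, aligned - raw);
        printf("aligned & 63 = %lu\n", (unsigned long)((uintptr_t)aligned & 63));
        free(raw);
        return 0;
}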
+ */ +static void velocity_free_rd_ring(struct velocity_info *vptr) +{ + int i; + + if (vptr->rx.info == NULL) + return; + + for (i = 0; i < vptr->options.numrx; i++) { + struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); + struct rx_desc *rd = vptr->rx.ring + i; + + memset(rd, 0, sizeof(*rd)); + + if (!rd_info->skb) + continue; + pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, + PCI_DMA_FROMDEVICE); + rd_info->skb_dma = 0; + + dev_kfree_skb(rd_info->skb); + rd_info->skb = NULL; + } + + kfree(vptr->rx.info); + vptr->rx.info = NULL; +} + +/** + * velocity_init_rd_ring - set up receive ring + * @vptr: velocity to configure + * + * Allocate and set up the receive buffers for each ring slot and + * assign them to the network adapter. + */ +static int velocity_init_rd_ring(struct velocity_info *vptr) +{ + int ret = -ENOMEM; + + vptr->rx.info = kcalloc(vptr->options.numrx, + sizeof(struct velocity_rd_info), GFP_KERNEL); + if (!vptr->rx.info) + goto out; + + velocity_init_rx_ring_indexes(vptr); + + if (velocity_rx_refill(vptr) != vptr->options.numrx) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR + "%s: failed to allocate RX buffer.\n", vptr->dev->name); + velocity_free_rd_ring(vptr); + goto out; + } + + ret = 0; +out: + return ret; +} + +/** + * velocity_init_td_ring - set up transmit ring + * @vptr: velocity + * + * Set up the transmit ring and chain the ring pointers together. + * Returns zero on success or a negative posix errno code for + * failure. + */ +static int velocity_init_td_ring(struct velocity_info *vptr) +{ + int j; + + /* Init the TD ring entries */ + for (j = 0; j < vptr->tx.numq; j++) { + + vptr->tx.infos[j] = kcalloc(vptr->options.numtx, + sizeof(struct velocity_td_info), + GFP_KERNEL); + if (!vptr->tx.infos[j]) { + while (--j >= 0) + kfree(vptr->tx.infos[j]); + return -ENOMEM; + } + + vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; + } + return 0; +} + +/** + * velocity_free_dma_rings - free PCI ring pointers + * @vptr: Velocity to free from + * + * Clean up the PCI ring buffers allocated to this velocity. + */ +static void velocity_free_dma_rings(struct velocity_info *vptr) +{ + const int size = vptr->options.numrx * sizeof(struct rx_desc) + + vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; + + pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); +} + +static int velocity_init_rings(struct velocity_info *vptr, int mtu) +{ + int ret; + + velocity_set_rxbufsize(vptr, mtu); + + ret = velocity_init_dma_rings(vptr); + if (ret < 0) + goto out; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) + goto err_free_dma_rings_0; + + ret = velocity_init_td_ring(vptr); + if (ret < 0) + goto err_free_rd_ring_1; +out: + return ret; + +err_free_rd_ring_1: + velocity_free_rd_ring(vptr); +err_free_dma_rings_0: + velocity_free_dma_rings(vptr); + goto out; +} + +/** + * velocity_free_tx_buf - free transmit buffer + * @vptr: velocity + * @tdinfo: buffer + * + * Release an transmit buffer. If the buffer was preallocated then + * recycle it, if not then unmap the buffer. 
+ */ +static void velocity_free_tx_buf(struct velocity_info *vptr, + struct velocity_td_info *tdinfo, struct tx_desc *td) +{ + struct sk_buff *skb = tdinfo->skb; + + /* + * Don't unmap the pre-allocated tx_bufs + */ + if (tdinfo->skb_dma) { + int i; + + for (i = 0; i < tdinfo->nskb_dma; i++) { + size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN); + + /* For scatter-gather */ + if (skb_shinfo(skb)->nr_frags > 0) + pktlen = max_t(size_t, pktlen, + td->td_buf[i].size & ~TD_QUEUE); + + pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], + le16_to_cpu(pktlen), PCI_DMA_TODEVICE); + } + } + dev_kfree_skb_irq(skb); + tdinfo->skb = NULL; +} + +/* + * FIXME: could we merge this with velocity_free_tx_buf ? + */ +static void velocity_free_td_ring_entry(struct velocity_info *vptr, + int q, int n) +{ + struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); + int i; + + if (td_info == NULL) + return; + + if (td_info->skb) { + for (i = 0; i < td_info->nskb_dma; i++) { + if (td_info->skb_dma[i]) { + pci_unmap_single(vptr->pdev, td_info->skb_dma[i], + td_info->skb->len, PCI_DMA_TODEVICE); + td_info->skb_dma[i] = 0; + } + } + dev_kfree_skb(td_info->skb); + td_info->skb = NULL; + } +} + +/** + * velocity_free_td_ring - free td ring + * @vptr: velocity + * + * Free up the transmit ring for this particular velocity adapter. + * We free the ring contents but not the ring itself. + */ +static void velocity_free_td_ring(struct velocity_info *vptr) +{ + int i, j; + + for (j = 0; j < vptr->tx.numq; j++) { + if (vptr->tx.infos[j] == NULL) + continue; + for (i = 0; i < vptr->options.numtx; i++) + velocity_free_td_ring_entry(vptr, j, i); + + kfree(vptr->tx.infos[j]); + vptr->tx.infos[j] = NULL; + } +} + +static void velocity_free_rings(struct velocity_info *vptr) +{ + velocity_free_td_ring(vptr); + velocity_free_rd_ring(vptr); + velocity_free_dma_rings(vptr); +} + +/** + * velocity_error - handle error from controller + * @vptr: velocity + * @status: card status + * + * Process an error report from the hardware and attempt to recover + * the card itself. 
At the moment we cannot recover from some + * theoretically impossible errors but this could be fixed using + * the pci_device_failed logic to bounce the hardware + * + */ +static void velocity_error(struct velocity_info *vptr, int status) +{ + + if (status & ISR_TXSTLI) { + struct mac_regs __iomem *regs = vptr->mac_regs; + + printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(®s->TDIdx[0])); + BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR); + writew(TRDCSR_RUN, ®s->TDCSRClr); + netif_stop_queue(vptr->dev); + + /* FIXME: port over the pci_device_failed code and use it + here */ + } + + if (status & ISR_SRCI) { + struct mac_regs __iomem *regs = vptr->mac_regs; + int linked; + + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + vptr->mii_status = check_connection_type(regs); + + /* + * If it is a 3119, disable frame bursting in + * halfduplex mode and enable it in fullduplex + * mode + */ + if (vptr->rev_id < REV_ID_VT3216_A0) { + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + else + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } + /* + * Only enable CD heart beat counter in 10HD mode + */ + if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + else + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + + setup_queue_timers(vptr); + } + /* + * Get link status from PHYSR0 + */ + linked = readb(®s->PHYSR0) & PHYSR0_LINKGD; + + if (linked) { + vptr->mii_status &= ~VELOCITY_LINK_FAIL; + netif_carrier_on(vptr->dev); + } else { + vptr->mii_status |= VELOCITY_LINK_FAIL; + netif_carrier_off(vptr->dev); + } + + velocity_print_link_status(vptr); + enable_flow_control_ability(vptr); + + /* + * Re-enable auto-polling because SRCI will disable + * auto-polling + */ + + enable_mii_autopoll(regs); + + if (vptr->mii_status & VELOCITY_LINK_FAIL) + netif_stop_queue(vptr->dev); + else + netif_wake_queue(vptr->dev); + + } + if (status & ISR_MIBFI) + velocity_update_hw_mibs(vptr); + if (status & ISR_LSTEI) + mac_rx_queue_wake(vptr->mac_regs); +} + +/** + * tx_srv - transmit interrupt service + * @vptr; Velocity + * + * Scan the queues looking for transmitted packets that + * we can complete and clean up. Update any statistics as + * necessary/ + */ +static int velocity_tx_srv(struct velocity_info *vptr) +{ + struct tx_desc *td; + int qnum; + int full = 0; + int idx; + int works = 0; + struct velocity_td_info *tdinfo; + struct net_device_stats *stats = &vptr->dev->stats; + + for (qnum = 0; qnum < vptr->tx.numq; qnum++) { + for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; + idx = (idx + 1) % vptr->options.numtx) { + + /* + * Get Tx Descriptor + */ + td = &(vptr->tx.rings[qnum][idx]); + tdinfo = &(vptr->tx.infos[qnum][idx]); + + if (td->tdesc0.len & OWNED_BY_NIC) + break; + + if ((works++ > 15)) + break; + + if (td->tdesc0.TSR & TSR0_TERR) { + stats->tx_errors++; + stats->tx_dropped++; + if (td->tdesc0.TSR & TSR0_CDH) + stats->tx_heartbeat_errors++; + if (td->tdesc0.TSR & TSR0_CRS) + stats->tx_carrier_errors++; + if (td->tdesc0.TSR & TSR0_ABT) + stats->tx_aborted_errors++; + if (td->tdesc0.TSR & TSR0_OWC) + stats->tx_window_errors++; + } else { + stats->tx_packets++; + stats->tx_bytes += tdinfo->skb->len; + } + velocity_free_tx_buf(vptr, tdinfo, td); + vptr->tx.used[qnum]--; + } + vptr->tx.tail[qnum] = idx; + + if (AVAIL_TD(vptr, qnum) < 1) + full = 1; + } + /* + * Look to see if we should kick the transmit network + * layer for more work. 
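velocity_tx_srv() above walks each transmit ring from its tail, stops at the first descriptor the NIC still owns, sorts completions into the error counters, and advances the index modulo the ring size. A condensed standalone model of that walk; struct demo_td, the bit values and the counters are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define OWNED_BY_NIC 0x8000
#define TSR_TERR     0x0001

struct demo_td {
        uint16_t status;
        uint16_t len;
};

int main(void)
{
        struct demo_td ring[4] = {
                { TSR_TERR, 60 }, { 0, 1500 }, { OWNED_BY_NIC, 0 }, { 0, 0 }
        };
        unsigned int tail = 0, used = 3;
        unsigned int tx_packets = 0, tx_errors = 0;

        while (used > 0 && !(ring[tail].status & OWNED_BY_NIC)) {
                if (ring[tail].status & TSR_TERR)
                        tx_errors++;
                else
                        tx_packets++;
                used--;
                tail = (tail + 1) % 4;          /* same wrap as (idx + 1) % numtx */
        }
        printf("completed: %u ok, %u errors, tail now %u, %u still queued\n",
               tx_packets, tx_errors, tail, used);
        return 0;
}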
+ */ + if (netif_queue_stopped(vptr->dev) && (full == 0) && + (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { + netif_wake_queue(vptr->dev); + } + return works; +} + +/** + * velocity_rx_csum - checksum process + * @rd: receive packet descriptor + * @skb: network layer packet buffer + * + * Process the status bits for the received packet and determine + * if the checksum was computed and verified by the hardware + */ +static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + if (rd->rdesc1.CSM & CSM_IPKT) { + if (rd->rdesc1.CSM & CSM_IPOK) { + if ((rd->rdesc1.CSM & CSM_TCPKT) || + (rd->rdesc1.CSM & CSM_UDPKT)) { + if (!(rd->rdesc1.CSM & CSM_TUPOK)) + return; + } + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } +} + +/** + * velocity_rx_copy - in place Rx copy for small packets + * @rx_skb: network layer packet buffer candidate + * @pkt_size: received data size + * @rd: receive packet descriptor + * @dev: network device + * + * Replace the current skb that is scheduled for Rx processing by a + * shorter, immediately allocated skb, if the received packet is small + * enough. This function returns a negative value if the received + * packet is too big or if memory is exhausted. + */ +static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, + struct velocity_info *vptr) +{ + int ret = -1; + if (pkt_size < rx_copybreak) { + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); + if (new_skb) { + new_skb->ip_summed = rx_skb[0]->ip_summed; + skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); + *rx_skb = new_skb; + ret = 0; + } + + } + return ret; +} + +/** + * velocity_iph_realign - IP header alignment + * @vptr: velocity we are handling + * @skb: network layer packet buffer + * @pkt_size: received data size + * + * Align IP header on a 2 bytes boundary. This behavior can be + * configured by the user. + */ +static inline void velocity_iph_realign(struct velocity_info *vptr, + struct sk_buff *skb, int pkt_size) +{ + if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { + memmove(skb->data + 2, skb->data, pkt_size); + skb_reserve(skb, 2); + } +} + +/** + * velocity_receive_frame - received packet processor + * @vptr: velocity we are handling + * @idx: ring index + * + * A packet has arrived. 
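velocity_rx_copy() above implements the usual copybreak trick: packets shorter than rx_copybreak are copied into a small, freshly allocated buffer so the large DMA-mapped buffer can stay on the ring. A user-space approximation of just that decision; maybe_copy() and the threshold handling are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int rx_copybreak = 200;

static unsigned char *maybe_copy(unsigned char *dma_buf, int pkt_len, int *reused)
{
        if (pkt_len < rx_copybreak) {
                unsigned char *copy = malloc(pkt_len);

                if (copy) {
                        memcpy(copy, dma_buf, pkt_len);
                        *reused = 1;            /* DMA buffer stays with the ring */
                        return copy;
                }
        }
        *reused = 0;                            /* hand the DMA buffer up the stack */
        return dma_buf;
}

int main(void)
{
        unsigned char buf[1536] = "hello";
        int reused;
        unsigned char *pkt = maybe_copy(buf, 64, &reused);

        printf("64-byte packet: ring buffer reused = %d\n", reused);
        if (pkt != buf)
                free(pkt);
        return 0;
}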
We process the packet and if appropriate + * pass the frame up the network stack + */ +static int velocity_receive_frame(struct velocity_info *vptr, int idx) +{ + void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); + struct net_device_stats *stats = &vptr->dev->stats; + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); + struct rx_desc *rd = &(vptr->rx.ring[idx]); + int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; + struct sk_buff *skb; + + if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); + stats->rx_length_errors++; + return -EINVAL; + } + + if (rd->rdesc0.RSR & RSR_MAR) + stats->multicast++; + + skb = rd_info->skb; + + pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, + vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); + + /* + * Drop frame not meeting IEEE 802.3 + */ + + if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { + if (rd->rdesc0.RSR & RSR_RL) { + stats->rx_length_errors++; + return -EINVAL; + } + } + + pci_action = pci_dma_sync_single_for_device; + + velocity_rx_csum(rd, skb); + + if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { + velocity_iph_realign(vptr, skb, pkt_len); + pci_action = pci_unmap_single; + rd_info->skb = NULL; + } + + pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, + PCI_DMA_FROMDEVICE); + + skb_put(skb, pkt_len - 4); + skb->protocol = eth_type_trans(skb, vptr->dev); + + if (rd->rdesc0.RSR & RSR_DETAG) { + u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); + + __vlan_hwaccel_put_tag(skb, vid); + } + netif_rx(skb); + + stats->rx_bytes += pkt_len; + stats->rx_packets++; + + return 0; +} + +/** + * velocity_rx_srv - service RX interrupt + * @vptr: velocity + * + * Walk the receive ring of the velocity adapter and remove + * any received packets from the receive queue. Hand the ring + * slots back to the adapter for reuse. + */ +static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) +{ + struct net_device_stats *stats = &vptr->dev->stats; + int rd_curr = vptr->rx.curr; + int works = 0; + + while (works < budget_left) { + struct rx_desc *rd = vptr->rx.ring + rd_curr; + + if (!vptr->rx.info[rd_curr].skb) + break; + + if (rd->rdesc0.len & OWNED_BY_NIC) + break; + + rmb(); + + /* + * Don't drop CE or RL error frame although RXOK is off + */ + if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) { + if (velocity_receive_frame(vptr, rd_curr) < 0) + stats->rx_dropped++; + } else { + if (rd->rdesc0.RSR & RSR_CRC) + stats->rx_crc_errors++; + if (rd->rdesc0.RSR & RSR_FAE) + stats->rx_frame_errors++; + + stats->rx_dropped++; + } + + rd->size |= RX_INTEN; + + rd_curr++; + if (rd_curr >= vptr->options.numrx) + rd_curr = 0; + works++; + } + + vptr->rx.curr = rd_curr; + + if ((works > 0) && (velocity_rx_refill(vptr) > 0)) + velocity_give_many_rx_descs(vptr); + + VAR_USED(stats); + return works; +} + +static int velocity_poll(struct napi_struct *napi, int budget) +{ + struct velocity_info *vptr = container_of(napi, + struct velocity_info, napi); + unsigned int rx_done; + unsigned long flags; + + spin_lock_irqsave(&vptr->lock, flags); + /* + * Do rx and tx twice for performance (taken from the VIA + * out-of-tree driver). 
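The comment above describes the poll loop that follows: half the NAPI budget goes to receive work, transmit completions are serviced, the remaining budget goes to receive again, and polling mode is only left if the budget was not exhausted. A toy version of just the budget arithmetic; fake_rx_srv() and its packet counts are made up.

#include <stdio.h>

static int fake_rx_srv(int budget)
{
        static int pending = 40;                /* pretend 40 packets are waiting */
        int done = pending < budget ? pending : budget;

        pending -= done;
        return done;
}

int main(void)
{
        int budget = 64;
        int rx_done;

        rx_done = fake_rx_srv(budget / 2);
        /* ...transmit completions would be serviced here... */
        rx_done += fake_rx_srv(budget - rx_done);

        printf("rx_done=%d budget=%d -> %s polling mode\n", rx_done, budget,
               rx_done < budget ? "leave" : "stay in");
        return 0;
}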
+ */ + rx_done = velocity_rx_srv(vptr, budget / 2); + velocity_tx_srv(vptr); + rx_done += velocity_rx_srv(vptr, budget - rx_done); + velocity_tx_srv(vptr); + + /* If budget not fully consumed, exit the polling mode */ + if (rx_done < budget) { + napi_complete(napi); + mac_enable_int(vptr->mac_regs); + } + spin_unlock_irqrestore(&vptr->lock, flags); + + return rx_done; +} + +/** + * velocity_intr - interrupt callback + * @irq: interrupt number + * @dev_instance: interrupting device + * + * Called whenever an interrupt is generated by the velocity + * adapter IRQ line. We may not be the source of the interrupt + * and need to identify initially if we are, and if not exit as + * efficiently as possible. + */ +static irqreturn_t velocity_intr(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct velocity_info *vptr = netdev_priv(dev); + u32 isr_status; + + spin_lock(&vptr->lock); + isr_status = mac_read_isr(vptr->mac_regs); + + /* Not us ? */ + if (isr_status == 0) { + spin_unlock(&vptr->lock); + return IRQ_NONE; + } + + /* Ack the interrupt */ + mac_write_isr(vptr->mac_regs, isr_status); + + if (likely(napi_schedule_prep(&vptr->napi))) { + mac_disable_int(vptr->mac_regs); + __napi_schedule(&vptr->napi); + } + + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) + velocity_error(vptr, isr_status); + + spin_unlock(&vptr->lock); + + return IRQ_HANDLED; +} + +/** + * velocity_open - interface activation callback + * @dev: network layer device to open + * + * Called when the network layer brings the interface up. Returns + * a negative posix error code on failure, or zero on success. + * + * All the ring allocation and set up is done on open for this + * adapter to minimise memory usage when inactive + */ +static int velocity_open(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + int ret; + + ret = velocity_init_rings(vptr, dev->mtu); + if (ret < 0) + goto out; + + /* Ensure chip is running */ + pci_set_power_state(vptr->pdev, PCI_D0); + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, + dev->name, dev); + if (ret < 0) { + /* Power down the chip */ + pci_set_power_state(vptr->pdev, PCI_D3hot); + velocity_free_rings(vptr); + goto out; + } + + velocity_give_many_rx_descs(vptr); + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); + napi_enable(&vptr->napi); + vptr->flags |= VELOCITY_FLAGS_OPENED; +out: + return ret; +} + +/** + * velocity_shutdown - shut down the chip + * @vptr: velocity to deactivate + * + * Shuts down the internal operations of the velocity and + * disables interrupts, autopolling, transmit and receive + */ +static void velocity_shutdown(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + mac_disable_int(regs); + writel(CR0_STOP, ®s->CR0Set); + writew(0xFFFF, ®s->TDCSRClr); + writeb(0xFF, ®s->RDCSRClr); + safe_disable_mii_autopoll(regs); + mac_clear_isr(regs); +} + +/** + * velocity_change_mtu - MTU change callback + * @dev: network device + * @new_mtu: desired MTU + * + * Handle requests from the networking layer for MTU change on + * this interface. It gets called on a change by the network layer. + * Return zero for success or negative posix error code. 
+ */ +static int velocity_change_mtu(struct net_device *dev, int new_mtu) +{ + struct velocity_info *vptr = netdev_priv(dev); + int ret = 0; + + if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", + vptr->dev->name); + ret = -EINVAL; + goto out_0; + } + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + goto out_0; + } + + if (dev->mtu != new_mtu) { + struct velocity_info *tmp_vptr; + unsigned long flags; + struct rx_info rx; + struct tx_info tx; + + tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL); + if (!tmp_vptr) { + ret = -ENOMEM; + goto out_0; + } + + tmp_vptr->dev = dev; + tmp_vptr->pdev = vptr->pdev; + tmp_vptr->options = vptr->options; + tmp_vptr->tx.numq = vptr->tx.numq; + + ret = velocity_init_rings(tmp_vptr, new_mtu); + if (ret < 0) + goto out_free_tmp_vptr_1; + + spin_lock_irqsave(&vptr->lock, flags); + + netif_stop_queue(dev); + velocity_shutdown(vptr); + + rx = vptr->rx; + tx = vptr->tx; + + vptr->rx = tmp_vptr->rx; + vptr->tx = tmp_vptr->tx; + + tmp_vptr->rx = rx; + tmp_vptr->tx = tx; + + dev->mtu = new_mtu; + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + velocity_give_many_rx_descs(vptr); + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); + + spin_unlock_irqrestore(&vptr->lock, flags); + + velocity_free_rings(tmp_vptr); + +out_free_tmp_vptr_1: + kfree(tmp_vptr); + } +out_0: + return ret; +} + +/** + * velocity_mii_ioctl - MII ioctl handler + * @dev: network device + * @ifr: the ifreq block for the ioctl + * @cmd: the command + * + * Process MII requests made via ioctl from the network layer. These + * are used by tools like kudzu to interrogate the link state of the + * hardware + */ +static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + unsigned long flags; + struct mii_ioctl_data *miidata = if_mii(ifr); + int err; + + switch (cmd) { + case SIOCGMIIPHY: + miidata->phy_id = readb(®s->MIIADR) & 0x1f; + break; + case SIOCGMIIREG: + if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) + return -ETIMEDOUT; + break; + case SIOCSMIIREG: + spin_lock_irqsave(&vptr->lock, flags); + err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); + spin_unlock_irqrestore(&vptr->lock, flags); + check_connection_type(vptr->mac_regs); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * velocity_ioctl - ioctl entry point + * @dev: network device + * @rq: interface request ioctl + * @cmd: command code + * + * Called when the user issues an ioctl request to the network + * device in question. The velocity interface supports MII. + */ +static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + int ret; + + /* If we are asked for information and the device is power + saving then we need to bring the device back up to talk to it */ + + if (!netif_running(dev)) + pci_set_power_state(vptr->pdev, PCI_D0); + + switch (cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + case SIOCGMIIREG: /* Read MII PHY register. */ + case SIOCSMIIREG: /* Write to MII PHY register. 
*/ + ret = velocity_mii_ioctl(dev, rq, cmd); + break; + + default: + ret = -EOPNOTSUPP; + } + if (!netif_running(dev)) + pci_set_power_state(vptr->pdev, PCI_D3hot); + + + return ret; +} + +/** + * velocity_get_status - statistics callback + * @dev: network device + * + * Callback from the network layer to allow driver statistics + * to be resynchronized with hardware collected state. In the + * case of the velocity we need to pull the MIB counters from + * the hardware into the counters before letting the network + * layer display them. + */ +static struct net_device_stats *velocity_get_stats(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + + /* If the hardware is down, don't touch MII */ + if (!netif_running(dev)) + return &dev->stats; + + spin_lock_irq(&vptr->lock); + velocity_update_hw_mibs(vptr); + spin_unlock_irq(&vptr->lock); + + dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; + dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; + dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; + +// unsigned long rx_dropped; /* no space in linux buffers */ + dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; + /* detailed rx_errors: */ +// unsigned long rx_length_errors; +// unsigned long rx_over_errors; /* receiver ring buff overflow */ + dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; +// unsigned long rx_frame_errors; /* recv'd frame alignment error */ +// unsigned long rx_fifo_errors; /* recv'r fifo overrun */ +// unsigned long rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ +// unsigned long tx_fifo_errors; + + return &dev->stats; +} + +/** + * velocity_close - close adapter callback + * @dev: network device + * + * Callback from the network layer when the velocity is being + * deactivated by the network layer + */ +static int velocity_close(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + + napi_disable(&vptr->napi); + netif_stop_queue(dev); + velocity_shutdown(vptr); + + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) + velocity_get_ip(vptr); + if (dev->irq != 0) + free_irq(dev->irq, dev); + + /* Power down the chip */ + pci_set_power_state(vptr->pdev, PCI_D3hot); + + velocity_free_rings(vptr); + + vptr->flags &= (~VELOCITY_FLAGS_OPENED); + return 0; +} + +/** + * velocity_xmit - transmit packet callback + * @skb: buffer to transmit + * @dev: network device + * + * Called by the networ layer to request a packet is queued to + * the velocity. Returns zero on success. + */ +static netdev_tx_t velocity_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + int qnum = 0; + struct tx_desc *td_ptr; + struct velocity_td_info *tdinfo; + unsigned long flags; + int pktlen; + int index, prev; + int i = 0; + + if (skb_padto(skb, ETH_ZLEN)) + goto out; + + /* The hardware can handle at most 7 memory segments, so merge + * the skb if there are more */ + if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + + pktlen = skb_shinfo(skb)->nr_frags == 0 ? 
+ max_t(unsigned int, skb->len, ETH_ZLEN) : + skb_headlen(skb); + + spin_lock_irqsave(&vptr->lock, flags); + + index = vptr->tx.curr[qnum]; + td_ptr = &(vptr->tx.rings[qnum][index]); + tdinfo = &(vptr->tx.infos[qnum][index]); + + td_ptr->tdesc1.TCR = TCR0_TIC; + td_ptr->td_buf[0].size &= ~TD_QUEUE; + + /* + * Map the linear network buffer into PCI space and + * add it to the transmit ring. + */ + tdinfo->skb = skb; + tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); + td_ptr->tdesc0.len = cpu_to_le16(pktlen); + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].size = cpu_to_le16(pktlen); + + /* Handle fragments */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); + td_ptr->td_buf[i + 1].pa_high = 0; + td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); + } + tdinfo->nskb_dma = i + 1; + + td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; + + if (vlan_tx_tag_present(skb)) { + td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + td_ptr->tdesc1.TCR |= TCR0_VETAG; + } + + /* + * Handle hardware checksum + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + if (ip->protocol == IPPROTO_TCP) + td_ptr->tdesc1.TCR |= TCR0_TCPCK; + else if (ip->protocol == IPPROTO_UDP) + td_ptr->tdesc1.TCR |= (TCR0_UDPCK); + td_ptr->tdesc1.TCR |= TCR0_IPCK; + } + + prev = index - 1; + if (prev < 0) + prev = vptr->options.numtx - 1; + td_ptr->tdesc0.len |= OWNED_BY_NIC; + vptr->tx.used[qnum]++; + vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; + + if (AVAIL_TD(vptr, qnum) < 1) + netif_stop_queue(dev); + + td_ptr = &(vptr->tx.rings[qnum][prev]); + td_ptr->td_buf[0].size |= TD_QUEUE; + mac_tx_queue_wake(vptr->mac_regs, qnum); + + spin_unlock_irqrestore(&vptr->lock, flags); +out: + return NETDEV_TX_OK; +} + +static const struct net_device_ops velocity_netdev_ops = { + .ndo_open = velocity_open, + .ndo_stop = velocity_close, + .ndo_start_xmit = velocity_xmit, + .ndo_get_stats = velocity_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = velocity_set_multi, + .ndo_change_mtu = velocity_change_mtu, + .ndo_do_ioctl = velocity_ioctl, + .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, +}; + +/** + * velocity_init_info - init private data + * @pdev: PCI device + * @vptr: Velocity info + * @info: Board type + * + * Set up the initial velocity_info struct for the device that has been + * discovered. 
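+ *
+ * The private area is zeroed before use; the chip id and transmit
+ * queue count are copied from the matching chip_info_table entry and
+ * the per-device lock is initialised here before first use.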
+ */ +static void __devinit velocity_init_info(struct pci_dev *pdev, + struct velocity_info *vptr, + const struct velocity_info_tbl *info) +{ + memset(vptr, 0, sizeof(struct velocity_info)); + + vptr->pdev = pdev; + vptr->chip_id = info->chip_id; + vptr->tx.numq = info->txqueue; + vptr->multicast_limit = MCAM_SIZE; + spin_lock_init(&vptr->lock); +} + +/** + * velocity_get_pci_info - retrieve PCI info for device + * @vptr: velocity device + * @pdev: PCI device it matches + * + * Retrieve the PCI configuration space data that interests us from + * the kernel PCI layer + */ +static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) +{ + vptr->rev_id = pdev->revision; + + pci_set_master(pdev); + + vptr->ioaddr = pci_resource_start(pdev, 0); + vptr->memaddr = pci_resource_start(pdev, 1); + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { + dev_err(&pdev->dev, + "region #0 is not an I/O resource, aborting.\n"); + return -EINVAL; + } + + if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) { + dev_err(&pdev->dev, + "region #1 is an I/O resource, aborting.\n"); + return -EINVAL; + } + + if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) { + dev_err(&pdev->dev, "region #1 is too small.\n"); + return -EINVAL; + } + vptr->pdev = pdev; + + return 0; +} + +/** + * velocity_print_info - per driver data + * @vptr: velocity + * + * Print per driver data as the kernel driver finds Velocity + * hardware + */ +static void __devinit velocity_print_info(struct velocity_info *vptr) +{ + struct net_device *dev = vptr->dev; + + printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); + printk(KERN_INFO "%s: Ethernet Address: %pM\n", + dev->name, dev->dev_addr); +} + +static u32 velocity_get_link(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0; +} + +/** + * velocity_found1 - set up discovered velocity card + * @pdev: PCI device + * @ent: PCI device table entry that matched + * + * Configure a discovered adapter from scratch. Return a negative + * errno error code on failure paths. + */ +static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int first = 1; + struct net_device *dev; + int i; + const char *drv_string; + const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data]; + struct velocity_info *vptr; + struct mac_regs __iomem *regs; + int ret = -ENOMEM; + + /* FIXME: this driver, like almost all other ethernet drivers, + * can support more than MAX_UNITS. + */ + if (velocity_nics >= MAX_UNITS) { + dev_notice(&pdev->dev, "already found %d NICs.\n", + velocity_nics); + return -ENODEV; + } + + dev = alloc_etherdev(sizeof(struct velocity_info)); + if (!dev) { + dev_err(&pdev->dev, "allocate net device failed.\n"); + goto out; + } + + /* Chain it all together */ + + SET_NETDEV_DEV(dev, &pdev->dev); + vptr = netdev_priv(dev); + + + if (first) { + printk(KERN_INFO "%s Ver. 
%s\n", + VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); + printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); + printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); + first = 0; + } + + velocity_init_info(pdev, vptr, info); + + vptr->dev = dev; + + ret = pci_enable_device(pdev); + if (ret < 0) + goto err_free_dev; + + dev->irq = pdev->irq; + + ret = velocity_get_pci_info(vptr, pdev); + if (ret < 0) { + /* error message already printed */ + goto err_disable; + } + + ret = pci_request_regions(pdev, VELOCITY_NAME); + if (ret < 0) { + dev_err(&pdev->dev, "No PCI resources.\n"); + goto err_disable; + } + + regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); + if (regs == NULL) { + ret = -EIO; + goto err_release_res; + } + + vptr->mac_regs = regs; + + mac_wol_reset(regs); + + dev->base_addr = vptr->ioaddr; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = readb(®s->PAR[i]); + + + drv_string = dev_driver_string(&pdev->dev); + + velocity_get_options(&vptr->options, velocity_nics, drv_string); + + /* + * Mask out the options cannot be set to the chip + */ + + vptr->options.flags &= info->flags; + + /* + * Enable the chip specified capbilities + */ + + vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); + + vptr->wol_opts = vptr->options.wol_opts; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + + vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); + + dev->irq = pdev->irq; + dev->netdev_ops = &velocity_netdev_ops; + dev->ethtool_ops = &velocity_ethtool_ops; + netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX; + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | + NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; + + ret = register_netdev(dev); + if (ret < 0) + goto err_iounmap; + + if (!velocity_get_link(dev)) { + netif_carrier_off(dev); + vptr->mii_status |= VELOCITY_LINK_FAIL; + } + + velocity_print_info(vptr); + pci_set_drvdata(pdev, dev); + + /* and leave the chip powered down */ + + pci_set_power_state(pdev, PCI_D3hot); + velocity_nics++; +out: + return ret; + +err_iounmap: + iounmap(regs); +err_release_res: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); +err_free_dev: + free_netdev(dev); + goto out; +} + +#ifdef CONFIG_PM +/** + * wol_calc_crc - WOL CRC + * @pattern: data pattern + * @mask_pattern: mask + * + * Compute the wake on lan crc hashes for the packet header + * we are interested in. + */ +static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern) +{ + u16 crc = 0xFFFF; + u8 mask; + int i, j; + + for (i = 0; i < size; i++) { + mask = mask_pattern[i]; + + /* Skip this loop if the mask equals to zero */ + if (mask == 0x00) + continue; + + for (j = 0; j < 8; j++) { + if ((mask & 0x01) == 0) { + mask >>= 1; + continue; + } + mask >>= 1; + crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1); + } + } + /* Finally, invert the result once to get the correct data */ + crc = ~crc; + return bitrev32(crc) >> 16; +} + +/** + * velocity_set_wol - set up for wake on lan + * @vptr: velocity to set WOL status on + * + * Set a card up for wake on lan either by unicast or by + * ARP packet. 
+ * + * FIXME: check static buffer is safe here + */ +static int velocity_set_wol(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + enum speed_opt spd_dpx = vptr->options.spd_dpx; + static u8 buf[256]; + int i; + + static u32 mask_pattern[2][4] = { + {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ + {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ + }; + + writew(0xFFFF, ®s->WOLCRClr); + writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet); + writew(WOLCR_MAGIC_EN, ®s->WOLCRSet); + + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), ®s->WOLCRSet); + */ + + if (vptr->wol_opts & VELOCITY_WOL_UCAST) + writew(WOLCR_UNICAST_EN, ®s->WOLCRSet); + + if (vptr->wol_opts & VELOCITY_WOL_ARP) { + struct arp_packet *arp = (struct arp_packet *) buf; + u16 crc; + memset(buf, 0, sizeof(struct arp_packet) + 7); + + for (i = 0; i < 4; i++) + writel(mask_pattern[0][i], ®s->ByteMask[0][i]); + + arp->type = htons(ETH_P_ARP); + arp->ar_op = htons(1); + + memcpy(arp->ar_tip, vptr->ip_addr, 4); + + crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, + (u8 *) & mask_pattern[0][0]); + + writew(crc, ®s->PatternCRC[0]); + writew(WOLCR_ARP_EN, ®s->WOLCRSet); + } + + BYTE_REG_BITS_ON(PWCFG_WOLTYPE, ®s->PWCFGSet); + BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, ®s->PWCFGSet); + + writew(0x0FFF, ®s->WOLSRClr); + + if (spd_dpx == SPD_DPX_1000_FULL) + goto mac_done; + + if (spd_dpx != SPD_DPX_AUTO) + goto advertise_done; + + if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) + MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); + + MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); + } + + if (vptr->mii_status & VELOCITY_SPEED_1000) + MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); + +advertise_done: + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + { + u8 GCR; + GCR = readb(®s->CHIPGCR); + GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX; + writeb(GCR, ®s->CHIPGCR); + } + +mac_done: + BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); + /* Turn on SWPTAG just before entering power mode */ + BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); + /* Go to bed ..... */ + BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); + + return 0; +} + +/** + * velocity_save_context - save registers + * @vptr: velocity + * @context: buffer for stored context + * + * Retrieve the current configuration from the velocity hardware + * and stash it in the context structure, for use by the context + * restore functions. 
This allows us to save things we need across + * power down states + */ +static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + u16 i; + u8 __iomem *ptr = (u8 __iomem *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + +} + +static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct velocity_info *vptr = netdev_priv(dev); + unsigned long flags; + + if (!netif_running(vptr->dev)) + return 0; + + netif_device_detach(vptr->dev); + + spin_lock_irqsave(&vptr->lock, flags); + pci_save_state(pdev); + + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { + velocity_get_ip(vptr); + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + velocity_set_wol(vptr); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_set_power_state(pdev, PCI_D3hot); + } else { + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + } + + spin_unlock_irqrestore(&vptr->lock, flags); + return 0; +} + +/** + * velocity_restore_context - restore registers + * @vptr: velocity + * @context: buffer for stored context + * + * Reload the register configuration from the velocity context + * created by velocity_save_context. + */ +static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + int i; + u8 __iomem *ptr = (u8 __iomem *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + + /* Just skip cr0 */ + for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) { + /* Clear */ + writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4); + /* Set */ + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + + for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); +} + +static int velocity_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct velocity_info *vptr = netdev_priv(dev); + unsigned long flags; + int i; + + if (!netif_running(vptr->dev)) + return 0; + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, 0, 0); + pci_restore_state(pdev); + + mac_wol_reset(vptr->mac_regs); + + spin_lock_irqsave(&vptr->lock, flags); + velocity_restore_context(vptr, &vptr->context); + velocity_init_registers(vptr, VELOCITY_INIT_WOL); + mac_disable_int(vptr->mac_regs); + + velocity_tx_srv(vptr); + + for (i = 0; i < vptr->tx.numq; i++) { + if (vptr->tx.used[i]) + mac_tx_queue_wake(vptr->mac_regs, i); + } + + mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); + netif_device_attach(vptr->dev); + + return 0; +} +#endif + +/* + * Definition for our device driver. 
The PCI layer interface + * uses this to handle all our card discover and plugging + */ +static struct pci_driver velocity_driver = { + .name = VELOCITY_NAME, + .id_table = velocity_id_table, + .probe = velocity_found1, + .remove = __devexit_p(velocity_remove1), +#ifdef CONFIG_PM + .suspend = velocity_suspend, + .resume = velocity_resume, +#endif +}; + + +/** + * velocity_ethtool_up - pre hook for ethtool + * @dev: network device + * + * Called before an ethtool operation. We need to make sure the + * chip is out of D3 state before we poke at it. + */ +static int velocity_ethtool_up(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + if (!netif_running(dev)) + pci_set_power_state(vptr->pdev, PCI_D0); + return 0; +} + +/** + * velocity_ethtool_down - post hook for ethtool + * @dev: network device + * + * Called after an ethtool operation. Restore the chip back to D3 + * state if it isn't running. + */ +static void velocity_ethtool_down(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + if (!netif_running(dev)) + pci_set_power_state(vptr->pdev, PCI_D3hot); +} + +static int velocity_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + u32 status; + status = check_connection_type(vptr->mac_regs); + + cmd->supported = SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + + cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + cmd->advertising |= + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + } else { + switch (vptr->options.spd_dpx) { + case SPD_DPX_1000_FULL: + cmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case SPD_DPX_100_HALF: + cmd->advertising |= ADVERTISED_100baseT_Half; + break; + case SPD_DPX_100_FULL: + cmd->advertising |= ADVERTISED_100baseT_Full; + break; + case SPD_DPX_10_HALF: + cmd->advertising |= ADVERTISED_10baseT_Half; + break; + case SPD_DPX_10_FULL: + cmd->advertising |= ADVERTISED_10baseT_Full; + break; + default: + break; + } + } + + if (status & VELOCITY_SPEED_1000) + ethtool_cmd_speed_set(cmd, SPEED_1000); + else if (status & VELOCITY_SPEED_100) + ethtool_cmd_speed_set(cmd, SPEED_100); + else + ethtool_cmd_speed_set(cmd, SPEED_10); + + cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->port = PORT_TP; + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = readb(®s->MIIADR) & 0x1F; + + if (status & VELOCITY_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + + return 0; +} + +static int velocity_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + u32 curr_status; + u32 new_status = 0; + int ret = 0; + + curr_status = check_connection_type(vptr->mac_regs); + curr_status &= (~VELOCITY_LINK_FAIL); + + new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); + new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0); + new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); + new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); + new_status |= ((cmd->duplex == DUPLEX_FULL) ? 
VELOCITY_DUPLEX_FULL : 0); + + if ((new_status & VELOCITY_AUTONEG_ENABLE) && + (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) { + ret = -EINVAL; + } else { + enum speed_opt spd_dpx; + + if (new_status & VELOCITY_AUTONEG_ENABLE) + spd_dpx = SPD_DPX_AUTO; + else if ((new_status & VELOCITY_SPEED_1000) && + (new_status & VELOCITY_DUPLEX_FULL)) { + spd_dpx = SPD_DPX_1000_FULL; + } else if (new_status & VELOCITY_SPEED_100) + spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? + SPD_DPX_100_FULL : SPD_DPX_100_HALF; + else if (new_status & VELOCITY_SPEED_10) + spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? + SPD_DPX_10_FULL : SPD_DPX_10_HALF; + else + return -EOPNOTSUPP; + + vptr->options.spd_dpx = spd_dpx; + + velocity_set_media_mode(vptr, new_status); + } + + return ret; +} + +static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct velocity_info *vptr = netdev_priv(dev); + strcpy(info->driver, VELOCITY_NAME); + strcpy(info->version, VELOCITY_VERSION); + strcpy(info->bus_info, pci_name(vptr->pdev)); +} + +static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = netdev_priv(dev); + wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; + wol->wolopts |= WAKE_MAGIC; + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + wol.wolopts|=WAKE_PHY; + */ + if (vptr->wol_opts & VELOCITY_WOL_UCAST) + wol->wolopts |= WAKE_UCAST; + if (vptr->wol_opts & VELOCITY_WOL_ARP) + wol->wolopts |= WAKE_ARP; + memcpy(&wol->sopass, vptr->wol_passwd, 6); +} + +static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = netdev_priv(dev); + + if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) + return -EFAULT; + vptr->wol_opts = VELOCITY_WOL_MAGIC; + + /* + if (wol.wolopts & WAKE_PHY) { + vptr->wol_opts|=VELOCITY_WOL_PHY; + vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED; + } + */ + + if (wol->wolopts & WAKE_MAGIC) { + vptr->wol_opts |= VELOCITY_WOL_MAGIC; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_UCAST) { + vptr->wol_opts |= VELOCITY_WOL_UCAST; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_ARP) { + vptr->wol_opts |= VELOCITY_WOL_ARP; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + memcpy(vptr->wol_passwd, wol->sopass, 6); + return 0; +} + +static u32 velocity_get_msglevel(struct net_device *dev) +{ + return msglevel; +} + +static void velocity_set_msglevel(struct net_device *dev, u32 value) +{ + msglevel = value; +} + +static int get_pending_timer_val(int val) +{ + int mult_bits = val >> 6; + int mult = 1; + + switch (mult_bits) + { + case 1: + mult = 4; break; + case 2: + mult = 16; break; + case 3: + mult = 64; break; + case 0: + default: + break; + } + + return (val & 0x3f) * mult; +} + +static void set_pending_timer_val(int *val, u32 us) +{ + u8 mult = 0; + u8 shift = 0; + + if (us >= 0x3f) { + mult = 1; /* mult with 4 */ + shift = 2; + } + if (us >= 0x3f * 4) { + mult = 2; /* mult with 16 */ + shift = 4; + } + if (us >= 0x3f * 16) { + mult = 3; /* mult with 64 */ + shift = 6; + } + + *val = (mult << 6) | ((us >> shift) & 0x3f); +} + + +static int velocity_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + + ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup; + ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup; + + ecmd->rx_coalesce_usecs = 
get_pending_timer_val(vptr->options.rxqueue_timer); + ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer); + + return 0; +} + +static int velocity_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + int max_us = 0x3f * 64; + unsigned long flags; + + /* 6 bits of */ + if (ecmd->tx_coalesce_usecs > max_us) + return -EINVAL; + if (ecmd->rx_coalesce_usecs > max_us) + return -EINVAL; + + if (ecmd->tx_max_coalesced_frames > 0xff) + return -EINVAL; + if (ecmd->rx_max_coalesced_frames > 0xff) + return -EINVAL; + + vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames; + vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames; + + set_pending_timer_val(&vptr->options.rxqueue_timer, + ecmd->rx_coalesce_usecs); + set_pending_timer_val(&vptr->options.txqueue_timer, + ecmd->tx_coalesce_usecs); + + /* Setup the interrupt suppression and queue timers */ + spin_lock_irqsave(&vptr->lock, flags); + mac_disable_int(vptr->mac_regs); + setup_adaptive_interrupts(vptr); + setup_queue_timers(vptr); + + mac_write_int_mask(vptr->int_mask, vptr->mac_regs); + mac_clear_isr(vptr->mac_regs); + mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); + + return 0; +} + +static const char velocity_gstrings[][ETH_GSTRING_LEN] = { + "rx_all", + "rx_ok", + "tx_ok", + "rx_error", + "rx_runt_ok", + "rx_runt_err", + "rx_64", + "tx_64", + "rx_65_to_127", + "tx_65_to_127", + "rx_128_to_255", + "tx_128_to_255", + "rx_256_to_511", + "tx_256_to_511", + "rx_512_to_1023", + "tx_512_to_1023", + "rx_1024_to_1518", + "tx_1024_to_1518", + "tx_ether_collisions", + "rx_crc_errors", + "rx_jumbo", + "tx_jumbo", + "rx_mac_control_frames", + "tx_mac_control_frames", + "rx_frame_alignement_errors", + "rx_long_ok", + "rx_long_err", + "tx_sqe_errors", + "rx_no_buf", + "rx_symbol_errors", + "in_range_length_errors", + "late_collisions" +}; + +static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings)); + break; + } +} + +static int velocity_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(velocity_gstrings); + default: + return -EOPNOTSUPP; + } +} + +static void velocity_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + if (netif_running(dev)) { + struct velocity_info *vptr = netdev_priv(dev); + u32 *p = vptr->mib_counter; + int i; + + spin_lock_irq(&vptr->lock); + velocity_update_hw_mibs(vptr); + spin_unlock_irq(&vptr->lock); + + for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++) + *data++ = *p++; + } +} + +static const struct ethtool_ops velocity_ethtool_ops = { + .get_settings = velocity_get_settings, + .set_settings = velocity_set_settings, + .get_drvinfo = velocity_get_drvinfo, + .get_wol = velocity_ethtool_get_wol, + .set_wol = velocity_ethtool_set_wol, + .get_msglevel = velocity_get_msglevel, + .set_msglevel = velocity_set_msglevel, + .get_link = velocity_get_link, + .get_strings = velocity_get_strings, + .get_sset_count = velocity_get_sset_count, + .get_ethtool_stats = velocity_get_ethtool_stats, + .get_coalesce = velocity_get_coalesce, + .set_coalesce = velocity_set_coalesce, + .begin = velocity_ethtool_up, + .complete = velocity_ethtool_down +}; + +#if defined(CONFIG_PM) && defined(CONFIG_INET) +static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) +{ + 
struct in_ifaddr *ifa = ptr;
+	struct net_device *dev = ifa->ifa_dev->dev;
+
+	if (dev_net(dev) == &init_net &&
+	    dev->netdev_ops == &velocity_netdev_ops)
+		velocity_get_ip(netdev_priv(dev));
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block velocity_inetaddr_notifier = {
+	.notifier_call = velocity_netdev_event,
+};
+
+static void velocity_register_notifier(void)
+{
+	register_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+static void velocity_unregister_notifier(void)
+{
+	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+#else
+
+#define velocity_register_notifier()	do {} while (0)
+#define velocity_unregister_notifier()	do {} while (0)
+
+#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
+
+/**
+ * velocity_init_module - load time function
+ *
+ * Called when the velocity module is loaded. The PCI driver
+ * is registered with the PCI layer, and in turn will call
+ * the probe functions for each velocity adapter installed
+ * in the system.
+ */
+static int __init velocity_init_module(void)
+{
+	int ret;
+
+	velocity_register_notifier();
+	ret = pci_register_driver(&velocity_driver);
+	if (ret < 0)
+		velocity_unregister_notifier();
+	return ret;
+}
+
+/**
+ * velocity_cleanup_module - module unload
+ *
+ * When the velocity module is unloaded this function is called.
+ * It will clean up the notifiers and then unregister the PCI
+ * driver interface for this hardware. This in turn cleans up
+ * all discovered interfaces before returning from the function.
+ */
+static void __exit velocity_cleanup_module(void)
+{
+	velocity_unregister_notifier();
+	pci_unregister_driver(&velocity_driver);
+}
+
+module_init(velocity_init_module);
+module_exit(velocity_cleanup_module);
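
The interrupt-coalescing helpers above pack a microsecond value into the chip's queue-timer format: six bits of count plus a two-bit multiplier selecting x1, x4, x16 or x64. The following standalone sketch (not part of the patch; the encode/decode names are illustrative only) mirrors set_pending_timer_val() and get_pending_timer_val() and round-trips a few values:

#include <stdio.h>

/* Mirror of set_pending_timer_val(): pick the smallest multiplier that
 * still fits the requested delay, then store count | (mult << 6). */
static unsigned int encode_pending_timer(unsigned int us)
{
	unsigned int mult = 0, shift = 0;

	if (us >= 0x3f) {
		mult = 1;	/* x4  */
		shift = 2;
	}
	if (us >= 0x3f * 4) {
		mult = 2;	/* x16 */
		shift = 4;
	}
	if (us >= 0x3f * 16) {
		mult = 3;	/* x64 */
		shift = 6;
	}
	return (mult << 6) | ((us >> shift) & 0x3f);
}

/* Mirror of get_pending_timer_val(): count times the selected multiplier. */
static unsigned int decode_pending_timer(unsigned int val)
{
	static const unsigned int mult[4] = { 1, 4, 16, 64 };

	return (val & 0x3f) * mult[(val >> 6) & 0x3];
}

int main(void)
{
	unsigned int us;

	/* The encoding is lossy above 63 us; 0x3f * 64 is the maximum,
	 * matching the bound checked in velocity_set_coalesce(). */
	for (us = 0; us <= 0x3f * 64; us += 500)
		printf("%4u us -> 0x%02x -> %4u us\n",
		       us, encode_pending_timer(us),
		       decode_pending_timer(encode_pending_timer(us)));
	return 0;
}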